repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---
enigmampc/catalyst | tests/pipeline/test_downsampling.py | 1 | 24481 | """
Tests for Downsampled Filters/Factors/Classifiers
"""
import pandas as pd
from pandas.util.testing import assert_frame_equal
from catalyst.pipeline import (
Pipeline,
CustomFactor,
CustomFilter,
CustomClassifier,
)
from catalyst.pipeline.data.testing import TestingDataSet
from catalyst.pipeline.factors.equity import SimpleMovingAverage
from catalyst.pipeline.filters.smoothing import All
from catalyst.testing import CatalystTestCase, parameter_space
from catalyst.testing.fixtures import (
WithTradingSessions,
WithSeededRandomPipelineEngine,
)
from catalyst.utils.input_validation import _qualified_name
from catalyst.utils.numpy_utils import int64_dtype
class NDaysAgoFactor(CustomFactor):
inputs = [TestingDataSet.float_col]
def compute(self, today, assets, out, floats):
out[:] = floats[0]
class NDaysAgoFilter(CustomFilter):
inputs = [TestingDataSet.bool_col]
def compute(self, today, assets, out, bools):
out[:] = bools[0]
class NDaysAgoClassifier(CustomClassifier):
inputs = [TestingDataSet.categorical_col]
dtype = TestingDataSet.categorical_col.dtype
def compute(self, today, assets, out, cats):
out[:] = cats[0]
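# Illustrative sketch (not exercised as a test itself): downsampled terms are built
# by calling ``.downsample(frequency)`` on any Factor/Filter/Classifier, e.g.
#
#   factor = NDaysAgoFactor(window_length=11)
#   yearly = factor.downsample('year_start')
#
# The tests below then check how many extra rows of lookback such terms request via
# ``term.compute_extra_rows(all_sessions, start_session, end_session, min_extra_rows)``.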
class ComputeExtraRowsTestcase(WithTradingSessions, CatalystTestCase):
DATA_MIN_DAY = pd.Timestamp('2012-06', tz='UTC')
DATA_MAX_DAY = pd.Timestamp('2015', tz='UTC')
TRADING_CALENDAR_STRS = ('NYSE',)
# Test with different window_lengths to ensure that window length is not
# used when calculating extra rows for the top-level term.
factor1 = TestingDataSet.float_col.latest
factor11 = NDaysAgoFactor(window_length=11)
factor91 = NDaysAgoFactor(window_length=91)
filter1 = TestingDataSet.bool_col.latest
filter11 = NDaysAgoFilter(window_length=11)
filter91 = NDaysAgoFilter(window_length=91)
classifier1 = TestingDataSet.categorical_col.latest
classifier11 = NDaysAgoClassifier(window_length=11)
classifier91 = NDaysAgoClassifier(window_length=91)
all_terms = [
factor1,
factor11,
factor91,
filter1,
filter11,
filter91,
classifier1,
classifier11,
classifier91,
]
@parameter_space(
calendar_name=TRADING_CALENDAR_STRS,
base_terms=[
(factor1, factor11, factor91),
(filter1, filter11, filter91),
(classifier1, classifier11, classifier91),
],
__fail_fast=True
)
def test_yearly(self, base_terms, calendar_name):
downsampled_terms = tuple(
t.downsample('year_start') for t in base_terms
)
all_terms = base_terms + downsampled_terms
all_sessions = self.trading_sessions[calendar_name]
end_session = all_sessions[-1]
years = all_sessions.year
sessions_in_2012 = all_sessions[years == 2012]
sessions_in_2013 = all_sessions[years == 2013]
sessions_in_2014 = all_sessions[years == 2014]
# Simulate requesting computation where the unaltered lookback would
# land exactly on the first date in 2014. We shouldn't request any
# additional rows for the regular terms or the downsampled terms.
for i in range(0, 30, 5):
start_session = sessions_in_2014[i]
self.check_extra_row_calculations(
all_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land on the second date in 2014. We should request one more extra
# row in the downsampled terms to push us back to the first date in
# 2014.
for i in range(0, 30, 5):
start_session = sessions_in_2014[i + 1]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i + 1,
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land on the last date of 2013. The downsampled terms should request
# enough extra rows to push us back to the start of 2013.
for i in range(0, 30, 5):
start_session = sessions_in_2014[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(sessions_in_2013),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
# Simulate requesting computation where the unaltered lookback would
# land on the last date of 2012. The downsampled terms should request
# enough extra rows to push us back to the first known date, which is
# in the middle of 2012.
for i in range(0, 30, 5):
start_session = sessions_in_2013[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(sessions_in_2012),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
@parameter_space(
calendar_name=TRADING_CALENDAR_STRS,
base_terms=[
(factor1, factor11, factor91),
(filter1, filter11, filter91),
(classifier1, classifier11, classifier91),
],
__fail_fast=True
)
def test_quarterly(self, calendar_name, base_terms):
downsampled_terms = tuple(
t.downsample('quarter_start') for t in base_terms
)
all_terms = base_terms + downsampled_terms
# This region intersects with Q4 2013, Q1 2014, and Q2 2014.
tmp = self.trading_sessions[calendar_name]
all_sessions = tmp[tmp.slice_indexer('2013-12-15', '2014-04-30')]
end_session = all_sessions[-1]
months = all_sessions.month
Q4_2013 = all_sessions[months == 12]
Q1_2014 = all_sessions[(months == 1) | (months == 2) | (months == 3)]
Q2_2014 = all_sessions[months == 4]
# Simulate requesting computation where the unaltered lookback would
# land exactly on the first date in Q2 2014. We shouldn't request any
# additional rows for the regular terms or the downsampled terms.
for i in range(0, 15, 5):
start_session = Q2_2014[i]
self.check_extra_row_calculations(
all_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land exactly on the second date in Q2 2014.
# The downsampled terms should request one more extra row.
for i in range(0, 15, 5):
start_session = Q2_2014[i + 1]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i + 1,
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land exactly on the last date in Q1 2014. The downsampled terms
# should request enough extra rows to push us back to the first date of
# Q1 2014.
for i in range(0, 15, 5):
start_session = Q2_2014[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(Q1_2014),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
# Simulate requesting computation where the unaltered lookback would
# land exactly on the last date in Q4 2013. The downsampled terms
# should request enough extra rows to push us back to the first known
# date, which is in the middle of december 2013.
for i in range(0, 15, 5):
start_session = Q1_2014[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(Q4_2013),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
@parameter_space(
calendar_name=TRADING_CALENDAR_STRS,
base_terms=[
(factor1, factor11, factor91),
(filter1, filter11, filter91),
(classifier1, classifier11, classifier91),
],
__fail_fast=True
)
def test_monthly(self, calendar_name, base_terms):
downsampled_terms = tuple(
t.downsample('month_start') for t in base_terms
)
all_terms = base_terms + downsampled_terms
# This region intersects with Dec 2013, Jan 2014, and Feb 2014.
tmp = self.trading_sessions[calendar_name]
all_sessions = tmp[tmp.slice_indexer('2013-12-15', '2014-02-28')]
end_session = all_sessions[-1]
months = all_sessions.month
dec2013 = all_sessions[months == 12]
jan2014 = all_sessions[months == 1]
feb2014 = all_sessions[months == 2]
# Simulate requesting computation where the unaltered lookback would
# land exactly on the first date in feb 2014. We shouldn't request any
# additional rows for the regular terms or the downsampled terms.
for i in range(0, 10, 2):
start_session = feb2014[i]
self.check_extra_row_calculations(
all_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land on the second date in feb 2014. We should request one more
# extra row in the downsampled terms to push us back to the first date
# in feb 2014.
for i in range(0, 10, 2):
start_session = feb2014[i + 1]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i + 1,
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land on the last date of jan 2014. The downsampled terms should
# request enough extra rows to push us back to the start of jan 2014.
for i in range(0, 10, 2):
start_session = feb2014[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(jan2014),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
# Simulate requesting computation where the unaltered lookback would
# land on the last date of dec 2013. The downsampled terms should
# request enough extra rows to push us back to the first known date,
# which is in the middle of december 2013.
for i in range(0, 10, 2):
start_session = jan2014[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(dec2013),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
@parameter_space(
calendar_name=TRADING_CALENDAR_STRS,
base_terms=[
(factor1, factor11, factor91),
(filter1, filter11, filter91),
(classifier1, classifier11, classifier91),
],
__fail_fast=True
)
def test_weekly(self, calendar_name, base_terms):
downsampled_terms = tuple(
t.downsample('week_start') for t in base_terms
)
all_terms = base_terms + downsampled_terms
# December 2013
# Mo Tu We Th Fr Sa Su
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30 31
# January 2014
# Mo Tu We Th Fr Sa Su
# 1 2 3 4 5
# 6 7 8 9 10 11 12
# 13 14 15 16 17 18 19
# 20 21 22 23 24 25 26
# 27 28 29 30 31
# This region intersects with the last full week of 2013, the week
# shared by 2013 and 2014, and the first full week of 2014.
tmp = self.trading_sessions[calendar_name]
all_sessions = tmp[tmp.slice_indexer('2013-12-27', '2014-01-12')]
end_session = all_sessions[-1]
week0 = all_sessions[
all_sessions.slice_indexer('2013-12-27', '2013-12-29')
]
week1 = all_sessions[
all_sessions.slice_indexer('2013-12-30', '2014-01-05')
]
week2 = all_sessions[
all_sessions.slice_indexer('2014-01-06', '2014-01-12')
]
# Simulate requesting computation where the unaltered lookback would
# land exactly on the first date in week 2. We shouldn't request any
# additional rows for the regular terms or the downsampled terms.
for i in range(3):
start_session = week2[i]
self.check_extra_row_calculations(
all_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land exactly on the second date in week 2. The downsampled terms
# should request one more extra row.
for i in range(3):
start_session = week2[i + 1]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i + 1,
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i,
expected_extra_rows=i,
)
# Simulate requesting computation where the unaltered lookback would
# land exactly on the last date in week 1. The downsampled terms
# should request enough extra rows to push us back to the first date of
# week 1.
for i in range(3):
start_session = week2[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(week1),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
# Simulate requesting computation where the unaltered lookback would
# land exactly on the last date in week0. The downsampled terms
# should request enough extra rows to push us back to the first known
# date, which is in the middle of december 2013.
for i in range(3):
start_session = week1[i]
self.check_extra_row_calculations(
downsampled_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + len(week0),
)
self.check_extra_row_calculations(
base_terms,
all_sessions,
start_session,
end_session,
min_extra_rows=i + 1,
expected_extra_rows=i + 1,
)
def check_extra_row_calculations(self,
terms,
all_sessions,
start_session,
end_session,
min_extra_rows,
expected_extra_rows):
"""
Check that each term in ``terms`` computes an expected number of extra
rows for the given parameters.
"""
for term in terms:
result = term.compute_extra_rows(
all_sessions,
start_session,
end_session,
min_extra_rows,
)
self.assertEqual(
result,
expected_extra_rows,
"Expected {} extra_rows from {}, but got {}.".format(
expected_extra_rows,
term,
result,
)
)
class DownsampledPipelineTestCase(WithSeededRandomPipelineEngine,
CatalystTestCase):
# Extend into the last few days of 2013 to test year/quarter boundaries.
START_DATE = pd.Timestamp('2013-12-15', tz='UTC')
# Extend into the first few days of 2015 to test year/quarter boundaries.
END_DATE = pd.Timestamp('2015-01-06', tz='UTC')
ASSET_FINDER_EQUITY_SIDS = tuple(range(10))
def check_downsampled_term(self, term):
# June 2014
# Mo Tu We Th Fr Sa Su
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
all_sessions = self.nyse_sessions
compute_dates = all_sessions[
all_sessions.slice_indexer('2014-06-05', '2015-01-06')
]
start_date, end_date = compute_dates[[0, -1]]
pipe = Pipeline({
'year': term.downsample(frequency='year_start'),
'quarter': term.downsample(frequency='quarter_start'),
'month': term.downsample(frequency='month_start'),
'week': term.downsample(frequency='week_start'),
})
# Raw values for term, computed each day from 2014 to the end of the
# target period.
raw_term_results = self.run_pipeline(
Pipeline({'term': term}),
start_date=pd.Timestamp('2014-01-02', tz='UTC'),
end_date=pd.Timestamp('2015-01-06', tz='UTC'),
)['term'].unstack()
expected_results = {
'year': (raw_term_results
.groupby(pd.TimeGrouper('AS'))
.first()
.reindex(compute_dates, method='ffill')),
'quarter': (raw_term_results
.groupby(pd.TimeGrouper('QS'))
.first()
.reindex(compute_dates, method='ffill')),
'month': (raw_term_results
.groupby(pd.TimeGrouper('MS'))
.first()
.reindex(compute_dates, method='ffill')),
'week': (raw_term_results
.groupby(pd.TimeGrouper('W', label='left'))
.first()
.reindex(compute_dates, method='ffill')),
}
results = self.run_pipeline(pipe, start_date, end_date)
for frequency in expected_results:
result = results[frequency].unstack()
expected = expected_results[frequency]
assert_frame_equal(result, expected)
def _test_downsample_windowed_factor(self):
self.check_downsampled_term(
SimpleMovingAverage(
inputs=[TestingDataSet.float_col],
window_length=5,
)
)
def _test_downsample_non_windowed_factor(self):
sma = SimpleMovingAverage(
inputs=[TestingDataSet.float_col],
window_length=5,
)
self.check_downsampled_term(((sma + sma) / 2).rank())
def _test_downsample_windowed_filter(self):
sma = SimpleMovingAverage(
inputs=[TestingDataSet.float_col],
window_length=5,
)
self.check_downsampled_term(All(inputs=[sma.top(4)], window_length=5))
def _test_downsample_nonwindowed_filter(self):
sma = SimpleMovingAverage(
inputs=[TestingDataSet.float_col],
window_length=5,
)
self.check_downsampled_term(sma > 5)
def _test_downsample_windowed_classifier(self):
class IntSumClassifier(CustomClassifier):
inputs = [TestingDataSet.float_col]
window_length = 8
dtype = int64_dtype
missing_value = -1
def compute(self, today, assets, out, floats):
out[:] = floats.sum(axis=0).astype(int) % 4
self.check_downsampled_term(IntSumClassifier())
def _test_downsample_nonwindowed_classifier(self):
sma = SimpleMovingAverage(
inputs=[TestingDataSet.float_col],
window_length=5,
)
self.check_downsampled_term(sma.quantiles(5))
def test_errors_on_bad_downsample_frequency(self):
f = NDaysAgoFactor(window_length=3)
with self.assertRaises(ValueError) as e:
f.downsample('bad')
expected = (
"{}() expected a value in "
"('month_start', 'quarter_start', 'week_start', 'year_start') "
"for argument 'frequency', but got 'bad' instead."
).format(_qualified_name(f.downsample))
self.assertEqual(str(e.exception), expected)
| apache-2.0 |
JarronL/pynrc | pynrc/nrc_utils.py | 1 | 160856 | """pyNRC utility functions"""
from __future__ import absolute_import, division, print_function, unicode_literals
# The six library is useful for Python 2 and 3 compatibility
import six
import os, re
# Import libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
on_rtd = os.environ.get('READTHEDOCS') == 'True'
# Update matplotlib settings
rcvals = {'xtick.minor.visible': True, 'ytick.minor.visible': True,
'xtick.direction': 'in', 'ytick.direction': 'in',
'xtick.top': True, 'ytick.right': True, 'font.family': ['serif'],
'xtick.major.size': 6, 'ytick.major.size': 6,
'xtick.minor.size': 3, 'ytick.minor.size': 3,
'image.interpolation': 'nearest', 'image.origin': 'lower',
'figure.figsize': [8,6], 'mathtext.fontset':'cm'}#,
#'text.usetex': True, 'text.latex.preamble': ['\usepackage{gensymb}']}
if not on_rtd:
matplotlib.rcParams.update(rcvals)
cmap_pri, cmap_alt = ('viridis', 'gist_heat')
matplotlib.rcParams['image.cmap'] = cmap_pri if cmap_pri in plt.colormaps() else cmap_alt
import datetime, time
import sys, platform
import multiprocessing as mp
import traceback
from astropy.io import fits, ascii
from astropy.table import Table
from astropy.time import Time
# from astropy import units
#from scipy.optimize import least_squares#, leastsq
#from scipy.ndimage import fourier_shift
from scipy.interpolate import griddata, RegularGridInterpolator, interp1d
from numpy.polynomial import legendre
from . import conf
from .logging_utils import setup_logging
from .maths import robust
from .maths.fast_poly import *
from .maths.image_manip import *
from .maths.coords import *
# from .maths.image_manip import frebin, fshift, pad_or_cut_to_size
# from .maths.image_manip import hist_indices, binned_statistic
# from .maths.coords import dist_image, xy_to_rtheta, rtheta_to_xy, xy_rot
# from .maths.coords import det_to_sci, sci_to_det, plotAxes
# OPD info
from .opds import opd_default, OPDFile_to_HDUList
###########################################################################
#
# Logging info
#
###########################################################################
import logging
_log = logging.getLogger('pynrc')
try:
import webbpsf
except ImportError:
raise ImportError('WebbPSF is not installed. pyNRC depends on its inclusion.')
# Some useful functions for displaying and measuring PSFs
import poppy
from poppy import (radial_profile, measure_radial, measure_fwhm, measure_ee)
from poppy import (measure_sharpness, measure_centroid, measure_strehl)
#from poppy import (display_PSF, display_PSF_difference, display_EE, display_profiles, radial_profile,
# measure_EE, measure_radial, measure_fwhm, measure_sharpness, measure_centroid, measure_strehl,
# specFromSpectralType, fwcentroid)
import pysynphot as S
# Extend default wavelength range to 5.6 um
S.refs.set_default_waveset(minwave=500, maxwave=56000, num=10000.0, delta=None, log=False)
# JWST 25m^2 collecting area
# Flux loss from masks and occulters are taken into account in WebbPSF
S.refs.setref(area = 25.4e4) # cm^2
# The following won't work on readthedocs compilation
if not on_rtd:
# Grab WebbPSF assumed pixel scales
log_prev = conf.logging_level
setup_logging('WARN', verbose=False)
nc_temp = webbpsf.NIRCam()
setup_logging(log_prev, verbose=False)
pixscale_SW = nc_temp._pixelscale_short
pixscale_LW = nc_temp._pixelscale_long
del nc_temp
_jbt_exists = True
try:
from jwst_backgrounds import jbt
except ImportError:
_log.info(" jwst_backgrounds is not installed and will not be used for bg estimates.")
_jbt_exists = False
#__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
#__location__ += '/'
__epsilon = np.finfo(float).eps
###########################################################################
#
# Pysynphot Bandpasses
#
###########################################################################
def bp_igood(bp, min_trans=0.001, fext=0.05):
"""
Given a bandpass with transmission 0.0-1.0, return the indices that
cover only the region of interest and ignore those wavelengths with
very low transmission less than and greater than the bandpass width.
"""
# Select which wavelengths to use
igood = bp.throughput > min_trans
# Select the "good" wavelengths
wgood = (bp.wave)[igood]
w1 = wgood.min()
w2 = wgood.max()
wr = w2 - w1
# Extend by 5% on either side
w1 -= fext*wr
w2 += fext*wr
# Now choose EVERYTHING between w1 and w2 (not just th>0.001)
ind = ((bp.wave >= w1) & (bp.wave <= w2))
return ind
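# Minimal usage sketch for ``bp_igood`` (illustrative; works for any pysynphot
# bandpass): keep only the flagged wavelengths and rebuild the bandpass.
def _example_trim_bandpass(bp):
    """Return a copy of `bp` restricted to its region of interest."""
    ind = bp_igood(bp, min_trans=0.001, fext=0.05)
    return S.ArrayBandpass(bp.wave[ind], bp.throughput[ind], name=bp.name)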
def read_filter(filter, pupil=None, mask=None, module=None, ND_acq=False,
ice_scale=None, nvr_scale=None, ote_scale=None, nc_scale=None,
grism_order=1, coron_substrate=False, **kwargs):
"""Read filter bandpass.
Read in filter throughput curve from file generated by STScI.
Includes: OTE, NRC mirrors, dichroic, filter curve, and detector QE.
To Do: Account for pupil size reduction for DHS and grism observations.
Parameters
----------
filter : str
Name of a filter.
pupil : str, None
NIRCam pupil elements such as grisms or lyot stops.
mask : str, None
Specify the coronagraphic occulter (spots or bar).
module : str
Module 'A' or 'B'.
ND_acq : bool
ND acquisition square in coronagraphic mask.
ice_scale : float
Add in additional OTE H2O absorption. This is a scale factor
relative to 0.0131 um thickness. Also includes about 0.0150 um of
photolyzed Carbon.
nvr_scale : float
Modify NIRCam non-volatile residue. This is a scale factor relative
to 0.280 um thickness already built into filter throughput curves.
If set to None, then assumes a scale factor of 1.0.
Setting nvr_scale=0 will remove these contributions.
ote_scale : float
Scale factor of OTE contaminants relative to End of Life model.
This is the same as setting ice_scale. Will override ice_scale value.
nc_scale : float
Scale factor for NIRCam contaminants relative to End of Life model.
This model assumes 0.189 um of NVR and 0.050 um of water ice on
the NIRCam optical elements. Setting this keyword will remove all
NVR contributions built into the NIRCam filter curves.
Overrides nvr_scale value.
grism_order : int
Option to use 2nd order grism throughputs instead. Useful if
someone wanted to overlay the 2nd order contributions onto a
wide field observation.
coron_substrate : bool
Explicit option to include coronagraphic substrate transmission
even if mask=None. Gives the option of using LYOT or grism pupils
with or without coron substrate.
Returns
-------
:mod:`pysynphot.obsbandpass`
A Pysynphot bandpass object.
"""
if module is None: module = 'A'
# Select filter file and read
filter = filter.upper()
mod = module.lower()
filt_dir = conf.PYNRC_PATH + 'throughputs/'
filt_file = filter + '_nircam_plus_ote_throughput_mod' + mod + '_sorted.txt'
bp = S.FileBandpass(filt_dir+filt_file)
bp_name = filter
_log.debug('Reading file: '+filt_file)
# Select channel (SW or LW) for minor decisions later on
channel = 'SW' if bp.avgwave()/1e4 < 2.3 else 'LW'
# Select which wavelengths to keep
igood = bp_igood(bp, min_trans=0.005, fext=0.1)
wgood = (bp.wave)[igood]
w1 = wgood.min()
w2 = wgood.max()
wrange = w2 - w1
# Read in grism throughput and multiply filter bandpass
if (pupil is not None) and ('GRISM' in pupil):
# Grism transmission curve follows a 3rd-order polynomial
# The following coefficients assume that wavelength is in um
if (module == 'A') and (grism_order==1):
cf_g = np.array([0.068695897, -0.943894294, 4.1768413, -5.306475735])
elif (module == 'B') and (grism_order==1):
cf_g = np.array([0.050758635, -0.697433006, 3.086221627, -3.92089596])
elif (module == 'A') and (grism_order==2):
cf_g = np.array([0.05172, -0.85065, 5.22254, -14.18118, 14.37131])
elif (module == 'B') and (grism_order==2):
cf_g = np.array([0.03821, -0.62853, 3.85887, -10.47832, 10.61880])
# Create polynomial function for grism throughput from coefficients
p = np.poly1d(cf_g)
th_grism = p(bp.wave/1e4)
th_grism[th_grism < 0] = 0
# Multiply filter throughput by grism
th_new = th_grism * bp.throughput
bp = S.ArrayBandpass(bp.wave, th_new)
# spectral resolution in um/pixel
# res is in pixels/um and dw is inverse
res, dw = grism_res(pupil, module, m=grism_order)
# Convert to Angstrom
dw *= 10000 # Angstrom
npts = int(wrange/dw)+1
warr = np.linspace(w1, w1+dw*npts, npts)
bp = bp.resample(warr)
# Read in DHS throughput and multiply filter bandpass
elif (pupil is not None) and ('DHS' in pupil):
# DHS transmission curve follows a 3rd-order polynomial
# The following coefficients assume that wavelength is in um
cf_d = np.array([0.3192, -3.4719, 14.972, -31.979, 33.311, -12.582])
p = np.poly1d(cf_d)
th_dhs = p(bp.wave/1e4)
th_dhs[th_dhs < 0] = 0
th_dhs[bp.wave > 3e4] = 0
# Multiply filter throughput by DHS
th_new = th_dhs * bp.throughput
bp = S.ArrayBandpass(bp.wave, th_new)
# Mean spectral dispersion (dw/pix)
res = 290.0
dw = 1. / res # um/pixel
dw *= 10000 # Angstrom/pixel
npts = int(wrange/dw)+1
warr = np.linspace(w1, w1+dw*npts, npts)
bp = bp.resample(warr)
# Coronagraphic throughput modifications
# Substrate transmission
if ((mask is not None) and ('MASK' in mask)) or coron_substrate or ND_acq:
# Sapphire mask transmission values for coronagraphic substrate
hdulist = fits.open(conf.PYNRC_PATH + 'throughputs/jwst_nircam_moda_com_substrate_trans.fits')
wtemp = hdulist[1].data['WAVELENGTH']
ttemp = hdulist[1].data['THROUGHPUT']
# Estimates for w<1.5um
wtemp = np.insert(wtemp, 0, [0.5, 0.7, 1.2, 1.40])
ttemp = np.insert(ttemp, 0, [0.2, 0.2, 0.5, 0.15])
# Estimates for w>5.0um
wtemp = np.append(wtemp, [6.00])
ttemp = np.append(ttemp, [0.22])
# Did we explicitly set the ND acquisition square?
# This is a special case and doesn't necessarily need to be set.
# WebbPSF has a provision to include ND filters in the field, but we include
# this option if the user doesn't want to figure out offset positions.
if ND_acq:
fname = 'NDspot_ODvsWavelength.txt'
path_ND = conf.PYNRC_PATH + 'throughputs/' + fname
data = ascii.read(path_ND)
wdata = data[data.colnames[0]].data # Wavelength (um)
odata = data[data.colnames[1]].data # Optical Density
# Estimates for w<1.5um
wdata = np.insert(wdata, 0, [0.5])
odata = np.insert(odata, 0, [3.8])
# Estimates for w>5.0um
wdata = np.append(wdata, [6.00])
odata = np.append(odata, [2.97])
# CV3 data suggests OD needs to be multiplied by 0.93
# compared to Barr measurements
odata *= 0.93
otemp = np.interp(wtemp, wdata, odata, left=0, right=0)
ttemp *= 10**(-1*otemp)
# Interpolate substrate transmission onto filter wavelength grid and multiply
th_coron_sub = np.interp(bp.wave/1e4, wtemp, ttemp, left=0, right=0)
th_new = th_coron_sub * bp.throughput
bp = S.ArrayBandpass(bp.wave, th_new)
# Lyot stop wedge modifications
# Substrate transmission
if (pupil is not None) and ('LYOT' in pupil):
# Transmission values for wedges in Lyot stop
if 'SW' in channel:
fname = 'jwst_nircam_sw-lyot_trans_modmean.fits'
hdulist = fits.open(conf.PYNRC_PATH + 'throughputs/' + fname)
wtemp = hdulist[1].data['WAVELENGTH']
ttemp = hdulist[1].data['THROUGHPUT']
# Estimates for w<1.5um
wtemp = np.insert(wtemp, 0, [0.50, 1.00])
ttemp = np.insert(ttemp, 0, [0.95, 0.95])
# Estimates for w>2.3um
wtemp = np.append(wtemp, [2.50,3.00])
ttemp = np.append(ttemp, [0.85,0.85])
# Interpolate substrate transmission onto filter wavelength grid
th_wedge = np.interp(bp.wave/1e4, wtemp, ttemp, left=0, right=0)
elif 'LW' in channel:
fname = 'jwst_nircam_lw-lyot_trans_modmean.fits'
hdulist = fits.open(conf.PYNRC_PATH + 'throughputs/' + fname)
wtemp = hdulist[1].data['WAVELENGTH']
ttemp = hdulist[1].data['THROUGHPUT']
ttemp *= 100 # Factors of 100 error in saved values
# Smooth the raw data
ws = 200
s = np.r_[ttemp[ws-1:0:-1],ttemp,ttemp[-1:-ws:-1]]
w = np.blackman(ws)
y = np.convolve(w/w.sum(),s,mode='valid')
ttemp = y[int((ws/2-1)):int(-(ws/2))]
# Estimates for w<2.3um
wtemp = np.insert(wtemp, 0, [1.00])
ttemp = np.insert(ttemp, 0, [0.95])
# Estimates for w>5.0um
wtemp = np.append(wtemp, [6.0])
ttemp = np.append(ttemp, [0.9])
# Interpolate substrate transmission onto filter wavelength grid
th_wedge = np.interp(bp.wave/1e4, wtemp, ttemp, left=0, right=0)
th_new = th_wedge * bp.throughput
bp = S.ArrayBandpass(bp.wave, th_new, name=bp.name)
# Weak Lens substrate transmission
if (pupil is not None) and (('WL' in pupil) or ('WEAK LENS' in pupil)):
if 'WL' in pupil:
wl_alt = {'WLP4' :'WEAK LENS +4',
'WLP8' :'WEAK LENS +8',
'WLP12':'WEAK LENS +12 (=4+8)',
'WLM4' :'WEAK LENS -4 (=4-8)',
'WLM8' :'WEAK LENS -8'}
wl_name = wl_alt.get(pupil, pupil)
else:
wl_name = pupil
# Throughput for WL+4
hdulist = fits.open(conf.PYNRC_PATH + 'throughputs/jwst_nircam_wlp4.fits')
wtemp = hdulist[1].data['WAVELENGTH']
ttemp = hdulist[1].data['THROUGHPUT']
th_wl4 = np.interp(bp.wave/1e4, wtemp, ttemp, left=0, right=0)
# Throughput for WL+/-8
hdulist = fits.open(conf.PYNRC_PATH + 'throughputs/jwst_nircam_wlp8.fits')
wtemp = hdulist[1].data['WAVELENGTH']
ttemp = hdulist[1].data['THROUGHPUT']
th_wl8 = np.interp(bp.wave/1e4, wtemp, ttemp, left=0, right=0)
# If two lenses
wl48_list = ['WEAK LENS +12 (=4+8)', 'WEAK LENS -4 (=4-8)']
if (wl_name in wl48_list):
th_wl = th_wl4 * th_wl8
bp_name = 'F212N'
elif 'WEAK LENS +4' in wl_name:
th_wl = th_wl4
bp_name = 'F212N'
else:
th_wl = th_wl8
th_new = th_wl * bp.throughput
bp = S.ArrayBandpass(bp.wave, th_new)
# Select which wavelengths to keep
igood = bp_igood(bp, min_trans=0.005, fext=0.1)
wgood = (bp.wave)[igood]
w1 = wgood.min()
w2 = wgood.max()
wrange = w2 - w1
# OTE scaling (use ice_scale keyword)
if ote_scale is not None:
ice_scale = ote_scale
if nc_scale is not None:
nvr_scale = 0
# Water ice and NVR additions (for LW channel only)
if ((ice_scale is not None) or (nvr_scale is not None)) and ('LW' in channel):
fname = conf.PYNRC_PATH + 'throughputs/ote_nc_sim_1.00.txt'
names = ['Wave', 't_ice', 't_nvr', 't_sys']
data = ascii.read(fname, data_start=1, names=names)
wtemp = data['Wave']
wtemp = np.insert(wtemp, 0, [1.0]) # Estimates for w<2.5um
wtemp = np.append(wtemp, [6.0]) # Estimates for w>5.0um
th_new = bp.throughput
if ice_scale is not None:
ttemp = data['t_ice']
ttemp = np.insert(ttemp, 0, [1.0]) # Estimates for w<2.5um
ttemp = np.append(ttemp, [1.0]) # Estimates for w>5.0um
# Interpolate transmission onto filter wavelength grid
ttemp = np.interp(bp.wave/1e4, wtemp, ttemp)#, left=0, right=0)
# Scale is fraction of absorption feature depth, not of layer thickness
th_new = th_new * (1 - ice_scale * (1 - ttemp))
# th_ice = np.exp(ice_scale * np.log(ttemp))
# th_new = th_ice * th_new
if nvr_scale is not None:
ttemp = data['t_nvr']
ttemp = np.insert(ttemp, 0, [1.0]) # Estimates for w<2.5um
ttemp = np.append(ttemp, [1.0]) # Estimates for w>5.0um
# Interpolate transmission onto filter wavelength grid
ttemp = np.interp(bp.wave/1e4, wtemp, ttemp)#, left=0, right=0)
# Scale is fraction of absorption feature depth, not of layer thickness
# First, remove NVR contributions already included in throughput curve
th_new = th_new / ttemp
th_new = th_new * (1 - nvr_scale * (1 - ttemp))
# The "-1" removes NVR contributions already included in
# NIRCam throughput curves
# th_nvr = np.exp((nvr_scale-1) * np.log(ttemp))
# th_new = th_nvr * th_new
if nc_scale is not None:
names = ['Wave', 'coeff'] # coeff is per um path length
path = conf.PYNRC_PATH
data_ice = ascii.read(path + 'throughputs/h2o_abs.txt', names=names)
data_nvr = ascii.read(path + 'throughputs/nvr_abs.txt', names=names)
w_ice = data_ice['Wave']
a_ice = data_ice['coeff']
a_ice = np.interp(bp.wave/1e4, w_ice, a_ice)
w_nvr = data_nvr['Wave']
a_nvr = data_nvr['coeff']
a_nvr = np.interp(bp.wave/1e4, w_nvr, a_nvr)
ttemp = np.exp(-0.189 * a_nvr - 0.050 * a_ice)
th_new = th_new * (1 - nc_scale * (1 - ttemp))
# ttemp = np.exp(-nc_scale*(a_nvr*0.189 + a_ice*0.05))
# th_new = ttemp * th_new
# Create new bandpass
bp = S.ArrayBandpass(bp.wave, th_new)
# Resample to common dw to ensure consistency
dw_arr = bp.wave[1:] - bp.wave[:-1]
#if not np.isclose(dw_arr.min(),dw_arr.max()):
dw = np.median(dw_arr)
warr = np.arange(w1,w2, dw)
bp = bp.resample(warr)
# Need to place zeros at either end so Pysynphot doesn't extrapolate
warr = np.concatenate(([bp.wave.min()-dw],bp.wave,[bp.wave.max()+dw]))
tarr = np.concatenate(([0],bp.throughput,[0]))
bp = S.ArrayBandpass(warr, tarr, name=bp_name)
return bp
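# Illustrative usage sketch (the filter name and scale factors are placeholders,
# not recommendations): build a module-A LW bandpass with additional OTE water-ice
# and NIRCam NVR contamination, as described in the ``read_filter`` docstring.
def _example_read_filter():
    """Return the F444W module-A bandpass name and its average wavelength (um)."""
    bp = read_filter('F444W', module='A', ice_scale=0.5, nvr_scale=0.5)
    return bp.name, bp.avgwave() / 1e4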
###########################################################################
#
# Sensitivities and Saturation Limits
#
###########################################################################
def channel_select(bp):
"""Select wavelength channel
Based on input bandpass, return the pixel scale, dark current, and
excess read noise parameters. These values are typical for either
a SW or LW NIRCam detector.
Parameters
----------
bp : :mod:`pysynphot.obsbandpass`
NIRCam filter bandpass.
"""
if bp.avgwave()/1e4 < 2.3:
pix_scale = pixscale_SW # pixel scale (arcsec/pixel)
idark = 0.003 # dark current (e/sec)
pex = (1.0,5.0)
else:
pix_scale = pixscale_LW
idark = 0.03
pex = (1.5,10.0)
return (pix_scale, idark, pex)
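# Illustrative sketch: the tuple returned by ``channel_select`` maps onto the
# detector-noise keywords referenced in the docstrings below (``idark`` and
# ``p_excess`` for ``pix_noise``).
def _example_channel_defaults(bp):
    """Return the channel-dependent detector defaults for bandpass `bp` as a dict."""
    pix_scale, idark, p_excess = channel_select(bp)
    return {'pix_scale': pix_scale, 'idark': idark, 'p_excess': p_excess}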
def grism_wref(pupil='GRISM', module='A'):
"""Grism undeviated wavelength"""
# Option for GRISMR/GRISMC
if 'GRISMR' in pupil:
pupil = 'GRISM0'
elif 'GRISMC' in pupil:
pupil = 'GRISM90'
# Mean spectral dispersion in number of pixels per um
if ('GRISM90' in pupil) and (module == 'A'):
wref = 3.978
elif ('GRISM0' in pupil) and (module == 'A'):
wref = 3.937
elif ('GRISM90' in pupil) and (module == 'B'):
wref = 3.923
elif ('GRISM0' in pupil) and (module == 'B'):
wref = 3.960
else:
wref = 3.95
return wref
def grism_res(pupil='GRISM', module='A', m=1):
"""Grism resolution
Based on the pupil input and module, return the spectral
dispersion and resolution as a tuple (res, dw).
Parameters
----------
pupil : str
'GRISM0' or 'GRISM90', otherwise assume res=1000 pix/um.
'GRISM0' is GRISMR; 'GRISM90' is GRISMC
module : str
'A' or 'B'
m : int
Spectral order (1 or 2).
"""
# Option for GRISMR/GRISMC
if 'GRISMR' in pupil:
pupil = 'GRISM0'
elif 'GRISMC' in pupil:
pupil = 'GRISM90'
# Mean spectral dispersion in number of pixels per um
if ('GRISM90' in pupil) and (module == 'A'):
res = 1003.12
elif ('GRISM0' in pupil) and (module == 'A'):
res = 996.48
elif ('GRISM90' in pupil) and (module == 'B'):
res = 1008.64
elif ('GRISM0' in pupil) and (module == 'B'):
res = 1009.13
else:
res = 1000.0
if m==2:
res *= 2
# Spectral resolution in um/pixel
dw = 1. / res
return (res, dw)
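# Illustrative sketch mirroring how ``read_filter`` uses this dispersion: convert
# um/pixel to Angstrom/pixel and build a linear wavelength grid (w1, w2 in Angstrom)
# for resampling a bandpass.
def _example_grism_wavegrid(w1, w2, pupil='GRISM0', module='A', m=1):
    """Wavelength grid (Angstrom) spanning [w1, w2] at the grism pixel dispersion."""
    res, dw = grism_res(pupil, module, m=m)  # res in pixels/um, dw in um/pixel
    dw *= 10000                              # Angstrom per pixel
    npts = int((w2 - w1) / dw) + 1
    return np.linspace(w1, w1 + dw*npts, npts)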
def place_grismr_tso(waves, imarr, siaf_ap, wref=None, im_coords='sci'):
"""
Shift image such that undeviated wavelength sits at the
SIAF aperture reference location.
"""
from .maths.coords import det_to_sci
if len(imarr.shape) > 2:
nz, ny_in, nx_in = imarr.shape
else:
nz = 1
ny_in, nx_in = imarr.shape
imarr = imarr.reshape([nz,ny_in,nx_in])
# Convert to sci coordinates
if im_coords=='det':
det_name = siaf_ap.AperName[3:5]
imarr = det_to_sci(imarr, det_name)
# Determine reference wavelength
if wref is None:
if 'GRISMC' in siaf_ap.AperName:
pupil = 'GRISMC'
elif 'GRISM' in siaf_ap.AperName:
pupil = 'GRISMR'
else: # generic grism
pupil = 'GRISM'
module = 'A' if 'NRCA' in siaf_ap.AperName else 'B'
wref = grism_wref(pupil, module)
# Get reference coordinates
yref, xref = (siaf_ap.YSciRef, siaf_ap.XSciRef)
# Final image size
ny_out, nx_out = (siaf_ap.YSciSize, siaf_ap.XSciSize)
# Empirically determine shift value in dispersion direction
wnew_temp = pad_or_cut_to_size(waves, nx_out)
# Index of reference wavelength associated with ref pixel
ind = (wnew_temp>wref-0.01) & (wnew_temp<wref+0.01)
xnew_temp = np.interp(wref, wnew_temp[ind], np.arange(nx_out)[ind])
xoff = xref - xnew_temp
# Move to correct position in y
yoff = yref - (int(ny_out/2) - 1)
# if np.mod(ny_in,2)==0: # If even, shift by half a pixel?
# yoff = yoff + 0.5
imarr = pad_or_cut_to_size(imarr, (ny_out,nx_out), offset_vals=(yoff,xoff), fill_val=np.nan)
waves = pad_or_cut_to_size(waves, nx_out, offset_vals=xoff, fill_val=np.nan)
# Remove NaNs
ind_nan = np.isnan(imarr)
imarr[ind_nan] = np.min(imarr[~ind_nan])
# Remove NaNs
# Fill in with wavelength solution (linear extrapolation)
ind_nan = np.isnan(waves)
# waves[ind_nan] = 0
arr = np.arange(nx_out)
cf = jl_poly_fit(arr[~ind_nan], waves[~ind_nan])
waves[ind_nan] = jl_poly(arr[ind_nan], cf)
return waves, imarr
def get_SNR(filter_or_bp, pupil=None, mask=None, module='A', pix_scale=None,
sp=None, tf=10.737, ngroup=2, nf=1, nd2=0, nint=1,
coeff=None, coeff_hdr=None, fov_pix=11, oversample=4, quiet=True, **kwargs):
"""SNR per pixel
Obtain the SNR of an input source spectrum with specified instrument setup.
This is simply a wrapper for bg_sensitivity(forwardSNR=True).
"""
return bg_sensitivity(filter_or_bp, \
pupil=pupil, mask=mask, module=module, pix_scale=pix_scale, \
sp=sp, tf=tf, ngroup=ngroup, nf=nf, nd2=nd2, nint=nint, \
coeff=coeff, coeff_hdr=coeff_hdr, fov_pix=fov_pix, oversample=oversample, \
quiet=quiet, forwardSNR=True, **kwargs)
def _mlim_helper(sub_im, mag_norm=10, mag_arr=np.arange(5,35,1),
nsig=5, nint=1, snr_fact=1, forwardSNR=False, **kwargs):
"""Helper function for determining grism sensitivities"""
sub_im_sum = sub_im.sum()
# Just return the SNR for the input sub image
if forwardSNR:
im_var = pix_noise(fsrc=sub_im, **kwargs)**2
ns_sum = np.sqrt(np.sum(im_var) / nint)
return snr_fact * sub_im_sum / ns_sum
fact_arr = 10**((mag_arr-mag_norm)/2.5)
snr_arr = []
for f in fact_arr:
im = sub_im / f
im_var = pix_noise(fsrc=im, **kwargs)**2
im_sum = sub_im_sum / f
ns_sum = np.sqrt(np.sum(im_var) / nint)
snr_arr.append(im_sum / ns_sum)
snr_arr = snr_fact*np.asarray(snr_arr)
return np.interp(nsig, snr_arr[::-1], mag_arr[::-1])
def bg_sensitivity(filter_or_bp, pupil=None, mask=None, module='A', pix_scale=None,
sp=None, units=None, nsig=10, tf=10.737, ngroup=2, nf=1, nd2=0, nint=1,
coeff=None, coeff_hdr=None, fov_pix=11, oversample=4, quiet=True, forwardSNR=False,
offset_r=0, offset_theta=0, return_image=False, image=None,
cr_noise=True, dw_bin=None, ap_spec=None, rad_EE=None, **kwargs):
"""Sensitivity Estimates
Estimates the sensitivity for a set of instrument parameters.
By default, a flat spectrum is convolved with the specified bandpass.
For imaging, this function also returns the surface brightness sensitivity.
The number of photo-electrons are computed for a source at some magnitude
as well as the noise from the detector readout and some average zodiacal
background flux. Detector readout noise follows an analytical form that
matches extensive long dark observations during cryo-vac testing.
This function returns the n-sigma background limit in units of uJy (unless
otherwise specified; valid units can be found on the Pysynphot webpage at
https://pysynphot.readthedocs.io/).
For imaging, a single value is given assuming aperture photometry with a
radius of ~1 FWHM rounded to the next highest integer pixel (or 2.5 pixels,
whichever is larger). For spectral observations, this function returns an
array of sensitivities at 0.1um intervals with apertures corresponding to
2 spectral pixels and a number of spatial pixels equivalent to 1 FWHM rounded
to the next highest integer (minimum of 5 spatial pixels).
Parameters
==========
Instrument Settings
-------------------
filter_or_bp : Either the name of the filter or pre-computed Pysynphot bandpass.
pupil : NIRCam pupil elements such as grisms or lyot stops
mask : Specify the coronagraphic occulter (spots or bar)
module : 'A' or 'B'
pix_scale : Pixel scale in arcsec/pixel
Spectrum Settings
-------------------
sp : A pysynphot spectral object to calculate sensitivity
(default: Flat spectrum in photlam)
nsig : Desired nsigma sensitivity
units : Output units (defaults to uJy for grisms, nJy for imaging)
forwardSNR : Find the SNR of the input spectrum instead of determining sensitivity.
Ramp Settings
-------------------
tf : Time per frame
ngroup : Number of groups per integration
nf : Number of averaged frames per group
nd2 : Number of dropped frames per group
nint : Number of integrations/ramps to consider
PSF Information
-------------------
coeff : A cube of polynomial coefficients for generating PSFs. This is
generally oversampled with a shape (fov_pix*oversamp, fov_pix*oversamp, deg).
If not set, this will be calculated using :func:`gen_psf_coeff`.
coeff_hdr : Header associated with coeff cube.
fov_pix : Number of detector pixels in the image coefficient and PSF.
oversample : Factor of oversampling of detector pixels.
offset_r : Radial offset of the target from center.
offset_theta : Position angle for that offset, in degrees CCW (+Y).
Misc.
-------------------
image : Explicitly pass image data rather than calculating from coeff.
return_image : Instead of calculating sensitivity, return the image computed from coeff.
Useful if needing to calculate sensitivities for many different settings.
rad_EE : Extraction aperture radius (in pixels) for imaging mode.
dw_bin : Delta wavelength to calculate spectral sensitivities (grisms & DHS).
ap_spec : Instead of dw_bin, specify the spectral extraction aperture in pixels.
Takes priority over dw_bin. Value will get rounded up to nearest int.
cr_noise : Include noise from cosmic ray hits?
Keyword Args
-------------------
zodi_spec - zfact, ra, dec, thisday, [locstr, year, day]
pix_noise - rn, ktc, idark, and p_excess
gen_psf_coeff - npsf and ndeg
read_filter - ND_acq
"""
# PSF coefficients
from pynrc.psfs import gen_image_coeff
grism_obs = (pupil is not None) and ('GRISM' in pupil)
dhs_obs = (pupil is not None) and ('DHS' in pupil)
coron_obs = (pupil is not None) and ('LYOT' in pupil)
# Get filter throughput and create bandpass
if isinstance(filter_or_bp, six.string_types):
filter = filter_or_bp
bp = read_filter(filter, pupil=pupil, mask=mask, module=module, **kwargs)
else:
bp = filter_or_bp
filter = bp.name
waveset = np.copy(bp.wave)
# If not set, select some settings based on filter (SW or LW)
args = channel_select(bp)
if pix_scale is None: pix_scale = args[0] # Pixel scale (arcsec/pixel)
# Spectrum and bandpass to report magnitude that saturates NIRCam band
if sp is None:
sp = S.ArraySpectrum(waveset, 0*waveset + 10.)
sp.name = 'Flat spectrum in photlam'
if forwardSNR:
sp_norm = sp
else:
# Renormalize to 10th magnitude star
mag_norm = 10
sp_norm = sp.renorm(mag_norm, 'vegamag', bp)
sp_norm.name = sp.name
# Zodiacal Light Stuff
sp_zodi = zodi_spec(**kwargs)
obs_zodi = S.Observation(sp_zodi, bp, binset=waveset)
fzodi_pix = obs_zodi.countrate() * (pix_scale/206265.0)**2 # e-/sec/pixel
# Collecting area gets reduced for coronagraphic observations
# This isn't accounted for later, because zodiacal light doesn't use PSF information
if coron_obs: fzodi_pix *= 0.19
# The number of pixels to span spatially for WebbPSF calculations
fov_pix = int(fov_pix)
oversample = int(oversample)
# Generate the PSF image for analysis.
# This process can take a while if being done over and over again.
# Let's provide the option to skip this with a pre-generated image.
# Skip image generation if `image` keyword is not None.
# Remember, this is for a very specific NORMALIZED spectrum
t0 = time.time()
if image is None:
image = gen_image_coeff(bp, pupil=pupil, mask=mask, module=module,
sp_norm=sp_norm, coeff=coeff, coeff_hdr=coeff_hdr,
fov_pix=fov_pix, oversample=oversample,
offset_r=offset_r, offset_theta=offset_theta, **kwargs)
t1 = time.time()
_log.debug('fov_pix={0}, oversample={1}'.format(fov_pix,oversample))
_log.debug('Took %.2f seconds to generate images' % (t1-t0))
if return_image:
return image
# Cosmic Ray Loss (JWST-STScI-001721)
# SNR with cosmic ray events depends directly on ramp integration time
if cr_noise:
tint = (ngroup*nf + (ngroup-1)*nd2) * tf
snr_fact = 1.0 - tint*6.7781e-5
else:
snr_fact = 1.0
# Central position (in pixel coords) of PSF
if offset_r==0:
center = None
else:
xp, yp = rtheta_to_xy(offset_r/pix_scale, offset_theta)
xp += image.shape[1] / 2.0 # x value in pixel position
yp += image.shape[0] / 2.0 # y value in pixel position
center = (xp, yp)
# If grism spectroscopy
if grism_obs:
if units is None: units = 'uJy'
wspec, spec = image
# Wavelengths to grab sensitivity values
#igood2 = bp.throughput > (bp.throughput.max()/4)
igood2 = bp_igood(bp, min_trans=bp.throughput.max()/3, fext=0)
wgood2 = waveset[igood2] / 1e4
wsen_arr = np.unique((wgood2*10 + 0.5).astype('int')) / 10
# Add an addition 0.1 on either side
dw = 0.1
wsen_arr = np.concatenate(([wsen_arr.min()-dw],wsen_arr,[wsen_arr.max()+dw]))
#wdel = wsen_arr[1] - wsen_arr[0]
# FWHM at each pixel position
#fwhm_pix_arr = np.ceil(wsen_arr * 0.206265 / 6.5 / pix_scale)
# Make sure there's at least 5 total pixels in spatial dimension
#temp = fwhm_pix_arr.repeat(2).reshape([fwhm_pix_arr.size,2])
#temp[:,0] = 2
#rad_arr = temp.max(axis=1)
# Ignore the above, let's always do a 5pix spatial aperture
rad_arr = np.zeros(wsen_arr.size) + 2 # (2*2+1)
# Spatial aperture size at each wavelength
ap_spat = (2*rad_arr+1).astype('int')
# Indices with spectral image
ispat1 = (fov_pix - ap_spat) // 2
ispat2 = ispat1 + ap_spat
# Get spectral indices on the spectral image
if (dw_bin is None) and (ap_spec is None):
ap_spec = 2
elif (dw_bin is not None) and (ap_spec is None):
ap_spec = wspec.size * dw_bin / (wspec.max() - wspec.min())
ap_spec = int(ap_spec+0.5)
else:
ap_spec = int(ap_spec+0.5)
diff = abs(wspec.reshape(wspec.size,1) - wsen_arr)
ind_wave = []
for i in np.arange(wsen_arr.size):
ind = (np.where(diff[:,i]==min(diff[:,i])))[0]
ind_wave.append(ind[0])
ispec1 = np.asarray(ind_wave) - ap_spec // 2
ispec2 = ispec1 + ap_spec
# At each wavelength, grab a sub image and find the limiting magnitude
bglim_arr = []
for i in np.arange(wsen_arr.size):
sub_im = spec[ispat1[i]:ispat2[i],ispec1[i]:ispec2[i]]
if forwardSNR:
snr = _mlim_helper(sub_im, nint=nint, forwardSNR=forwardSNR,
ngroup=ngroup, nf=nf, nd2=nd2, tf=tf, fzodi=fzodi_pix,
snr_fact=snr_fact, **kwargs)
bglim_arr.append(snr)
else:
# Interpolate over a coarse magnitude grid
mag_arr=np.arange(5,35,1)
mag_lim = _mlim_helper(sub_im, mag_norm, mag_arr, nsig=nsig, nint=nint,
ngroup=ngroup, nf=nf, nd2=nd2, tf=tf, fzodi=fzodi_pix,
snr_fact=snr_fact, **kwargs)
# Zoom in and interpolate over a finer grid
mag_arr = np.arange(mag_lim-1,mag_lim+1,0.05)
mag_lim = _mlim_helper(sub_im, mag_norm, mag_arr, nsig=nsig, nint=nint,
ngroup=ngroup, nf=nf, nd2=nd2, tf=tf, fzodi=fzodi_pix,
snr_fact=snr_fact, **kwargs)
# Renormalize spectrum to magnitude limit and convert to desired units
sp_norm2 = sp.renorm(mag_lim, 'vegamag', bp)
sp_norm2.convert(units)
bglim = np.interp(wsen_arr[i],sp_norm2.wave/1e4, sp_norm2.flux)
bglim_arr.append(bglim)
bglim_arr = np.asarray(bglim_arr)
# Return sensitivity list along with corresponding wavelengths to dictionary
if forwardSNR:
sp_norm.convert(units)
fvals = np.interp(wsen_arr, sp_norm.wave/1e4, sp_norm.flux)
out = {'wave':wsen_arr.tolist(), 'snr':bglim_arr.tolist(),
'flux_units':units, 'flux':fvals.tolist(), 'Spectrum':sp.name}
if quiet == False:
print('{0} SNR for {1} source'.format(bp.name,sp.name))
names = ('Wave','SNR','Flux ({})'.format(units))
tbl = Table([wsen_arr,bglim_arr, fvals], names=names)
for k in tbl.keys():
tbl[k].format = '9.2f'
print(tbl)
else:
out = {'wave':wsen_arr.tolist(), 'sensitivity':bglim_arr.tolist(),
'units':units, 'nsig':nsig, 'Spectrum':sp.name}
if quiet == False:
print('{} Background Sensitivity ({}-sigma) for {} source'.\
format(bp.name,nsig,sp.name))
names = ('Wave','Limit ({})'.format(units))
tbl = Table([wsen_arr,bglim_arr], names=names)
for k in tbl.keys():
tbl[k].format = '9.2f'
print(tbl)
return out
# DHS spectroscopy
elif dhs_obs:
raise NotImplementedError('DHS has yet to be fully included')
# Imaging (includes coronagraphy)
else:
if units is None: units = 'nJy'
# Wavelength to grab sensitivity values
obs = S.Observation(sp_norm, bp, binset=waveset)
efflam = obs.efflam()*1e-4 # microns
# Encircled energy
rho_pix = dist_image(image)
bins = np.arange(rho_pix.min(), rho_pix.max() + 1, 1)
# Groups indices for each radial bin
igroups, _, rad_pix = hist_indices(rho_pix, bins, True)
# Sum of each radial annulus
sums = binned_statistic(igroups, image, func=np.sum)
# Encircled energy within each radius
EE_flux = np.cumsum(sums)
# How many pixels do we want?
fwhm_pix = 1.2 * efflam * 0.206265 / 6.5 / pix_scale
if rad_EE is None:
rad_EE = np.max([fwhm_pix,2.5])
npix_EE = np.pi * rad_EE**2
# For surface brightness sensitivity (extended object)
# Assume the fiducial (sp_norm) to be in terms of mag/arcsec^2
# Multiply countrate() by pix_scale^2 to get in terms of per pixel (area)
# This is the count rate per pixel for the fiducial starting point
image_ext = obs.countrate() * pix_scale**2 # e-/sec/pixel
#print(image_ext)
if forwardSNR:
im_var = pix_noise(ngroup=ngroup, nf=nf, nd2=nd2, tf=tf,
fzodi=fzodi_pix, fsrc=image, **kwargs)**2
# root squared sum of noise within each radius
sums = binned_statistic(igroups, im_var, func=np.sum)
EE_var = np.cumsum(sums)
EE_sig = np.sqrt(EE_var / nint)
EE_snr = snr_fact * EE_flux / EE_sig
snr_rad = np.interp(rad_EE, rad_pix, EE_snr)
flux_val = obs.effstim(units)
out1 = {'type':'Point Source', 'snr':snr_rad, 'Spectrum':sp.name,
'flux':flux_val, 'flux_units':units}
# Extended object surface brightness
im_var = pix_noise(ngroup=ngroup, nf=nf, nd2=nd2, tf=tf,
fzodi=fzodi_pix, fsrc=image_ext, **kwargs)**2
im_sig = np.sqrt(im_var*npix_EE / nint)
# Total number of pixels within r=fwhm or 2.5 pixels
fsum2 = image_ext * npix_EE
snr2 = snr_fact * fsum2 / im_sig # SNR per "resolution element"ish
out2 = {'type':'Surface Brightness', 'snr':snr2, 'Spectrum':sp.name,
'flux':flux_val, 'flux_units':units+'/arcsec^2'}
if quiet == False:
for out in [out1,out2]:
print('{} SNR ({:.2f} {}): {:.2f} sigma'.\
format(out['type'], out['flux'], out['flux_units'], out['snr']))
else:
# Interpolate over a coarse magnitude grid to get SNR
# Then again over a finer grid
for ii in np.arange(2):
if ii==0: mag_arr = np.arange(5,35,1)
else: mag_arr = np.arange(mag_lim-1,mag_lim+1,0.05)
fact_arr = 10**((mag_arr-mag_norm)/2.5)
snr_arr = []
for f in fact_arr:
#im_var = image/f/tint + var_const
im_var = pix_noise(ngroup=ngroup, nf=nf, nd2=nd2, tf=tf,
fzodi=fzodi_pix, fsrc=image/f, **kwargs)**2
# root squared sum of noise within each radius
sums = binned_statistic(igroups, im_var, func=np.sum)
EE_var = np.cumsum(sums)
EE_sig = np.sqrt(EE_var / nint)
EE_snr = snr_fact * (EE_flux/f) / EE_sig
snr_rad = np.interp(rad_EE, rad_pix, EE_snr)
snr_arr.append(snr_rad)
snr_arr = np.asarray(snr_arr)
mag_lim = np.interp(nsig, snr_arr[::-1], mag_arr[::-1])
_log.debug('Mag Limits [{0:.2f},{1:.2f}]; {2:.0f}-sig: {3:.2f}'.\
format(mag_arr.min(),mag_arr.max(),nsig,mag_lim))
# Renormalize spectrum at given magnitude limit
sp_norm2 = sp.renorm(mag_lim, 'vegamag', bp)
# Determine effective stimulus
obs2 = S.Observation(sp_norm2, bp, binset=waveset)
bglim = obs2.effstim(units)
out1 = {'sensitivity':bglim, 'units':units, 'nsig':nsig, 'Spectrum':sp.name}
# Same thing as above, but for surface brightness
for ii in np.arange(2):
if ii==0: mag_arr = np.arange(5,35,1)
else: mag_arr = np.arange(mag_lim-1,mag_lim+1,0.05)
fact_arr = 10**((mag_arr-mag_norm)/2.5)
snr_arr = []
for f in fact_arr:
im_var = pix_noise(ngroup=ngroup, nf=nf, nd2=nd2, tf=tf,
fzodi=fzodi_pix, fsrc=image_ext/f, **kwargs)**2
im_sig = np.sqrt(im_var*npix_EE / nint)
fsum2 = image_ext * npix_EE / f
snr2 = snr_fact * fsum2 / im_sig
#print('{:.5f} {:.5f} {:.2f}'.format(fsum2,im_sig,snr2))
snr_arr.append(snr2)
snr_arr = np.asarray(snr_arr)
mag_lim = np.interp(nsig, snr_arr[::-1], mag_arr[::-1])
_log.debug('Mag Limits (mag/asec^2) [{0:.2f},{1:.2f}]; {2:.0f}-sig: {3:.2f}'.\
format(mag_arr.min(),mag_arr.max(),nsig,mag_lim))
# mag_lim is in terms of mag/arcsec^2 (same as mag_norm)
sp_norm2 = sp.renorm(mag_lim, 'vegamag', bp)
obs2 = S.Observation(sp_norm2, bp, binset=waveset)
bglim2 = obs2.effstim(units) # units/arcsec**2
out2 = out1.copy()
out2['sensitivity'] = bglim2
out2['units'] = units+'/arcsec^2'
if quiet == False:
print('{} Sensitivity ({}-sigma): {:.2f} {}'.\
format('Point Source', nsig, bglim, out1['units']))
print('{} Sensitivity ({}-sigma): {:.2f} {}'.\
format('Surface Brightness', nsig, bglim2, out2['units']))
return out1, out2
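# Illustrative usage sketch (keyword values are placeholders, not recommendations):
# 10-sigma imaging sensitivities for the default flat-spectrum source in F444W.
def _example_bg_sensitivity():
    """Return point-source and surface-brightness limits (nJy, nJy/arcsec^2)."""
    out_ps, out_sb = bg_sensitivity('F444W', module='A', nsig=10,
                                    ngroup=10, nf=1, nd2=0, nint=10)
    return out_ps['sensitivity'], out_sb['sensitivity']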
def sat_limit_webbpsf(filter_or_bp, pupil=None, mask=None, module='A', pix_scale=None,
sp=None, bp_lim=None, int_time=21.47354, full_well=81e3, well_frac=0.8,
coeff=None, coeff_hdr=None, fov_pix=11, oversample=4, quiet=True, units='vegamag',
offset_r=0, offset_theta=0, **kwargs):
"""Saturation limits
Estimate the saturation limit of a point source for some bandpass.
By default, it outputs the max K-Band magnitude assuming a G2V star,
following the convention on the UA NIRCam webpage. This can be useful if
one doesn't know how bright a source is in the selected NIRCam filter
bandpass. However, any user-defined bandpass (or user-defined spectrum)
can be specified. These must follow the Pysynphot conventions found here:
http://pysynphot.readthedocs.org/en/latest/using_pysynphot.html
This function returns the saturation limit in Vega magnitudes by default,
however, any flux unit supported by Pysynphot is possible via the 'units'
keyword.
Parameters
==========
Instrument Settings
-------------------
filter_or_bp : Either the name of the filter or pre-computed Pysynphot bandpass.
pupil : NIRCam pupil elements such as grisms or lyot stops
mask : Specify the coronagraphic occulter (spots or bar)
module : 'A' or 'B'
Spectrum Settings
-------------------
sp : A Pysynphot spectrum to calculate saturation (default: G2V star)
bp_lim : A Pysynphot bandpass at which we report the magnitude that will
saturate the NIRCam band assuming some spectrum sp
units : Output units for saturation limit
Detector Settings
-------------------
int_time : Integration time in seconds (default corresponds to 2 full frames)
full_well : Detector full well level in electrons.
well_frac : Fraction of full well to consider "saturated." 0.8 by default.
PSF Information
-------------------
coeff : A cube of polynomial coefficients for generating PSFs. This is
generally oversampled and has the shape:
[fov_pix*oversample, fov_pix*oversample, deg]
If not set, this will be calculated from fov_pix, oversample,
and npsf by generating a number of webbPSF images within the bandpass
and fitting a high-order polynomial.
fov_pix : Number of detector pixels in the image coefficient and PSF.
oversample : Factor of oversampling of detector pixels.
offset_r : Radial offset of the target from center.
offset_theta : Position angle for that offset, in degrees CCW (+Y).
Keyword Args
-------------------
gen_psf_coeff - npsf and ndeg
read_filter - ND_acq
"""
# PSF coefficients
from pynrc.psfs import gen_image_coeff
# Get filter throughput and create bandpass
if isinstance(filter_or_bp, six.string_types):
filter = filter_or_bp
bp = read_filter(filter, pupil=pupil, mask=mask, module=module, **kwargs)
else:
bp = filter_or_bp
filter = bp.name
if bp_lim is None:
bp_lim = S.ObsBandpass('johnson,k')
bp_lim.name = 'K-Band'
# If not set, select some settings based on filter (SW or LW)
args = channel_select(bp)
if pix_scale is None:
pix_scale = args[0] # Pixel scale (arcsec/pixel)
# Spectrum and bandpass to report magnitude that saturates NIRCam band
if sp is None:
sp = stellar_spectrum('G2V')
# Just for good measure, make sure we're all in the same wave units
bp_lim.convert(bp.waveunits)
sp.convert(bp.waveunits)
# Renormalize to 10th magnitude star (Vega mags)
mag_norm = 10.0
sp_norm = sp.renorm(mag_norm, 'vegamag', bp_lim)
sp_norm.name = sp.name
# Set up an observation of the spectrum using the specified bandpass
# Use the bandpass wavelengths to bin the fluxes
obs = S.Observation(sp_norm, bp, binset=bp.wave)
# Convert observation to counts (e/sec)
obs.convert('counts')
# The number of pixels to span spatially
fov_pix = int(fov_pix)
oversample = int(oversample)
# Generate the PSF image for analysis
t0 = time.time()
result = gen_image_coeff(bp, pupil=pupil, mask=mask, module=module,
sp_norm=sp_norm, coeff=coeff, coeff_hdr=coeff_hdr,
fov_pix=fov_pix, oversample=oversample,
offset_r=offset_r, offset_theta=offset_theta, **kwargs)
t1 = time.time()
_log.debug('Took %.2f seconds to generate images' % (t1-t0))
# Total stellar flux and associated magnitude
star_flux = obs.countrate() # e/sec
mag_nrc = obs.effstim('vegamag')
_log.debug('Total Source Count Rate for {0} = {1:0.1f} mags: {2:.0f} e-/sec'.\
format(bp_lim.name, mag_norm, star_flux))
_log.debug('Magnitude in {0} band: {1:.2f}'.format(bp.name, mag_nrc))
# Saturation level (some fraction of full well) in electrons
sat_level = well_frac * full_well
# If grism spectroscopy
if (pupil is not None) and ('GRISM' in pupil):
wspec, spec = result
# Time to saturation for 10-mag source
sat_time = sat_level / spec
_log.debug('Approximate Time to {1:.2f} of Saturation: {0:.1f} sec'.\
format(sat_time.min(),well_frac))
# Magnitude necessary to saturate a given pixel
ratio = int_time/sat_time
ratio[ratio < __epsilon] = __epsilon
sat_mag = mag_norm + 2.5*np.log10(ratio)
# Wavelengths to grab saturation values
igood2 = bp.throughput > (bp.throughput.max()/4)
wgood2 = bp.wave[igood2] / 1e4
wsat_arr = np.unique((wgood2*10 + 0.5).astype('int')) / 10
wdel = wsat_arr[1] - wsat_arr[0]
msat_arr = []
for w in wsat_arr:
l1 = w-wdel/4
l2 = w+wdel/4
ind = ((wspec > l1) & (wspec <= l2))
msat = sat_mag[fov_pix//2-1:fov_pix//2+2,ind].max()
sp_temp = sp.renorm(msat, 'vegamag', bp_lim)
obs_temp = S.Observation(sp_temp, bp_lim, binset=bp_lim.wave)
msat_arr.append(obs_temp.effstim(units))
msat_arr = np.array(msat_arr)
# Print verbose information
if not quiet:
if bp_lim.name == bp.name:
print('{0} Saturation Limit assuming {1} source:'.\
format(bp_lim.name,sp.name))
else:
print('{0} Saturation Limit for {1} assuming {2} source:'.\
format(bp_lim.name,bp.name,sp.name))
names = ('Wave','Sat Limit ({})'.format(units))
tbl = Table([wsat_arr,msat_arr], names=names)
for k in tbl.keys():
tbl[k].format = '9.2f'
print(tbl)
# Return saturation list along with corresponding wavelengths to dictionary
return {'wave':wsat_arr.tolist(), 'satmag':msat_arr.tolist(),
'units':units, 'Spectrum':sp_norm.name, 'bp_lim':bp_lim.name}
# DHS spectroscopy
elif (pupil is not None) and ('DHS' in pupil):
raise NotImplementedError
# Imaging
else:
psf = result
# Time to saturation for 10-mag source
# Only need the maximum pixel value
sat_time = sat_level / psf.max()
_log.debug('Point source approximate Time to {1:.2f} of Saturation: {0:.2f} sec'.\
format(sat_time,well_frac))
# Magnitude necessary to saturate a given pixel
ratio = int_time/sat_time
sat_mag = mag_norm + 2.5*np.log10(ratio)
# Convert to desired unit
sp_temp = sp.renorm(sat_mag, 'vegamag', bp_lim)
obs_temp = S.Observation(sp_temp, bp_lim, binset=bp_lim.wave)
res1 = obs_temp.effstim(units)
out1 = {'satlim':res1, 'units':units, 'bp_lim':bp_lim.name, 'Spectrum':sp_norm.name}
# For surface brightness saturation (extended object)
# Assume the fiducial (sp_norm) to be in terms of mag/arcsec^2
# Multiply countrate() by pix_scale^2 to get in terms of per pixel (area)
# This is the count rate per pixel for the fiducial starting point
image_ext = obs.countrate() * pix_scale**2 # e-/sec/pixel
sat_time = sat_level / image_ext
_log.debug('Extended object approximate Time to {1:.2f} of Saturation: {0:.2f} sec'.\
format(sat_time,well_frac))
# Magnitude necessary to saturate a given pixel
ratio = int_time / sat_time
sat_mag_ext = mag_norm + 2.5*np.log10(ratio)
# Convert to desired unit
sp_temp = sp.renorm(sat_mag_ext, 'vegamag', bp_lim)
obs_temp = S.Observation(sp_temp, bp_lim, binset=bp_lim.wave)
res2 = obs_temp.effstim(units)
out2 = out1.copy()
out2['satlim'] = res2
out2['units'] = units+'/arcsec^2'
# Print verbose information
if not quiet:
if bp_lim.name == bp.name:
print('{} Saturation Limit assuming {} source (point source): {:.2f} {}'.\
format(bp_lim.name, sp_norm.name, out1['satlim'], out1['units']) )
print('{} Saturation Limit assuming {} source (extended): {:.2f} {}'.\
format(bp_lim.name, sp_norm.name, out2['satlim'], out2['units']) )
else:
print('{} Saturation Limit for {} assuming {} source (point source): {:.2f} {}'.\
format(bp_lim.name, bp.name, sp_norm.name, out1['satlim'], out1['units']) )
print('{} Saturation Limit for {} assuming {} source (extended): {:.2f} {}'.\
format(bp_lim.name, bp.name, sp_norm.name, out2['satlim'], out2['units']) )
return out1, out2
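# Illustrative usage sketch for `sat_limit_webbpsf` (not executed at import).
# The filter name, spectrum, and well fraction below are example inputs,
# not recommendations:
#
#     >>> out_psf, out_ext = sat_limit_webbpsf('F444W', sp=stellar_spectrum('G2V'),
#     ...                                      well_frac=0.8, units='vegamag',
#     ...                                      quiet=False)
#     >>> print(out_psf['satlim'], out_psf['units'])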
def var_ex_model(ng, nf, params):
return 12. * (ng - 1.)/(ng + 1.) * params[0]**2 - params[1] / nf**0.5
def pix_noise(ngroup=2, nf=1, nd2=0, tf=10.73677, rn=15.0, ktc=29.0, p_excess=(0,0),
fsrc=0.0, idark=0.003, fzodi=0, fbg=0, ideal_Poisson=False,
ff_noise=False, **kwargs):
"""Noise per pixel
Theoretical noise calculation of a generalized MULTIACCUM ramp in terms of e-/sec.
Includes flat field errors from JWST-CALC-003894.
Parameters
----------
ngroup : int
Number of groups in integration ramp.
nf : int
Number of frames averaged in each group.
nd2 : int
Number of dropped frames in each group.
tf : float
Frame time in seconds.
rn : float
Read noise per pixel (e-).
ktc : float
kTC noise (in e-). Only valid for single frame (ngroup=1).
p_excess : array-like
An array or list of two elements that holds the parameters
describing the excess variance observed in effective noise plots.
By default these are both 0. For NIRCam detectors, recommended
values are [1.0,5.0] for SW and [1.5,10.0] for LW.
fsrc : float
Flux of source in e-/sec/pix.
idark : float
Dark current in e-/sec/pix.
fzodi : float
Zodiacal light emission in e-/sec/pix.
fbg : float
Any additional background (telescope emission or scattered light?)
ideal_Poisson : bool
If set to True, use total signal for noise estimate,
otherwise MULTIACCUM equation is used.
ff_noise : bool
Include flat field errors in calculation? From JWST-CALC-003894.
Default=False.
Notes
-----
Various parameters can either be single values or numpy arrays.
If multiple inputs are arrays, make sure their array sizes match.
Variables that need to have the same array shapes (or a single value):
- ngroup, nf, nd2, & tf
- rn, idark, ktc, fsrc, fzodi, & fbg
Array broadcasting also works.
Example
-------
>>> n = np.arange(50)+1 # An array of different ngroups to test out
>>> # Create 2D Gaussian PSF with FWHM = 3 pix
>>> npix = 20 # Number of pixels in x and y direction
>>> fwhm = 3.0
>>> x = np.arange(0, npix, 1, dtype=float)
>>> y = x[:,np.newaxis]
>>> x0 = y0 = npix // 2 # Center position
>>> fsrc = np.exp(-4*np.log(2.) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)
>>> fsrc /= fsrc.max()
>>> fsrc *= 10 # 10 counts/sec in peak pixel
>>> fsrc = fsrc.reshape(npix,npix,1) # Necessary for broadcasting
>>> # Represents pixel array w/ slightly different RN/pix
>>> rn = 15 + np.random.normal(loc=0, scale=0.5, size=(1,npix,npix))
>>> # Result is a 50x(20x20) showing the noise in e-/sec/pix at each group
>>> noise = pix_noise(ngroup=n, rn=rn, fsrc=fsrc)
"""
# Convert everything to arrays
n = np.array(ngroup)
m = np.array(nf)
s = np.array(nd2)
tf = np.array(tf)
# Total flux (e-/sec/pix)
ftot = fsrc + idark + fzodi + fbg
# Special case if n=1
# To be inserted at the end
if (n==1).any():
# Variance after averaging m frames
var = ktc**2 + (rn**2 + ftot*tf) / m
noise = np.sqrt(var)
noise /= tf # In terms of e-/sec
if (n==1).all(): return noise
noise_n1 = noise
ind_n1 = (n==1)
temp = np.array(rn+ktc+ftot)
temp_bool = np.zeros(temp.shape, dtype=bool)
ind_n1_all = (temp_bool | ind_n1)
# Group time
tg = tf * (m + s)
# Effective integration time
tint = tg * (n - 1)
# Read noise, group time, and frame time variances
# This is the MULTIACCUM eq from Rauscher et al. (2007).
# This equation assumes that the slope-fitting routine uses
# incorrect covariance matrix that doesn't take into account
# the correlated Poisson noise up the ramp.
var_rn = rn**2 * 12. * (n - 1.) / (m * n * (n + 1.))
var_gp = ftot * tint * 6. * (n**2. + 1.) / (5 * n * (n + 1.))
var_fm = ftot * tf * 2. * (m**2. - 1.) * (n - 1.) / (m * n * (n + 1.))
# Functional form for excess variance above theoretical
# Empirically measured formulation
# var_ex = 12. * (n - 1.)/(n + 1.) * p_excess[0]**2 - p_excess[1] / m**0.5
var_ex = var_ex_model(n, m, p_excess)
# Variance of total signal
var_poisson = (ftot * tint) if ideal_Poisson else (var_gp - var_fm)
# Total variance
var = var_rn + var_poisson + var_ex
sig = np.sqrt(var)
# Noise in e-/sec
noise = sig / tint
# Make sure to copy over ngroup=1 cases
if (n==1).any():
noise[ind_n1_all] = noise_n1[ind_n1_all]
#print(ind_n1_all.shape,noise.shape,noise_n1.shape)
# Include flat field noise
# JWST-CALC-003894
if ff_noise:
noise_ff = 1E-4 # Uncertainty in the flat field
factor = 1 + noise_ff*np.sqrt(ftot)
noise *= factor
return noise
def radial_std(im_diff, pixscale=None, oversample=None, supersample=False, func=np.std):
"""Generate contrast curve of PSF difference
Find the standard deviation within fixed radial bins of a differenced image.
Returns two arrays representing the 1-sigma contrast curve at given distances.
Parameters
==========
im_diff : ndarray
Differenced image of two PSFs, for instance.
Keywords
========
pixscale : float
Pixel scale of the input image
oversample : int
Is the input image oversampled compared to detector? If set, then
the binsize will be pixscale*oversample (if supersample=False).
supersample : bool
If set, then oversampled data will have a binsize of pixscale,
otherwise the binsize is pixscale*oversample.
func : function
The function to use for calculating the radial standard deviation (default: np.std).
"""
from astropy.convolution import convolve, Gaussian1DKernel
# Set oversample to 1 if supersample keyword is set
oversample = 1 if supersample or (oversample is None) else oversample
# Rebin data
data_rebin = frebin(im_diff, scale=1/oversample)
# Determine pixel scale of rebinned data
pixscale = 1 if pixscale is None else oversample*pixscale
# Pixel distances
rho = dist_image(data_rebin, pixscale=pixscale)
# Get radial profiles
binsize = pixscale
bins = np.arange(rho.min(), rho.max() + binsize, binsize)
nan_mask = np.isnan(data_rebin)
igroups, _, rr = hist_indices(rho[~nan_mask], bins, True)
stds = binned_statistic(igroups, data_rebin[~nan_mask], func=func)
stds = convolve(stds, Gaussian1DKernel(1))
# Ignore corner regions
arr_size = np.min(data_rebin.shape) * pixscale
mask = rr < (arr_size/2)
return rr[mask], stds[mask]
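# Illustrative usage sketch for `radial_std` (not executed at import). Assumes
# `im_diff` is an oversampled PSF-difference image; the pixel scale, oversample
# factor, and `psf_peak_flux` normalization are placeholders:
#
#     >>> rr, stds = radial_std(im_diff, pixscale=0.063, oversample=4,
#     ...                       supersample=False, func=np.std)
#     >>> contrast = stds / psf_peak_flux  # normalize by a (hypothetical) stellar peak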
###########################################################################
#
# Pysynphot Spectrum Wrappers
#
###########################################################################
def bp_2mass(filter):
"""2MASS Bandpass
Create a 2MASS J, H, or Ks filter bandpass used to generate
synthetic photometry.
Parameters
----------
filter : str
Filter 'j', 'h', or 'k'.
Returns
-------
:mod:`pysynphot.obsbandpass`
A Pysynphot bandpass object.
"""
dir = conf.PYNRC_PATH + 'throughputs/2MASS/'
if 'j' in filter.lower():
file = '2mass_j.txt'
name = 'J-Band'
elif 'h' in filter.lower():
file = '2mass_h.txt'
name = 'H-Band'
elif 'k' in filter.lower():
file = '2mass_ks.txt'
name = 'Ks-Band'
else:
raise ValueError('{} not a valid 2MASS filter'.format(filter))
tbl = ascii.read(dir + file, names=['Wave', 'Throughput'])
bp = S.ArrayBandpass(tbl['Wave']*1e4, tbl['Throughput'], name=name)
return bp
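# Illustrative usage sketch (not executed at import): build a 2MASS Ks bandpass
# and use it to normalize a stellar spectrum via `stellar_spectrum` (defined
# later in this module). The 8.5 mag value is arbitrary:
#
#     >>> bp_ks = bp_2mass('ks')
#     >>> sp = stellar_spectrum('G2V', 8.5, 'vegamag', bp_ks)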
def bp_wise(filter):
"""WISE Bandpass
Create a WISE W1-W4 filter bandpass used to generate
synthetic photometry.
Parameters
----------
filter : str
Filter 'w1', 'w2', 'w3', or 'w4'.
Returns
-------
:mod:`pysynphot.obsbandpass`
A Pysynphot bandpass object.
"""
dir = conf.PYNRC_PATH + 'throughputs/WISE/'
if 'w1' in filter.lower():
file = 'RSR-W1.txt'
name = 'W1'
elif 'w2' in filter.lower():
file = 'RSR-W2.txt'
name = 'W2'
elif 'w3' in filter.lower():
file = 'RSR-W3.txt'
name = 'W3'
elif 'w4' in filter.lower():
file = 'RSR-W4.txt'
name = 'W4'
else:
raise ValueError('{} not a valid WISE filter'.format(filter))
tbl = ascii.read(dir + file, data_start=0)
bp = S.ArrayBandpass(tbl['col1']*1e4, tbl['col2'], name=name)
return bp
def bin_spectrum(sp, wave, waveunits='um'):
"""Rebin spectrum
Rebin a :mod:`pysynphot.spectrum` to a different wavelength grid.
This function first converts the input spectrum to flux
density (flam), then averages the flux within each bin of the
specified wavelength grid (interpolating over any empty bins).
Output spectrum units are the same as the input spectrum.
Parameters
-----------
sp : :mod:`pysynphot.spectrum`
Spectrum to rebin.
wave : array_like
Wavelength grid to rebin onto.
waveunits : str
Units of wave input. Must be recognizable by Pysynphot.
Returns
-------
:mod:`pysynphot.spectrum`
Rebinned spectrum in same units as input spectrum.
"""
waveunits0 = sp.waveunits
fluxunits0 = sp.fluxunits
# Convert wavelength of input spectrum to desired output units
sp.convert(waveunits)
# Work in flux density (flam) so the average within each bin is well defined
sp.convert('flam')
edges = S.binning.calculate_bin_edges(wave)
ind = (sp.wave >= edges[0]) & (sp.wave <= edges[-1])
binflux = binned_statistic(sp.wave[ind], sp.flux[ind], np.mean, bins=edges)
# Interpolate over NaNs
ind_nan = np.isnan(binflux)
finterp = interp1d(wave[~ind_nan], binflux[~ind_nan], kind='cubic')
binflux[ind_nan] = finterp(wave[ind_nan])
sp2 = S.ArraySpectrum(wave, binflux, waveunits=waveunits, fluxunits='flam')
sp2.convert(waveunits0)
sp2.convert(fluxunits0)
# Put back units of original input spectrum
sp.convert(waveunits0)
sp.convert(fluxunits0)
return sp2
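# Illustrative usage sketch (not executed at import): rebin a stellar spectrum
# onto a coarse 0.02 um grid. The wavelength limits are arbitrary examples
# chosen to stay within the BOSZ model coverage:
#
#     >>> sp = stellar_spectrum('G2V')
#     >>> wave_um = np.arange(0.7, 3.0, 0.02)
#     >>> sp_coarse = bin_spectrum(sp, wave_um, waveunits='um')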
def zodi_spec(zfact=None, ra=None, dec=None, thisday=None, **kwargs):
"""Zodiacal light spectrum.
New: Use `ra`, `dec`, and `thisday` keywords to call `jwst_backgrounds`
to obtain more accurate predictions of the background.
Creates a spectrum of the zodiacal light emission in order to estimate the
in-band sky background flux. This is primarily the addition of two blackbodies
at T=5300K (solar scattered light) and T=282K (thermal dust emission)
that have been scaled to match literature flux values.
In reality, the intensity of the zodiacal dust emission varies as a
function of viewing position. In this case, we have added the option
to scale the zodiacal level (or each component individually) by some
user-defined factor 'zfact'. The user can set zfact as a scalar in order
to scale the entire spectrum. If defined as a list, tuple, or np array,
then each component gets scaled individually, where T=5300K corresponds
to the first element and T=282K to the second element of the array.
The `zfact` parameter has no effect if `jwst_backgrounds` is called.
Representative values for zfact:
* 0.0 - No zodiacal emission
* 1.0 - Minimum zodiacal emission from JWST-CALC-003894
* 1.2 - Required NIRCam performance
* 2.5 - Average (default)
* 5.0 - High
* 10.0 - Maximum
Parameters
----------
zfact : float
Factor to scale Zodiacal spectrum (default 2.5).
ra : float
Right ascension in decimal degrees
dec : float
Declination in decimal degrees
thisday: int
Calendar day to use for background calculation. If not given, will
use the average of visible calendar days.
Returns
-------
:mod:`pysynphot.spectrum`
Output is a Pysynphot spectrum with default units of flam (erg/s/cm^2/A/sr).
Note: Pysynphot doesn't recognize that it's per steradian, but we must keep
that in mind when integrating the flux per pixel.
Notes
-----
Added the ability to query the Euclid background model using
:func:`zodi_euclid` for a specific location and observing time.
The two blackbodies will be scaled to the 1.0 and 5.5 um emission.
This functionality is deprecated in favor of jwst_backgrounds.
Keyword Args
------------
locstr :
Object name or RA/DEC (decimal degrees or sexagesimal).
Queries the `IPAC Euclid Background Model
<http://irsa.ipac.caltech.edu/applications/BackgroundModel/>`_
year : int
Year of observation.
day : float
Day of observation.
"""
if (ra is not None) and (dec is not None):
if not _jbt_exists:
_log.warning("`jwst_backgrounds` not installed. `ra`, `dec`, and `thisday` parameters will not work.")
else:
# Wavelength for "bathtub plot" (not used here)
wave_bath = 2.5
bkg = jbt.background(ra, dec, wave_bath)
# Get wavelength and flux values
wvals = bkg.bkg_data['wave_array'] # Wavelength (um)
farr = bkg.bkg_data['total_bg'] # Total background (MJy/sr)
if thisday is None:
# Use average of visible calendar days
ftot = farr.mean(axis=0)
else:
calendar = bkg.bkg_data['calendar']
if thisday in calendar:
ind = np.where(calendar==thisday)[0][0]
ftot = farr[ind]
else:
_log.warning("The input calendar day {}".format(thisday)+" is not available. \
Choosing closest visible day.")
diff = np.abs(calendar-thisday)
ind = np.argmin(diff)
ftot = farr[ind]
sp = S.ArraySpectrum(wave=wvals*1e4, flux=ftot*1e6, fluxunits='Jy')
sp.convert('flam')
sp.name = 'Total Background'
return sp
if zfact is None:
zfact = 2.5
#_log.debug('zfact:{0:.1f}'.format(zfact))
if isinstance(zfact, (list, tuple, np.ndarray)):
f1, f2 = zfact
else:
f1 = f2 = zfact
# These values have been scaled to match JWST-CALC-003894 values
# in order to work with Pysynphot's blackbody function.
# Pysynphot's BB function is normalized to 1Rsun at 1kpc by default.
f1 *= 4.0e7
f2 *= 2.0e13
bb1 = f1 * S.BlackBody(5300.0)
bb2 = f2 * S.BlackBody(282.0)
# Query Euclid Background Model
locstr = kwargs.get('locstr')
year = kwargs.get('year')
day = kwargs.get('day')
if (locstr is not None) and (year is not None) and (day is not None):
# Wavelengths in um and values in MJy
waves = np.array([1.0,5.5])
vals = zodi_euclid(locstr, year, day, waves, **kwargs)
bb1.convert('Jy')
bb2.convert('Jy')
# MJy at wavelength locations
f_bb1 = bb1.sample(waves*1e4) / 1e6
f_bb2 = bb2.sample(waves*1e4) / 1e6
bb1 *= (vals[0]-f_bb2[0])/f_bb1[0]
bb2 *= (vals[1]-f_bb1[1])/f_bb2[1]
sp_zodi = bb1 + bb2
sp_zodi.convert('flam')
sp_zodi.name = 'Zodiacal Light'
return sp_zodi
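# Illustrative usage sketch (not executed at import). The first call scales the
# two-blackbody model; the second queries `jwst_backgrounds` (if installed) for
# a position. The RA/Dec and day values are placeholders:
#
#     >>> sp_zodi = zodi_spec(zfact=2.5)
#     >>> sp_bg = zodi_spec(ra=80.49, dec=-69.5, thisday=100)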
def zodi_euclid(locstr, year, day, wavelengths=[1,5.5], ido_viewin=0, **kwargs):
"""IPAC Euclid Background Model
Queries the `IPAC Euclid Background Model
<http://irsa.ipac.caltech.edu/applications/BackgroundModel/>`_
in order to get date and position-specific zodiacal dust emission.
The program relies on ``urllib3`` to download the page in XML format.
However, the website only allows single wavelength queries, so
this program implements a multithreaded procedure to query
multiple wavelengths simultaneously. However, due to the nature
of the library, only so many requests are allowed to go out at a time,
so this process can take some time to complete.
Testing shows about 500 wavelengths in 10 seconds as a rough ballpark.
Recommended to grab only a few wavelengths for normalization purposes.
Parameters
----------
locstr : str
This input field must contain either coordinates (as string),
or an object name resolvable via NED or SIMBAD.
year: string
Year. Limited to 2018 to 2029 for L2 position.
day : string
Day of year (1-366). Limited to 2018 Day 274 to 2029 Day 120
for L2 position and ido_viewin=0.
wavelengths : array-like
Wavelengths in microns (0.5-1000).
ido_viewin : 0 or 1
If set to 0, returns zodiacal emission at specific location for input time.
If set to 1, then gives the median value for times of the year that the object
is in a typical spacecraft viewing zone. Currently this is set to solar
elongations between 85 and 120 degrees.
References
----------
See the `Euclid Help Website
<http://irsa.ipac.caltech.edu/applications/BackgroundModel/docs/dustProgramInterface.html>`_
for more details.
"""
# from urllib2 import urlopen
import urllib3
import xmltodict
from multiprocessing.pool import ThreadPool
def fetch_url(url):
"""
TODO: Add error handling.
"""
# response = urlopen(url)
# response = response.read()
http = urllib3.PoolManager()
response = http.request('GET', url)
d = xmltodict.parse(response.data, xml_attribs=True)
fl_str = d['results']['result']['statistics']['zody']
return float(fl_str.split(' ')[0])
#locstr="17:26:44 -73:19:56"
#locstr = locstr.replace(' ', '+')
#year=2019
#day=1
#obslocin=0
#ido_viewin=1
#wavelengths=None
req_list = []
for w in wavelengths:
url = 'http://irsa.ipac.caltech.edu/cgi-bin/BackgroundModel/nph-bgmodel?'
req = "{}&locstr={}&wavelength={:.2f}&year={}&day={}&obslocin=0&ido_viewin={}"\
.format(url, locstr, w, year, day, ido_viewin)
req_list.append(req)
nthread = np.min([50,len(wavelengths)])
pool = ThreadPool(nthread)
results = pool.imap(fetch_url, req_list)
res = []
for r in results: res.append(r)
pool.close()
return np.array(res)
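# Illustrative usage sketch (not executed at import); requires network access to
# the IPAC background service. The coordinates and date are placeholders, and
# the coordinate string may need URL-safe formatting (e.g., '+' for spaces):
#
#     >>> vals = zodi_euclid('17:26:44 -73:19:56', 2020, 100, wavelengths=[1.0, 5.5])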
# def _zodi_spec_old(level=2):
# """
# Create a spectrum of the zodiacal light emission in order to estimate the
# in-band sky background flux. This is simply the addition of two blackbodies
# at T=5800K (solar scattered light) and T=300K (thermal dust emission)
# that have been scaled to match the literature flux values.
#
# In reality, the intensity of the zodiacal dust emission varies as a
# function of viewing position. In this case, we have added different levels
# of intensity similar to the results given by the old NIRCam ETC. These have not
# been validated in any way and should be used with caution, but at least
# give an order of magnitude of the zodiacal light background flux.
#
# There are four different levels that can be passed through the level
# parameter: 0=None, 1=Low, 2=Avg, 3=High
#
# For instance set sp_zodi = zodi_spec(3) for a highish sky flux.
# Default is 2
# """
#
# bb1 = S.BlackBody(5800.); bb2 = S.BlackBody(300.)
# sp_zodi = (1.7e7*bb1 + 2.3e13*bb2) * 3.73
# sp_zodi.convert('flam')
#
# # This is how some case statements are done in Python
# # Select the level of zodiacal light emission
# # 0=None, 1=Low, 2=Avg, 3=High
# switcher = {0:0.0, 1:0.5, 2:1.0, 3:1.8}
# factor = switcher.get(level, None)
#
# if factor is None:
# _log.warning('The input parameter level=%s is not valid. Setting zodiacal light to 0.' % level)
# _log.warning('Valid values include: %s' % switcher.keys())
# factor = 0
#
# sp_zodi *= factor
# sp_zodi.name = 'Zodiacal Light'
#
# return sp_zodi
def grism_background_image(filter, pupil='GRISM0', module='A', sp_bg=None,
include_com=True, **kwargs):
"""Create full grism background image"""
# Option for GRISMR/GRISMC
if 'GRISMR' in pupil:
pupil = 'GRISM0'
elif 'GRISMC' in pupil:
pupil = 'GRISM90'
upper = 9.6 if include_com else 31.2
g_bg = grism_background(filter, pupil, module, sp_bg, upper=upper, **kwargs)
final_image = np.zeros([2048,2048])
if 'GRISM0' in pupil:
final_image = final_image + g_bg.reshape([1,-1])
else:
final_image = final_image + g_bg.reshape([-1,1])
# Add COM background
if include_com:
final_image += grism_background_com(filter, pupil, module, sp_bg, **kwargs)
return final_image
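# Illustrative usage sketch (not executed at import): full 2048x2048 background
# image for an example LW filter dispersed by the column grism:
#
#     >>> im_bg = grism_background_image('F444W', pupil='GRISMC', module='A')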
def grism_background(filter, pupil='GRISM0', module='A', sp_bg=None,
orders=[1,2], wref=None, upper=9.6, **kwargs):
"""
Returns a 1D array of grism Zodiacal/thermal background
emission model, including roll-off from pick-off mirror (POM)
edges. By default, this includes light dispersed by the
1st and 2nd grism orders (m=1 and m=2).
For column dispersion, we ignore the upper region occupied by
the coronagraphic mask by default. The preferred way to
include this region is to add the dispersed COM image from the
`grism_background_com` function to create the full 2048x2048
image. Or, more simply (but less accurately), set an `upper`
value of 31.2, which is approximately the distance (in arcsec)
from the top of the detector to the top of the coronagraphic
field of view.
Parameters
==========
filter : str
Name of filter (Long Wave only).
pupil : str
Either 'GRISM0' ('GRISMR') or 'GRISM90' ('GRISMC').
module : str
NIRCam 'A' or 'B' module.
sp_bg : :mod:`pysynphot.spectrum`
Spectrum of Zodiacal background emission, which gets
multiplied by bandpass throughput to determine final
wavelength-dependent flux that is then dispersed.
orders : array-like
What spectral orders to include? Valid orders are 1 and 2.
wref : float or None
Option to set the undeviated wavelength, otherwise this will
search a lookup table depending on the grism.
upper : float
Set the maximum bounds for out-of-field flux to be dispersed
onto the detector. By default, this value is 9.6", corresponding
to the bottom of the coronagraphic mask. Use `grism_background_com`
to then include an image of the dispersed COM mask.
If you want something simpler, increase this value to 31.2" to
assume the coronagraphic FoV is free of any holder blockages or
substrate and occulting masks.
Keyword Args
============
zfact : float
Factor to scale Zodiacal spectrum (default 2.5).
ra : float
Right ascension in decimal degrees
dec : float
Declination in decimal degrees
thisday: int
Calendar day to use for background calculation. If not given, will
use the average of visible calendar days.
"""
# Option for GRISMR/GRISMC
if 'GRISMR' in pupil:
pupil = 'GRISM0'
elif 'GRISMC' in pupil:
pupil = 'GRISM90'
# Pixel scale
pix_scale, _, _ = channel_select(read_filter(filter))
# Undeviated wavelength
if wref is None:
wref = grism_wref(pupil, module)
# Background spectrum
if sp_bg is None:
sp_bg = zodi_spec(**kwargs)
# Total number of "virtual" pixels spanned by pick-off mirror
border = np.array([8.4, 8.0]) if ('GRISM0' in pupil) else np.array([12.6, upper])
extra_pix = (border / pix_scale + 0.5).astype('int')
extra_pix[extra_pix<=0] = 1 # Ensure there's at least 1 extra pixel
npix_tot = 2048 + extra_pix.sum()
flux_all = np.zeros(npix_tot)
for grism_order in orders:
# Get filter throughput and create bandpass
bp = read_filter(filter, pupil=pupil, module=module,
grism_order=grism_order, **kwargs)
# Get wavelength dispersion solution
res, dw = grism_res(pupil, module, grism_order) # Resolution and dispersion
# Observation spectrum converted to count rate
obs_bg = S.Observation(sp_bg, bp, bp.wave)
obs_bg.convert('counts')
# Total background flux per pixel (not dispersed)
area_scale = (pix_scale/206265.0)**2
fbg_tot = obs_bg.countrate() * area_scale
# Total counts/sec within each wavelength bin
binwave = obs_bg.binwave/1e4
binflux = obs_bg.binflux*area_scale
# Interpolation function
fint = interp1d(binwave, binflux, kind='cubic')
# Wavelengths at each pixel to interpolate
wave_vals = np.arange(binwave.min(), binwave.max(), dw)
# Get flux values and preserve total flux
flux_vals = fint(wave_vals)
flux_vals = fbg_tot * flux_vals / flux_vals.sum()
# # Wavelengths at each pixel to interpolate
# wave_vals = np.arange(bp.wave.min()/1e4, bp.wave.max()/1e4, dw)
# # Rebin onto desired wavelength grid
# sp_new = bin_spectrum(sp_bg, wave_vals, waveunits='um')
# obs_bg = S.Observation(sp_new, bp, binset=sp_new.wave)
# # Get flux values per pixel
# obs_bg.convert('counts')
# flux_vals = obs_bg.binflux * (pix_scale/206265.0)**2
# Index of reference wavelength
iref = int((wref - wave_vals[0]) / (wave_vals[1] - wave_vals[0]))
# Determine the array indices that contribute for each pixel
# Use indexing rather than array shifting for speed
# This depends on the size of the POM relative to detector
offset = -1*int(wref*res/2 + 0.5) if grism_order==2 else 0
i1_arr = np.arange(iref,iref-npix_tot,-1)[::-1] + offset
i2_arr = np.arange(iref,iref+npix_tot,+1) + offset
i1_arr[i1_arr<0] = 0
i1_arr[i1_arr>len(wave_vals)] = len(wave_vals)
i2_arr[i2_arr<0] = 0
i2_arr[i2_arr>len(wave_vals)] = len(wave_vals)
flux_all += np.array([flux_vals[i1:i2].sum() for i1,i2 in zip(i1_arr,i2_arr)])
# Crop only detector pixels
flux_all = flux_all[extra_pix[0]:-extra_pix[1]]
# Module B GRISM0/R disperses in opposite direction ('sci' coords)
if ('GRISM0' in pupil) and (module=='B'):
flux_all = flux_all[::-1]
# Return single
return flux_all
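# Illustrative usage sketch (not executed at import): 1D background profile for
# the row grism, first order only, using a custom zodiacal scaling. The filter
# and zfact values are example inputs:
#
#     >>> sp_bg = zodi_spec(zfact=1.2)
#     >>> bg_row = grism_background('F322W2', pupil='GRISMR', module='A',
#     ...                           sp_bg=sp_bg, orders=[1])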
def grism_background_com(filter, pupil='GRISM90', module='A', sp_bg=None,
wref=None, **kwargs):
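"""Dispersed background from the coronagraphic mask (COM) region.
Disperses the Zodiacal/thermal background through the coronagraphic
occulting mask substrate located above the detector and returns the
resulting image contribution (or 0 for row grisms). Intended to be
added to the output of `grism_background`; `grism_background_image`
does this automatically when `include_com=True`.
"""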
# Option for GRISMR/GRISMC
if 'GRISMR' in pupil:
pupil = 'GRISM0'
elif 'GRISMC' in pupil:
pupil = 'GRISM90'
if 'GRISM0' in pupil:
_log.info('COM feature not present for row grisms.')
return 0
# Only see COM for 1st order
# Minimum wavelength is 2.4um, which means 2nd order is 2400 pixels away.
grism_order = 1
# Get filter throughput and create bandpass
bp = read_filter(filter, pupil=pupil, module=module, grism_order=grism_order,
coron_substrate=True, **kwargs)
# Pixel scale
pix_scale, _, _ = channel_select(read_filter(filter))
# Get wavelength dispersion solution
res, dw = grism_res(pupil, module, grism_order)
# Undeviated wavelength
if wref is None:
wref = grism_wref(pupil, module)
# Background spectrum
if sp_bg is None:
sp_bg = zodi_spec(**kwargs)
# Coronagraphic mask image
im_com = build_mask_detid(module+'5')
# Crop to mask holder
# Remove anything that is 0 or max
im_collapse = im_com.sum(axis=1)
ind_cut = (im_collapse == im_collapse.max()) | (im_collapse == 0)
im_com = im_com[~ind_cut]
ny_com, nx_com = im_com.shape
# Observation spectrum converted to count rate
obs_bg = S.Observation(sp_bg, bp, bp.wave)
obs_bg.convert('counts')
# Total background flux per pixel (not dispersed)
area_scale = (pix_scale/206265.0)**2
fbg_tot = obs_bg.countrate() * area_scale
# Total counts/sec within each wavelength bin
binwave = obs_bg.binwave/1e4
binflux = obs_bg.binflux*area_scale
# Interpolation function
fint = interp1d(binwave, binflux, kind='cubic')
# Wavelengths at each pixel to interpolate
wave_vals = np.arange(binwave.min(), binwave.max(), dw)
# Get flux values and preserve total flux
flux_vals = fint(wave_vals)
flux_vals = fbg_tot * flux_vals / flux_vals.sum()
# Index of reference wavelength in spectrum
iref = int((wref - wave_vals[0]) / (wave_vals[1] - wave_vals[0]))
# Pixel position of COM image lower and upper bounds
upper = 9.6
ipix_ref = 2048 + int(upper/pix_scale + 0.5)
ipix_lower = ipix_ref - iref
ipix_upper = ipix_lower + ny_com + len(flux_vals)
# print('COM', ipix_lower, ipix_upper)
# Only include if pixel positions overlap detector frame
if (ipix_upper>0) and (ipix_lower<2048):
# Shift and add images
im_shift = np.zeros([ny_com+len(flux_vals), nx_com])
# print(len(flux_vals))
for i, f in enumerate(flux_vals):
im_shift[i:i+ny_com,:] += im_com*f
# Position at appropriate location within detector frame
# First, either pad the lower, or crop to set bottom of detector
if ipix_lower>=0 and ipix_lower<2048:
im_shift = np.pad(im_shift, ((ipix_lower,0),(0,0)))
elif ipix_lower<0:
im_shift = im_shift[-ipix_lower:,:]
# Expand or contract to final full detector size
if im_shift.shape[0]<2048:
im_shift = np.pad(im_shift, ((0,2048-im_shift.shape[0]),(0,0)))
else:
im_shift = im_shift[0:2048,:]
res = im_shift
else:
res = 0
return res
def BOSZ_spectrum(Teff, metallicity, log_g, res=2000, interpolate=True, **kwargs):
"""BOSZ stellar atmospheres (Bohlin et al 2017).
Read in a spectrum from the BOSZ stellar atmosphere models database.
Returns a Pysynphot spectral object. Wavelength values range between
1000-32000 Angstroms. Teff ranges from 3500K to 30000K.
This function interpolates the model grid by reading in those models
closest in temperature, metallicity, and log g to the desired parameters,
then takes the weighted average of these models based on their relative
offsets. Can also just read in the closest model by setting interpolate=False.
Different spectral resolutions can also be specified; currently only
res=200, 2000, or 20000 are available.
Parameters
----------
Teff : float
Effective temperature ranging from 3500K to 30000K.
metallicity : float
Metallicity [Fe/H] value ranging from -2.5 to 0.5.
log_g : float
Surface gravity (log g) from 0 to 5.
Keyword Args
------------
res : int
Spectral resolution to use (200 or 2000 or 20000).
interpolate : bool
Interpolate spectrum using a weighted average of grid points
surrounding the desired input parameters.
References
----------
https://archive.stsci.edu/prepds/bosz/
"""
model_dir = conf.PYNRC_PATH + 'bosz_grids/'
res_dir = model_dir + 'R{}/'.format(res)
if not os.path.isdir(model_dir):
raise IOError('BOSZ model directory does not exist: {}'.format(model_dir))
if not os.path.isdir(res_dir):
raise IOError('Resolution directory does not exist: {}'.format(res_dir))
# Grid of computed temperature steps
teff_grid = list(range(3500,12000,250)) \
+ list(range(12000,20000,500)) \
+ list(range(20000,36000,1000))
teff_grid = np.array(teff_grid)
# Grid of log g steps for desired Teff
lg_max = 5
lg_step = 0.5
if Teff < 6250: lg_min = 0
elif Teff < 8250: lg_min = 1
elif Teff < 12500: lg_min = 2
elif Teff < 21000: lg_min = 3
elif Teff <= 30000: lg_min = 4
else: raise ValueError('Teff must be less than or equal to 30000.')
if log_g<lg_min:
raise ValueError('log_g must be >={}'.format(lg_min))
if log_g>lg_max:
raise ValueError('log_g must be <={}'.format(lg_max))
# Grid of log g values
logg_grid = np.arange(lg_min, lg_max+lg_step, lg_step)
# Grid of metallicity values
metal_grid = np.arange(-2.5,0.75,0.25)
# First, choose the two grid points closest in Teff
teff_diff = np.abs(teff_grid - Teff)
ind_sort = np.argsort(teff_diff)
if teff_diff[ind_sort[0]]==0: # Exact
teff_best = np.array([teff_grid[ind_sort[0]]])
else: # Want to interpolate
teff_best = teff_grid[ind_sort[0:2]]
# Choose the two best log g values
logg_diff = np.abs(logg_grid - log_g)
ind_sort = np.argsort(logg_diff)
if logg_diff[ind_sort[0]]==0: # Exact
logg_best = np.array([logg_grid[ind_sort[0]]])
else: # Want to interpolate
logg_best = logg_grid[ind_sort[0:2]]
# Choose the two best metallicity values
metal_diff = np.abs(metal_grid - metallicity)
ind_sort = np.argsort(metal_diff)
if metal_diff[ind_sort[0]]==0: # Exact
metal_best = np.array([metal_grid[ind_sort[0]]])
else: # Want to interpolate
metal_best = metal_grid[ind_sort[0:2]]
# Build files names for all combinations
teff_names = np.array(['t{:04.0f}'.format(n) for n in teff_best])
logg_names = np.array(['g{:02.0f}'.format(int(n*10)) for n in logg_best])
metal_names = np.array(['mp{:02.0f}'.format(int(abs(n*10)+0.5)) for n in metal_best])
ind_n = np.where(metal_best<0)[0]
for i in range(len(ind_n)):
j = ind_n[i]
s = metal_names[j]
metal_names[j] = s.replace('p', 'm')
# Build final file names
fnames = []
rstr = 'b{}'.format(res)
for t in teff_names:
for l in logg_names:
for m in metal_names:
fname = 'a{}cp00op00{}{}v20modrt0{}rs.fits'.format(m,t,l,rstr)
fnames.append(fname)
# Weight by relative distance from desired value
weights = []
teff_diff = np.abs(teff_best - Teff)
logg_diff = np.abs(logg_best - log_g)
metal_diff = np.abs(metal_best - metallicity)
for t in teff_diff:
wt = 1 if len(teff_diff)==1 else t / np.sum(teff_diff)
for l in logg_diff:
wl = 1 if len(logg_diff)==1 else l / np.sum(logg_diff)
for m in metal_diff:
wm = 1 if len(metal_diff)==1 else m / np.sum(metal_diff)
weights.append(wt*wl*wm)
weights = np.array(weights)
weights = weights / np.sum(weights)
if interpolate:
wave_all = []
flux_all = []
for i, f in enumerate(fnames):
d = fits.getdata(res_dir+f, 1)
wave_all.append(d['Wavelength'])
flux_all.append(d['SpecificIntensity'] * weights[i])
wfin = wave_all[0]
ffin = np.pi * np.array(flux_all).sum(axis=0) # erg/s/cm^2/A
else:
ind = np.where(weights==weights.max())[0][0]
f = fnames[ind]
d = fits.getdata(res_dir+f, 1)
wfin = d['Wavelength']
ffin = np.pi * d['SpecificIntensity'] # erg/s/cm^2/A
Teff = teff_best[ind]
log_g = logg_best[ind]
metallicity = metal_best[ind]
name = 'BOSZ(Teff={},z={},logG={})'.format(Teff,metallicity,log_g)
sp = S.ArraySpectrum(wfin[:-1], ffin[:-1], 'angstrom', 'flam', name=name)
return sp
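# Illustrative usage sketch (not executed at import); requires the BOSZ grid
# files under PYNRC_PATH. The stellar parameters and normalization are
# arbitrary examples:
#
#     >>> sp = BOSZ_spectrum(5750, 0.0, 4.5, res=2000, interpolate=True)
#     >>> sp_norm = sp.renorm(10, 'vegamag', S.ObsBandpass('johnson,k'))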
def stellar_spectrum(sptype, *renorm_args, **kwargs):
"""Stellar spectrum
Similar to specFromSpectralType() in WebbPSF/Poppy, this function uses
a dictionary of fiducial values to determine an appropriate spectral model.
If the input spectral type is not found, this function interpolates the
effective temperature, metallicity, and log g values.
You can also specify renormalization arguments to pass to ``sp.renorm()``.
The order (after ``sptype``) should be (``value, units, bandpass``):
>>> sp = stellar_spectrum('G2V', 10, 'vegamag', bp)
A flat spectrum (in photlam) is also allowed via the 'flat' string.
Use ``catname='bosz'`` for BOSZ stellar atmosphere (ATLAS9) (default)
Use ``catname='ck04models'`` keyword for ck04 models
Use ``catname='phoenix'`` keyword for Phoenix models
Keywords exist to directly specify Teff, metallicity, and log_g rather
than a spectral type.
Parameters
----------
sptype : str
Spectral type, such as 'A0V' or 'K2III'.
renorm_args : tuple
Renormalization arguments to pass to ``sp.renorm()``.
The order (after ``sptype``) should be (``value, units, bandpass``)
Bandpass should be a :mod:`pysynphot.obsbandpass` type.
Keyword Args
------------
catname : str
Catalog name, including 'bosz', 'ck04models', and 'phoenix'.
Default is 'bosz', which comes from :func:`BOSZ_spectrum`.
Teff : float
Effective temperature ranging from 3500K to 30000K.
metallicity : float
Metallicity [Fe/H] value ranging from -2.5 to 0.5.
log_g : float
Surface gravity (log g) from 0 to 5.
res : int
BOSZ spectral resolution to use (200 or 2000 or 20000).
Default: 2000.
interpolate : bool
Interpolate BOSZ spectrum using a weighted average of grid points
surrounding the desired input parameters. Default is True.
"""
Teff = kwargs.pop('Teff', None)
metallicity = kwargs.pop('metallicity', None)
log_g = kwargs.pop('log_g', None)
catname = kwargs.get('catname')
if catname is None: catname = 'bosz'
lookuptable = {
"O0V": (50000, 0.0, 4.0), # Bracketing for interpolation
"O3V": (45000, 0.0, 4.0),
"O5V": (41000, 0.0, 4.5),
"O7V": (37000, 0.0, 4.0),
"O9V": (33000, 0.0, 4.0),
"B0V": (30000, 0.0, 4.0),
"B1V": (25000, 0.0, 4.0),
"B3V": (19000, 0.0, 4.0),
"B5V": (15000, 0.0, 4.0),
"B8V": (12000, 0.0, 4.0),
"A0V": (9500, 0.0, 4.0),
"A1V": (9250, 0.0, 4.0),
"A3V": (8250, 0.0, 4.0),
"A5V": (8250, 0.0, 4.0),
"F0V": (7250, 0.0, 4.0),
"F2V": (7000, 0.0, 4.0),
"F5V": (6500, 0.0, 4.0),
"F8V": (6250, 0.0, 4.5),
"G0V": (6000, 0.0, 4.5),
"G2V": (5750, 0.0, 4.5),
"G5V": (5650, 0.0, 4.5),
"G8V": (5500, 0.0, 4.5),
"K0V": (5250, 0.0, 4.5),
"K2V": (4750, 0.0, 4.5),
"K5V": (4250, 0.0, 4.5),
"K7V": (4000, 0.0, 4.5),
"M0V": (3750, 0.0, 4.5),
"M2V": (3500, 0.0, 4.5),
"M5V": (3500, 0.0, 5.0),
"M9V": (3000, 0.0, 5.0), # Bracketing for interpolation
"O0IV": (50000, 0.0, 3.8), # Bracketing for interpolation
"B0IV": (30000, 0.0, 3.8),
"B8IV": (12000, 0.0, 3.8),
"A0IV": (9500, 0.0, 3.8),
"A5IV": (8250, 0.0, 3.8),
"F0IV": (7250, 0.0, 3.8),
"F8IV": (6250, 0.0, 4.3),
"G0IV": (6000, 0.0, 4.3),
"G8IV": (5500, 0.0, 4.3),
"K0IV": (5250, 0.0, 4.3),
"K7IV": (4000, 0.0, 4.3),
"M0IV": (3750, 0.0, 4.3),
"M9IV": (3000, 0.0, 4.7), # Bracketing for interpolation
"O0III": (55000, 0.0, 3.5), # Bracketing for interpolation
"B0III": (29000, 0.0, 3.5),
"B5III": (15000, 0.0, 3.5),
"G0III": (5750, 0.0, 3.0),
"G5III": (5250, 0.0, 2.5),
"K0III": (4750, 0.0, 2.0),
"K5III": (4000, 0.0, 1.5),
"M0III": (3750, 0.0, 1.5),
"M6III": (3000, 0.0, 1.0), # Bracketing for interpolation
"O0I": (45000, 0.0, 5.0), # Bracketing for interpolation
"O6I": (39000, 0.0, 4.5),
"O8I": (34000, 0.0, 4.0),
"B0I": (26000, 0.0, 3.0),
"B5I": (14000, 0.0, 2.5),
"A0I": (9750, 0.0, 2.0),
"A5I": (8500, 0.0, 2.0),
"F0I": (7750, 0.0, 2.0),
"F5I": (7000, 0.0, 1.5),
"G0I": (5500, 0.0, 1.5),
"G5I": (4750, 0.0, 1.0),
"K0I": (4500, 0.0, 1.0),
"K5I": (3750, 0.0, 0.5),
"M0I": (3750, 0.0, 0.0),
"M2I": (3500, 0.0, 0.0),
"M5I": (3000, 0.0, 0.0)} # Bracketing for interpolation
def sort_sptype(typestr):
letter = typestr[0]
lettervals = {'O': 0, 'B': 1, 'A': 2, 'F': 3, 'G': 4, 'K': 5, 'M': 6}
value = lettervals[letter] * 1.0
value += (int(typestr[1]) * 0.1)
if "III" in typestr:
value += 30
elif "I" in typestr:
value += 10
elif "V" in typestr:
value += 50
return value
# Generate list of spectral types
sptype_list = list(lookuptable.keys())
# Test if the user wants a flat spectrum (in photlam)
# Check if Teff, metallicity, and log_g are specified
if (Teff is not None) and (metallicity is not None) and (log_g is not None):
v0, v1, v2 = (Teff, metallicity, log_g)
if 'bosz' in catname.lower():
sp = BOSZ_spectrum(v0, v1, v2, **kwargs)
else:
if ('ck04models' in catname.lower()) and (v0<3500):
_log.warn("ck04 models stop at 3500K. Setting Teff=3500.")
v0 = 3500
sp = S.Icat(catname, v0, v1, v2)
sp.name = '({:.0f},{:0.1f},{:0.1f})'.format(v0,v1,v2)
elif 'flat' in sptype.lower():
# waveset = S.refs._default_waveset
# sp = S.ArraySpectrum(waveset, 0*waveset + 10.)
sp = S.FlatSpectrum(10, fluxunits='photlam')
sp.name = 'Flat spectrum in photlam'
elif sptype in sptype_list:
v0,v1,v2 = lookuptable[sptype]
if 'bosz' in catname.lower():
sp = BOSZ_spectrum(v0, v1, v2, **kwargs)
else:
if ('ck04models' in catname.lower()) and (v0<3500):
_log.warn("ck04 models stop at 3500K. Setting Teff=3500.")
v0 = 3500
sp = S.Icat(catname, v0, v1, v2)
sp.name = sptype
else: # Interpolate values for undefined sptype
# Sort the list and return their rank values
sptype_list.sort(key=sort_sptype)
rank_list = np.array([sort_sptype(st) for st in sptype_list])
# Find the rank of the input spec type
rank = sort_sptype(sptype)
# Grab values from tuples and interpolate based on rank
tup_list0 = np.array([lookuptable[st][0] for st in sptype_list])
tup_list1 = np.array([lookuptable[st][1] for st in sptype_list])
tup_list2 = np.array([lookuptable[st][2] for st in sptype_list])
v0 = np.interp(rank, rank_list, tup_list0)
v1 = np.interp(rank, rank_list, tup_list1)
v2 = np.interp(rank, rank_list, tup_list2)
if 'bosz' in catname.lower():
sp = BOSZ_spectrum(v0, v1, v2, **kwargs)
else:
if ('ck04models' in catname.lower()) and (v0<3500):
_log.warn("ck04 models stop at 3500K. Setting Teff=3500.")
v0 = 3500
sp = S.Icat(catname, v0, v1, v2)
sp.name = sptype
#print(int(v0),v1,v2)
# Renormalize if those args exist
if len(renorm_args) > 0:
sp_norm = sp.renorm(*renorm_args)
sp_norm.name = sp.name
sp = sp_norm
return sp
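# Illustrative usage sketch (not executed at import): spectra selected by
# spectral type or by explicit physical parameters. The magnitude, Teff,
# and catalog values are arbitrary examples:
#
#     >>> bp_k = bp_2mass('k')
#     >>> sp1 = stellar_spectrum('A0V', 6.0, 'vegamag', bp_k)
#     >>> sp2 = stellar_spectrum('G2V', Teff=5780, metallicity=0.0, log_g=4.4,
#     ...                        catname='phoenix')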
# Class for creating an input source spectrum
class source_spectrum(object):
"""Model source spectrum
The class ingests spectral information of a given target
and generates a :mod:`pysynphot.spectrum` model fit to the
known photometric SED. Two model fitting routines are available: the
first applies a simple scale factor to the input spectrum, while
the second takes the input spectrum and adds an IR excess modeled
as a modified blackbody function.
Parameters
----------
name : string
Source name.
sptype : string
Assumed stellar spectral type. Not relevant if Teff, metallicity,
and log_g are specified.
mag_val : float
Magnitude of input bandpass for initial scaling of spectrum.
bp : :mod:`pysynphot.obsbandpass`
Bandpass to apply initial mag_val scaling.
votable_file: string
VOTable name that holds the source's photometry. The user can
find the relevant data at http://vizier.u-strasbg.fr/vizier/sed/
and click download data.
Keyword Args
------------
Teff : float
Effective temperature ranging from 3500K to 30000K.
metallicity : float
Metallicity [Fe/H] value ranging from -2.5 to 0.5.
log_g : float
Surface gravity (log g) from 0 to 5.
catname : str
Catalog name, including 'bosz', 'ck04models', and 'phoenix'.
Default is 'bosz', which comes from :func:`BOSZ_spectrum`.
res : str
Spectral resolution to use (200 or 2000 or 20000).
interpolate : bool
Interpolate spectrum using a weighted average of grid points
surrounding the desired input parameters.
Example
-------
Generate a source spectrum and fit photometric data
>>> import pynrc
>>> from pynrc.nrc_utils import source_spectrum
>>>
>>> name = 'HR8799'
>>> vot = 'votables/{}.vot'.format(name)
>>> bp_k = pynrc.bp_2mass('k')
>>>
>>> # Read in stellar spectrum model and normalize to Ks = 5.24
>>> src = source_spectrum(name, 'F0V', 5.24, bp_k, vot,
>>> Teff=7430, metallicity=-0.47, log_g=4.35)
>>> # Fit model to photometry from 0.1 - 30 microns
>>> # Saves pysynphot spectral object at src.sp_model
>>> src.fit_SED(wlim=[0.1,30])
>>> sp_sci = src.sp_model
"""
def __init__(self, name, sptype, mag_val, bp, votable_file,
Teff=None, metallicity=None, log_g=None, Av=None, **kwargs):
self.name = name
# Setup initial spectrum
kwargs['Teff'] = Teff
kwargs['metallicity'] = metallicity
kwargs['log_g'] = log_g
self.sp0 = stellar_spectrum(sptype, mag_val, 'vegamag', bp, **kwargs)
# Read in a low res version for photometry matching
kwargs['res'] = 200
self.sp_lowres = stellar_spectrum(sptype, mag_val, 'vegamag', bp, **kwargs)
if Av is not None:
Rv = 4
self.sp0 = self.sp0 * S.Extinction(Av/Rv,name='mwrv4')
self.sp_lowres = self.sp_lowres * S.Extinction(Av/Rv,name='mwrv4')
self.sp0 = self.sp0.renorm(mag_val, 'vegamag', bp)
self.sp_lowres = self.sp_lowres.renorm(mag_val, 'vegamag', bp)
self.sp0.name = sptype
self.sp_lowres.name = sptype
# Init model to None
self.sp_model = None
# Readin photometry
self.votable_file = votable_file
self._gen_table()
self._combine_fluxes()
def _gen_table(self):
"""Read VOTable and convert to astropy table"""
# Import source SED from VOTable
from astropy.io.votable import parse_single_table
table = parse_single_table(self.votable_file)
# Convert to astropy table
tbl = table.to_table()
freq = tbl['sed_freq'] * 1e9 # Hz
wave_m = 2.99792458E+08 / freq
wave_A = 1e10 * wave_m
# Add wavelength column
col = tbl.Column(wave_A, 'sed_wave')
col.unit = 'Angstrom'
tbl.add_column(col)
# Sort flux monotonically with wavelength
tbl.sort(['sed_wave', 'sed_flux'])
self.table = tbl
def _combine_fluxes(self):
"""Average duplicate data points
Creates averages of duplicate points, stored in self.sp_phot.
"""
table = self.table
wave = table['sed_wave']
flux = table["sed_flux"]
eflux = table["sed_eflux"]
# Average duplicate data points
uwave, ucnt = np.unique(wave, return_counts=True)
uflux = []
uflux_e = []
for i, w in enumerate(uwave):
ind = (wave==w)
flx = np.median(flux[ind]) if ucnt[i]>1 else flux[ind][0]
uflux.append(flx)
eflx = robust.medabsdev(flux[ind]) if ucnt[i]>1 else eflux[ind][0]
uflux_e.append(eflx)
uflux = np.array(uflux)
uflux_e = np.array(uflux_e)
# Photometric data points
sp_phot = S.ArraySpectrum(uwave, uflux,
waveunits=wave.unit.name,
fluxunits=flux.unit.name)
sp_phot.convert('Angstrom')
sp_phot.convert('Flam')
sp_phot_e = S.ArraySpectrum(uwave, uflux_e,
waveunits=wave.unit.name,
fluxunits=eflux.unit.name)
sp_phot_e.convert('Angstrom')
sp_phot_e.convert('Flam')
self.sp_phot = sp_phot
self.sp_phot_e = sp_phot_e
def bb_jy(self, wave, T):
"""Blackbody function (Jy)
For a given wavelength set (in um) and a Temperature (K),
return the blackbody curve in units of Jy.
Parameters
----------
wave : array_like
Wavelength array in microns
T : float
Temperature of blackbody (K)
"""
# Physical Constants
#H = 6.62620000E-27 # Planck's constant in cgs units
HS = 6.62620000E-34 # Planck's constant in standard units
C = 2.99792458E+08 # speed of light in standard units
K = 1.38064852E-23 # Boltzmann constant in standard units
# Blackbody coefficients (SI units)
C1 = 2.0 * HS * C # Power * unit area / steradian
C2 = HS * C / K
w_m = wave * 1e-6
exponent = C2 / (w_m * T)
expfactor = np.exp(exponent)
return 1.0E+26 * C1 * (w_m**-3.0) / (expfactor - 1.0)
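# Illustrative usage sketch for `bb_jy` (not executed at import), assuming
# `src` is a `source_spectrum` instance: evaluate the blackbody shape used
# by the IR-excess model at a few wavelengths (in microns):
#
#     >>> waves = np.array([1.0, 10.0, 100.0])  # microns
#     >>> fbb = src.bb_jy(waves, 300.0)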
def model_scale(self, x, sp=None):
"""Simple model to scale stellar spectrum"""
sp = self.sp_lowres if sp is None else sp
return x[0] * sp
def model_IRexcess(self, x, sp=None):
"""Model for stellar spectrum with IR excesss
Model of a stellar spectrum plus IR excess, where the
excess is a modified blackbody. The final model follows
the form:
.. math::
x_0 BB(\lambda, x_1) \lambda^{x_2}
"""
sp = self.sp_lowres if sp is None else sp
bb_flux = x[0] * self.bb_jy(sp.wave/1e4, x[1]) * (sp.wave/1e4)**x[2] / 1e17
sp_bb = S.ArraySpectrum(sp.wave, bb_flux, fluxunits='Jy')
sp_bb.convert('Flam')
return sp + sp_bb
def func_resid(self, x, IR_excess=False, wlim=[0.1, 30], use_err=True):
"""Calculate model residuals
Parameters
----------
x : array_like
Model parameters for either `model_scale` or `model_IRexcess`.
See these two functions for more details.
IR_excess: bool
Include IR excess in model fit? This is a simple modified blackbody.
wlim : array_like
Min and max limits for wavelengths to consider (microns).
use_err : bool
Should we use the uncertainties in the SED photometry for weighting?
"""
# Star model and photometric data
sp_star = self.sp_lowres
sp_phot = self.sp_phot
sp_phot_e = self.sp_phot_e
# Which model are we using?
func_model = self.model_IRexcess if IR_excess else self.model_scale
sp_model = func_model(x, sp_star)
wvals = sp_phot.wave
wmin, wmax = np.array(wlim)*1e4
ind = (wvals >= wmin) & (wvals <= wmax)
wvals = wvals[ind]
yvals = sp_phot.flux[ind]
evals = sp_phot_e.flux[ind]
# Instead of interpolating on a high-resolution grid,
# we should really rebin onto a more coarse grid.
mod_interp = np.interp(wvals, sp_star.wave, sp_model.flux)
# Normalize values so the residuals aren't super small/large
norm = np.mean(yvals)
resid = (mod_interp - yvals)
if use_err: resid /= evals
# Return non-NaN normalized values
return resid[~np.isnan(resid)] / norm
def fit_SED(self, x0=None, robust=True, use_err=True, IR_excess=False,
wlim=[0.3,10], verbose=True):
"""Fit a model function to photometry
Use :func:`scipy.optimize.least_squares` to find the best fit
model to the observed photometric data. If no parameters passed,
then defaults are set.
Keyword Args
------------
x0 : array_like
Initial guess of independent variables.
robust : bool
Perform an outlier-resistant fit.
use_err : bool
Should we use the uncertainties in the SED photometry for weighting?
IR_excess: bool
Include IR excess in model fit? This is a simple modified blackbody.
wlim : array_like
Min and max limits for wavelengths to consider (microns).
verbose : bool
Print out best-fit model parameters. Default is True.
"""
from scipy.optimize import least_squares
# Default initial starting parameters
if x0 is None:
x0 = [1.0, 2000.0, 0.5] if IR_excess else [1.0]
# Robust fit?
loss = 'soft_l1' if robust else 'linear'
# Perform least-squares fit
kwargs={'IR_excess':IR_excess, 'wlim':wlim, 'use_err':use_err}
res = least_squares(self.func_resid, x0, bounds=(0,np.inf), loss=loss,
kwargs=kwargs)
out = res.x
if verbose: print(out)
# Which model are we using?
func_model = self.model_IRexcess if IR_excess else self.model_scale
# Create final model spectrum
sp_model = func_model(out, self.sp0)
sp_model.name = self.name
self.sp_model = sp_model
def plot_SED(self, ax=None, return_figax=False, xr=[0.3,30], yr=None,
units='Jy', **kwargs):
sp0 = self.sp0
sp_phot = self.sp_phot
sp_phot_e = self.sp_phot_e
sp_model = self.sp_model
# Convert to Jy and save original units
sp0_units = sp0.fluxunits.name
sp_phot_units = sp_phot.fluxunits.name
# nuFnu or lamFlam?
if (units=='nufnu') or (units=='lamflam'):
units = 'flam'
lfl = True
else:
lfl = False
sp0.convert(units)
sp_phot.convert(units)
# Track whether the figure is created here so it can be finalized below
new_fig = ax is None
if new_fig:
fig, ax = plt.subplots(1,1, figsize=(8,5))
w = sp0.wave / 1e4
f = sp0.flux
if lfl:
f = f * sp0.wave
if xr is not None:
ind = (w>=xr[0]) & (w<=xr[1])
w, f = (w[ind], f[ind])
ax.loglog(w, f, lw=1, label='Photosphere', **kwargs)
w = sp_phot.wave / 1e4
f = sp_phot.flux
f_err = sp_phot_e.flux
if lfl:
f = f * sp_phot.wave
f_err = f_err * sp_phot.wave
if xr is not None:
ind = (w>=xr[0]) & (w<=xr[1])
w, f, f_err = (w[ind], f[ind], f_err[ind])
ax.errorbar(w, f, yerr=f_err, marker='.', ls='none', label='Photometry')
if sp_model is not None:
sp_model_units = sp_model.fluxunits.name
sp_model.convert(units)
w = sp_model.wave / 1e4
f = sp_model.flux
if lfl:
f = f * sp_model.wave
if xr is not None:
ind = (w>=xr[0]) & (w<=xr[1])
w, f = (w[ind], f[ind])
ax.plot(w, f, lw=1, label='Model Fit')
sp_model.convert(sp_model_units)
# Labels for various units
ulabels = {'photlam': u'photons s$^{-1}$ cm$^{-2}$ A$^{-1}$',
'photnu' : u'photons s$^{-1}$ cm$^{-2}$ Hz$^{-1}$',
'flam' : u'erg s$^{-1}$ cm$^{-2}$ A$^{-1}$',
'fnu' : u'erg s$^{-1}$ cm$^{-2}$ Hz$^{-1}$',
'counts' : u'photons s$^{-1}$',
}
if lfl: # Special case nuFnu or lamFlam
yunits = u'erg s$^{-1}$ cm$^{-2}$'
else:
yunits = ulabels.get(units, units)
ax.set_xlabel('Wavelength (microns)')
ax.set_ylabel('Flux ({})'.format(yunits))
ax.set_title(self.name)
if xr is not None:
ax.set_xlim(xr)
if yr is not None:
ax.set_ylim(yr)
# Better formatting of ticks marks
from matplotlib.ticker import LogLocator, AutoLocator, NullLocator
from matplotlib.ticker import FuncFormatter, NullFormatter
formatter = FuncFormatter(lambda y, _: '{:.16g}'.format(y))
xr = ax.get_xlim()
if xr[1] < 10*xr[0]:
ax.xaxis.set_major_locator(AutoLocator())
ax.xaxis.set_minor_locator(NullLocator())
else:
ax.xaxis.set_major_locator(LogLocator())
ax.xaxis.set_major_formatter(formatter)
yr = ax.get_ylim()
if yr[1] < 10*yr[0]:
ax.yaxis.set_major_locator(AutoLocator())
ax.yaxis.set_minor_formatter(NullFormatter())
ax.yaxis.get_major_locator().set_params(nbins=10, steps=[1,10])
else:
ax.yaxis.set_major_locator(LogLocator())
ax.yaxis.set_major_formatter(formatter)
ax.legend()
# Convert back to original units
sp0.convert(sp0_units)
sp_phot.convert(sp_phot_units)
if new_fig:
fig.tight_layout()
if return_figax and new_fig: return (fig,ax)
# Class for reading in planet spectra
class planets_sb12(object):
"""Exoplanet spectrum from Spiegel & Burrows (2012)
This contains 1680 files, one for each of 4 atmosphere types, each of
15 masses, and each of 28 ages. Wavelength range of 0.8 - 15.0 um at
moderate resolution (R ~ 204).
The flux in the source files are at 10 pc. If the distance is specified,
then the flux will be scaled accordingly. This is also true if the distance
is changed by the user. All other properties (atmo, mass, age, entropy) are
not adjustable once loaded.
Parameters
----------
atmo: str
A string consisting of one of four atmosphere types:
- 'hy1s' = hybrid clouds, solar abundances
- 'hy3s' = hybrid clouds, 3x solar abundances
- 'cf1s' = cloud-free, solar abundances
- 'cf3s' = cloud-free, 3x solar abundances
mass: float
A number from 1 to 15 Jupiter masses.
age: float
Age in millions of years (1-1000)
entropy: float
Initial entropy (8.0-13.0) in increments of 0.25
distance: float
Assumed distance in pc (default is 10pc)
accr : bool
Include accretion (default: False)?
mmdot : float
From Zhu et al. (2015), the Mjup^2/yr value.
If set to None then calculated from age and mass.
mdot : float
Or use mdot (Mjup/yr) instead of mmdot.
accr_rin : float
Inner radius of accretion disk (units of RJup; default: 2)
truncated: bool
Full disk or truncated (i.e., MRI; default: False)?
base_dir: str, None
Location of atmospheric model sub-directories.
"""
# Define default self.base_dir
_base_dir = conf.PYNRC_PATH + 'spiegel/'
def __init__(self, atmo='hy1s', mass=1, age=100, entropy=10.0, distance=10,
accr=False, mmdot=None, mdot=None, accr_rin=2.0, truncated=False,
base_dir=None, **kwargs):
self._atmo = atmo
self._mass = mass
self._age = age
self._entropy = entropy
if base_dir is not None:
self._base_dir = base_dir
self.sub_dir = self._base_dir + 'SB.' + self.atmo + '/'
self._get_file()
self._read_file()
self.distance = distance
self.accr = accr
if not accr:
self.mmdot = 0
elif mmdot is not None:
self.mmdot = mmdot
elif mdot is not None:
self.mmdot = self.mass * mdot # MJup^2/yr
else:
mdot = self.mass / (1e6 * self.age) # Assumed MJup/yr
self.mmdot = self.mass * mdot # MJup^2/yr
self.rin = accr_rin
self.truncated = truncated
def _get_file(self):
"""Find the file closest to the input parameters"""
files = []; masses = []; ages = []
for file in os.listdir(self.sub_dir):
files.append(file)
fsplit = re.split(r'[_\.]', file)
ind_mass = fsplit.index('mass') + 1
ind_age = fsplit.index('age') + 1
masses.append(int(fsplit[ind_mass]))
ages.append(int(fsplit[ind_age]))
files = np.array(files)
ages = np.array(ages)
masses = np.array(masses)
# Find those indices closest in mass
mdiff = np.abs(masses - self.mass)
ind_mass = mdiff == np.min(mdiff)
# Of those masses, find the closest age
adiff = np.abs(ages - self.age)
ind_age = adiff[ind_mass] == np.min(adiff[ind_mass])
# Get the final file name
self.file = ((files[ind_mass])[ind_age])[0]
def _read_file(self):
"""Read in the file data"""
# Read in the file's content row-by-row (saved as a string)
with open(self.sub_dir + self.file) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
# Parse the strings into an array
# Row #, Value
# 1 col 1: age (Myr);
# cols 2-601: wavelength (in microns, in range 0.8-15.0)
# 2-end col 1: initial S;
# cols 2-601: F_nu (in mJy for a source at 10 pc)
ncol = len(content[0].split())
nrow = len(content)
arr = np.zeros([nrow,ncol])
for i,row in enumerate(content):
arr[i,:] = np.array(row.split(), dtype='float64')
# Find the closest entropy and save
entropy = arr[1:,0]
diff = np.abs(self.entropy - entropy)
ind = diff == np.min(diff)
self._flux = arr[1:,1:][ind,:].flatten()
self._fluxunits = 'mJy'
# Save the wavelength information
self._wave = arr[0,1:]
self._waveunits = 'um'
# Distance (10 pc)
self._distance = 10
@property
def mdot(self):
"""Accretion rate in MJup/yr"""
return self.mmdot / self.mass
@property
def wave(self):
"""Wavelength of spectrum"""
return self._wave
@property
def waveunits(self):
"""Wavelength units"""
return self._waveunits
@property
def flux(self):
"""Spectral flux"""
return self._flux
@property
def fluxunits(self):
"""Flux units"""
return self._fluxunits
@property
def distance(self):
"""Assumed distance to source (pc)"""
return self._distance
@distance.setter
def distance(self, value):
self._flux *= (self._distance/value)**2
self._distance = value
@property
def atmo(self):
"""Atmosphere type
"""
return self._atmo
@property
def mass(self):
"""Mass of planet (MJup)"""
return self._mass
@property
def age(self):
"""Age in millions of years"""
return self._age
@property
def entropy(self):
"""Initial entropy (8.0-13.0)"""
return self._entropy
def export_pysynphot(self, waveout='angstrom', fluxout='flam'):
"""Output to :mod:`pysynphot.spectrum` object
Export object settings to a :mod:`pysynphot.spectrum`.
Parameters
----------
waveout : str
Wavelength units for output
fluxout : str
Flux units for output
"""
w = self.wave; f = self.flux
        name = (re.split(r'[\.]', self.file))[0]#[5:]
sp = S.ArraySpectrum(w, f, name=name, waveunits=self.waveunits, fluxunits=self.fluxunits)
sp.convert(waveout)
sp.convert(fluxout)
if self.accr and (self.mmdot>0):
sp_mdot = sp_accr(self.mmdot, rin=self.rin,
dist=self.distance, truncated=self.truncated,
waveout=waveout, fluxout=fluxout)
# Interpolate accretion spectrum at each wavelength
# and create new composite spectrum
fnew = np.interp(sp.wave, sp_mdot.wave, sp_mdot.flux)
sp_new = S.ArraySpectrum(sp.wave, sp.flux+fnew,
waveunits=waveout, fluxunits=fluxout)
return sp_new
else:
return sp
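# Minimal usage sketch for the planets_sb12 class above; the mass, age, and
# distance values are illustrative assumptions, not recommendations.
def _example_planets_sb12():
    """Create a hybrid-cloud, solar-abundance model and export it to pysynphot."""
    pl = planets_sb12(atmo='hy1s', mass=5, age=100, entropy=10.0, distance=20)
    return pl.export_pysynphot(waveout='angstrom', fluxout='flam')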
#class planets_sb11(planets_sb12):
# """Deprecated class. Use :class:`planets_sb12` instead."""
# # Turns out the paper is Spiegel & Burrows (2012), not 2011
# def __init__(self, *args, **kwargs):
#
#        _log.warning('planets_sb11 is deprecated. Use planets_sb12 instead.')
# planets_sb12.__init__(self, *args, **kwargs)
def sp_accr(mmdot, rin=2, dist=10, truncated=False,
waveout='angstrom', fluxout='flam', base_dir=None):
"""Exoplanet accretion flux values (Zhu et al., 2015).
    Calculates the wavelength-dependent flux of an exoplanet accretion disk/shock
    from Zhu et al. (2015).
Note
----
    This function only uses the table of photometric values to calculate
    photometric brightness from a source, so it is not very useful for
    simulating spectral observations.
Parameters
----------
mmdot : float
Product of the exoplanet mass and mass accretion rate (MJup^2/yr).
Values range from 1e-7 to 1e-2.
rin : float
Inner radius of accretion disk (units of RJup; default: 2).
dist : float
Distance to object (pc).
truncated: bool
If True, then the values are for a disk with Rout=50 RJup,
otherwise, values were calculated for a full disk (Rout=1000 RJup).
        Accretion from a "truncated disk" is due mainly to MRI.
Luminosities for full and truncated disks are very similar.
waveout : str
Wavelength units for output
fluxout : str
Flux units for output
base_dir: str, None
Location of accretion model sub-directories.
"""
base_dir = conf.PYNRC_PATH + 'spiegel/' if base_dir is None else base_dir
fname = base_dir + 'zhu15_accr.txt'
names = ('MMdot', 'Rin', 'Tmax', 'J', 'H', 'K', 'L', 'M', 'N', 'J2', 'H2', 'K2', 'L2', 'M2', 'N2')
tbl = ascii.read(fname, guess=True, names=names)
# Inner radius values and Mdot values
rin_vals = np.unique(tbl['Rin'])
mdot_vals = np.unique(tbl['MMdot'])
nmdot = len(mdot_vals)
assert (rin >=rin_vals.min()) & (rin <=rin_vals.max()), "rin is out of range"
assert (mmdot>=mdot_vals.min()) & (mmdot<=mdot_vals.max()), "mmdot is out of range"
if truncated:
mag_names = ('J2', 'H2', 'K2', 'L2', 'M2', 'N2')
else:
mag_names = ('J', 'H', 'K', 'L', 'M', 'N')
wcen = np.array([ 1.2, 1.6, 2.2, 3.8, 4.8, 10.0])
zpt = np.array([1600, 1020, 657, 252, 163, 39.8])
mag_arr = np.zeros([6,nmdot])
for i, mv in enumerate(mdot_vals):
for j, mag in enumerate(mag_names):
tbl_sub = tbl[tbl['MMdot']==mv]
rinvals = tbl_sub['Rin']
magvals = tbl_sub[mag]
mag_arr[j,i] = np.interp(rin, rinvals, magvals)
mag_vals = np.zeros(6)
for j in range(6):
xi = 10**(mmdot)
xp = 10**(mdot_vals)
yp = 10**(mag_arr[j])
mag_vals[j] = np.log10(np.interp(xi, xp, yp))
mag_vals += 5*np.log10(dist/10)
flux_Jy = 10**(-mag_vals/2.5) * zpt
sp = S.ArraySpectrum(wcen*1e4, flux_Jy, fluxunits='Jy')
sp.convert(waveout)
sp.convert(fluxout)
return sp
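# Minimal usage sketch for sp_accr(); the MMdot and distance values are
# illustrative assumptions chosen inside the tabulated ranges noted above.
def _example_sp_accr():
    """Accretion spectrum for an assumed MMdot of 1e-5 MJup^2/yr at 20 pc."""
    return sp_accr(1e-5, rin=2, dist=20, truncated=False)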
def jupiter_spec(dist=10, waveout='angstrom', fluxout='flam', base_dir=None):
"""Jupiter as an Exoplanet
Read in theoretical Jupiter spectrum from Irwin et al. 2014 and output
as a :mod:`pysynphot.spectrum`.
Parameters
    ----------
dist : float
Distance to Jupiter (pc).
waveout : str
Wavelength units for output.
fluxout : str
Flux units for output.
base_dir: str, None
Location of tabulated file irwin_2014_ref_spectra.txt.
"""
base_dir = conf.PYNRC_PATH + 'solar_system/' if base_dir is None else base_dir
fname = base_dir + 'irwin_2014_ref_spectra.txt'
# Column 1: Wavelength (in microns)
# Column 2: 100*Ap/Astar (Earth-Sun Primary Transit)
# Column 3: 100*Ap/Astar (Earth-Mdwarf Primary Transit)
# Column 4: 100*Ap/Astar (Jupiter-Sun Primary Transit)
# Column 5: Fp/Astar (Earth-Sun Secondary Eclipse)
# Column 6: Disc-averaged radiance of Earth (W cm-2 sr-1 micron-1)
# Column 7: Fp/Astar (Jupiter-Sun Secondary Eclipse)
# Column 8: Disc-averaged radiance of Jupiter (W cm-2 sr-1 micron-1)
# Column 9: Solar spectral irradiance spectrum (W micron-1)
# (Solar Radius = 695500.0 km)
# Column 10: Mdwarf spectral irradiance spectrum (W micron-1)
# (Mdwarf Radius = 97995.0 km)
data = ascii.read(fname, data_start=14)
wspec = data['col1'] * 1e4 # Angstrom
fspec = data['col8'] * 1e3 # erg s-1 cm^-2 A^-1 sr^-1
# Steradians to square arcsec
sr_to_asec2 = (3600*180/np.pi)**2
fspec /= sr_to_asec2 # *** / arcsec^2
# Angular size of Jupiter at some distance
RJup_km = 71492.0
au_to_km = 149597870.7
# Angular size (arcsec) of Jupiter radius
RJup_asec = RJup_km / au_to_km / dist
area = np.pi * RJup_asec**2
# flux in f_lambda
fspec *= area # erg s-1 cm^-2 A^-1
sp = S.ArraySpectrum(wspec, fspec, fluxunits='flam')
sp.convert(waveout)
sp.convert(fluxout)
return sp
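# Minimal usage sketch for jupiter_spec(); the 10 pc distance is just the
# default value, used here for illustration.
def _example_jupiter_spec():
    """Jupiter-as-an-exoplanet spectrum at an assumed distance of 10 pc."""
    return jupiter_spec(dist=10, waveout='angstrom', fluxout='flam')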
def linder_table(file=None, **kwargs):
"""Load Linder Model Table
Function to read in isochrone models from Linder et al. 2019.
Returns an astropy Table.
Parameters
----------
age : float
Age in Myr. If set to None, then an array of ages from the file
is used to generate dictionary. If set, chooses the closest age
supplied in table.
file : string
Location and name of Linder et al file.
Default is 'BEX_evol_mags_-3_MH_0.00.dat'
"""
# Default file to read and load
if file is None:
indir = os.path.join(conf.PYNRC_PATH, 'linder/isochrones/')
file = indir + 'BEX_evol_mags_-3_MH_0.00.dat'
with open(file) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
cnames = content[2].split(',')
cnames = [name.split(':')[1] for name in cnames]
ncol = len(cnames)
content_arr = []
for line in content[4:]:
        arr = np.array(line.split(), dtype='float64')
if len(arr)>0:
content_arr.append(arr)
content_arr = np.array(content_arr)
# Convert to Astropy Table
tbl = Table(rows=content_arr, names=cnames)
return tbl
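# Minimal usage sketch for linder_table(); assumes the default BEX isochrone
# file is present under conf.PYNRC_PATH as described in the docstring.
def _example_linder_table():
    """Load the default Linder et al. (2019) isochrone table."""
    return linder_table()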
def linder_filter(table, filt, age, dist=10, cond_interp=True, cond_file=None, **kwargs):
"""Linder Mags vs Mass Arrays
Given a Linder table, NIRCam filter, and age, return arrays of MJup
    and Vega mags. If distance (pc) is provided, then return the apparent
    magnitude, otherwise the absolute magnitude at 10 pc.
    This function takes the isochrone tables from Linder et al. (2019) and
    creates an irregular contour grid of filter magnitude and log(age)
where the z-axis is log(mass). This is mapped onto a regular grid
that is interpolated within the data boundaries and linearly
extrapolated outside of the region of available data.
Parameters
    ----------
table : astropy table
Astropy table output from `linder_table`.
filt : string
Name of NIRCam filter.
age : float
        Age of the planet in Myr.
    dist : float
        Distance in pc. Default is 10 pc (absolute magnitude).
"""
def _trim_nan_image(xgrid, ygrid, zgrid):
"""NaN Trimming of Image
Remove rows/cols with NaN's while trying to preserve
the maximum footprint of real data.
"""
xgrid2, ygrid2, zgrid2 = xgrid, ygrid, zgrid
# Create a mask of NaN'ed values
nan_mask = np.isnan(zgrid2)
nrows, ncols = nan_mask.shape
# Determine number of NaN's along each row and col
num_nans_cols = nan_mask.sum(axis=0)
num_nans_rows = nan_mask.sum(axis=1)
# First, crop all rows/cols that are only NaN's
xind_good = np.where(num_nans_cols < nrows)[0]
yind_good = np.where(num_nans_rows < ncols)[0]
# get border limits
x1, x2 = (xind_good.min(), xind_good.max()+1)
y1, y2 = (yind_good.min(), yind_good.max()+1)
        # Trim off NaN borders
xgrid2 = xgrid2[x1:x2]
ygrid2 = ygrid2[y1:y2]
zgrid2 = zgrid2[y1:y2,x1:x2]
        # Find an optimal rectangular subsection free of NaN's
# Iterative cropping
ndiff = 5
while np.isnan(zgrid2.sum()):
# Make sure ndiff is not negative
if ndiff<0:
break
npix = zgrid2.size
# Create a mask of NaN'ed values
nan_mask = np.isnan(zgrid2)
nrows, ncols = nan_mask.shape
# Determine number of NaN's along each row and col
num_nans_cols = nan_mask.sum(axis=0)
num_nans_rows = nan_mask.sum(axis=1)
# Look for any appreciable diff row-to-row/col-to-col
col_diff = num_nans_cols - np.roll(num_nans_cols,-1)
row_diff = num_nans_rows - np.roll(num_nans_rows,-1)
# For edge wrapping, just use last minus previous
col_diff[-1] = col_diff[-2]
row_diff[-1] = row_diff[-2]
# Keep rows/cols composed mostly of real data
# and where number of NaN's don't change dramatically
xind_good = np.where( ( np.abs(col_diff) <= ndiff ) &
( num_nans_cols < 0.5*nrows ) )[0]
yind_good = np.where( ( np.abs(row_diff) <= ndiff ) &
( num_nans_rows < 0.5*ncols ) )[0]
# get border limits
x1, x2 = (xind_good.min(), xind_good.max()+1)
y1, y2 = (yind_good.min(), yind_good.max()+1)
            # Trim off NaN borders
xgrid2 = xgrid2[x1:x2]
ygrid2 = ygrid2[y1:y2]
zgrid2 = zgrid2[y1:y2,x1:x2]
            # Check for convergence: if this pass removed no pixels,
            # relax the allowed row-to-row/col-to-col NaN difference.
if npix==zgrid2.size:
ndiff -= 1
# Last ditch effort in case there are still NaNs
# If so, remove rows/cols 1 by 1 until no NaNs
while np.isnan(zgrid2.sum()):
xgrid2 = xgrid2[1:-1]
ygrid2 = ygrid2[1:-1]
zgrid2 = zgrid2[1:-1,1:-1]
return xgrid2, ygrid2, zgrid2
try:
x = table[filt]
except KeyError:
# In case specific filter doesn't exist, interpolate
x = []
cnames = ['SPHEREY','NACOJ', 'NACOH', 'NACOKs', 'NACOLp', 'NACOMp',
'F115W', 'F150W', 'F200W', 'F277W', 'F356W', 'F444W', 'F560W']
wvals = np.array([1.04, 1.27, 1.66, 2.20, 3.80, 4.80,
1.15, 1.50, 2.00, 2.76, 3.57, 4.41, 5.60])
# Sort by wavelength
isort = np.argsort(wvals)
cnames = list(np.array(cnames)[isort])
wvals = wvals[isort]
# Turn table data into array and interpolate at filter wavelength
tbl_arr = np.array([table[cn].data for cn in cnames]).transpose()
bp = read_filter(filt)
wint = bp.avgwave() / 1e4
x = np.array([np.interp(wint, wvals, row) for row in tbl_arr])
y = table['log(Age/yr)'].data
z = table['Mass/Mearth'].data
zlog = np.log10(z)
#######################################################
# Grab COND model data to fill in higher masses
base_dir = conf.PYNRC_PATH + 'cond_models/'
if cond_file is None:
cond_file = base_dir + 'model.AMES-Cond-2000.M-0.0.JWST.Vega'
npsave_file = cond_file + '.{}.npy'.format(filt)
try:
mag2, age2, mass2_mjup = np.load(npsave_file)
    except Exception:
d_tbl2 = cond_table(file=cond_file) # Dictionary of ages
mass2_mjup = []
mag2 = []
age2 = []
for k in d_tbl2.keys():
tbl2 = d_tbl2[k]
mass2_mjup = mass2_mjup + list(tbl2['MJup'].data)
mag2 = mag2 + list(tbl2[filt+'a'].data)
age2 = age2 + list(np.ones(len(tbl2))*k)
mass2_mjup = np.array(mass2_mjup)
mag2 = np.array(mag2)
age2 = np.array(age2)
mag_age_mass = np.array([mag2,age2,mass2_mjup])
np.save(npsave_file, mag_age_mass)
# Irregular grid
x2 = mag2
y2 = np.log10(age2 * 1e6)
z2 = mass2_mjup * 318 # Convert to Earth masses
zlog2 = np.log10(z2)
#######################################################
xlim = np.array([x2.min(),x.max()+5])
ylim = np.array([6,10]) # 10^6 to 10^10 yrs
dx = (xlim[1] - xlim[0]) / 200
dy = (ylim[1] - ylim[0]) / 200
xgrid = np.arange(xlim[0], xlim[1]+dx, dx)
ygrid = np.arange(ylim[0], ylim[1]+dy, dy)
X, Y = np.meshgrid(xgrid, ygrid)
zgrid = griddata((x,y), zlog, (X, Y), method='cubic')
zgrid_cond = griddata((x2,y2), zlog2, (X, Y), method='cubic')
# There will be NaN's along the border that need to be replaced
ind_nan = np.isnan(zgrid)
# First replace with COND grid
zgrid[ind_nan] = zgrid_cond[ind_nan]
ind_nan = np.isnan(zgrid)
# Remove rows/cols with NaN's
xgrid2, ygrid2, zgrid2 = _trim_nan_image(xgrid, ygrid, zgrid)
# Create regular grid interpolator function for extrapolation at NaN's
func = RegularGridInterpolator((ygrid2,xgrid2), zgrid2, method='linear',
bounds_error=False, fill_value=None)
# Fix NaN's in zgrid and rebuild func
pts = np.array([Y[ind_nan], X[ind_nan]]).transpose()
zgrid[ind_nan] = func(pts)
func = RegularGridInterpolator((ygrid,xgrid), zgrid, method='linear',
bounds_error=False, fill_value=None)
# Get mass limits for series of magnitudes at a given age
age_log = np.log10(age*1e6)
mag_abs_arr = xgrid
pts = np.array([(age_log,xval) for xval in mag_abs_arr])
mass_arr = 10**func(pts) / 318.0 # Convert to MJup
# TODO: Rewrite this function to better extrapolate to lower and higher masses
# For now, fit low order polynomial
isort = np.argsort(mag_abs_arr)
mag_abs_arr = mag_abs_arr[isort]
mass_arr = mass_arr[isort]
ind_fit = mag_abs_arr<x.max()
lxmap = [mag_abs_arr.min(), mag_abs_arr.max()]
xfit = np.append(mag_abs_arr[ind_fit], mag_abs_arr[-1])
yfit = np.log10(np.append(mass_arr[ind_fit], mass_arr[-1]))
cf = jl_poly_fit(xfit, yfit, deg=4, use_legendre=False, lxmap=lxmap)
mass_arr = 10**jl_poly(mag_abs_arr,cf)
mag_app_arr = mag_abs_arr + 5*np.log10(dist/10.0)
# Sort by mass
isort = np.argsort(mass_arr)
mass_arr = mass_arr[isort]
mag_app_arr = mag_app_arr[isort]
return mass_arr, mag_app_arr
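# Minimal usage sketch chaining linder_table() and linder_filter(); the filter
# name, age, and distance are illustrative assumptions.
def _example_linder_filter():
    """Mass (MJup) vs. apparent F444W magnitude at an assumed 30 Myr and 10 pc."""
    tbl = linder_table()
    return linder_filter(tbl, 'F444W', 30, dist=10)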
def cond_table(age=None, file=None, **kwargs):
"""Load COND Model Table
Function to read in the COND model tables, which have been formatted
in a very specific way. Has the option to return a dictionary of
astropy Tables, where each dictionary element corresponds to
the specific ages within the COND table. Or, if the age keyword is
specified, then this function only returns a single astropy table.
Parameters
----------
age : float
Age in Myr. If set to None, then an array of ages from the file
is used to generate dictionary. If set, chooses the closest age
supplied in table.
file : string
Location and name of COND file. See isochrones stored at
https://phoenix.ens-lyon.fr/Grids/.
Default is model.AMES-Cond-2000.M-0.0.JWST.Vega
"""
    def make_table(i, ind1, ind2, content):
        i1, i2 = (ind1[i]+4, ind2[i])
rows = []
for line in content[i1:i2]:
if (line=='') or ('---' in line):
continue
else:
vals = np.array(line.split(), dtype='float64')
rows.append(tuple(vals))
tbl = Table(rows=rows, names=cnames)
# Convert to Jupiter masses
newcol = tbl['M/Ms'] * 1047.348644
newcol.name = 'MJup'
tbl.add_column(newcol, index=1)
tbl['MJup'].format = '.2f'
return tbl
# Default file to read and load
if file is None:
base_dir = conf.PYNRC_PATH + 'cond_models/'
file = base_dir + 'model.AMES-Cond-2000.M-0.0.JWST.Vega'
with open(file) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
# Column names
cnames = content[5].split()
cnames = ['M/Ms', 'Teff'] + cnames[1:]
ncol = len(cnames)
# Create a series of tables for each time
times_gyr = []
ind1 = []
for i, line in enumerate(content):
if 't (Gyr)' in line:
times_gyr.append(line.split()[-1])
ind1.append(i)
ntimes = len(times_gyr)
# Create start and stop indices for each age value
ind2 = ind1[1:] + [len(content)]
ind1 = np.array(ind1)
ind2 = np.array(ind2)-1
# Everything is Gyr, but prefer Myr
ages_str = np.array(times_gyr)
ages_gyr = np.array(times_gyr, dtype='float64')
ages_myr = np.array(ages_gyr * 1000, dtype='int')
#times = ['{:.0f}'.format(a) for a in ages_myr]
# Return all tables if no age specified
if age is None:
tables = {}
for i in range(ntimes):
tbl = make_table(i, ind1, ind2, content)
tables[ages_myr[i]] = tbl
return tables
else:
# This is faster if we only want one table
ages_diff = np.abs(ages_myr - age)
i = np.where(ages_diff==ages_diff.min())[0][0]
tbl = make_table(i, ind1, ind2, content)
return tbl
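# Minimal usage sketch for cond_table(); the 100 Myr age is an illustrative
# assumption and is matched to the closest tabulated age.
def _example_cond_table():
    """Return the single COND table closest to an assumed age of 100 Myr."""
    return cond_table(age=100)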
def cond_filter(table, filt, module='A', dist=None, **kwargs):
"""
Given a COND table and NIRCam filter, return arrays of MJup and Vega mags.
If distance (pc) is provided, then return the apparent magnitude,
otherwise absolute magnitude at 10pc.
"""
mcol = 'MJup'
fcol = filt + module.lower()
# Table Data
mass_data = table[mcol].data
mag_data = table[fcol].data
# Data to interpolate onto
mass_arr = list(np.arange(0.1,1,0.1)) + list(np.arange(1,10)) \
+ list(np.arange(10,200,10)) + list(np.arange(200,1400,100))
mass_arr = np.array(mass_arr)
# Interpolate
mag_arr = np.interp(mass_arr, mass_data, mag_data)
# Extrapolate
cf = jl_poly_fit(np.log(mass_data), mag_data)
ind_out = (mass_arr < mass_data.min()) | (mass_arr > mass_data.max())
mag_arr[ind_out] = jl_poly(np.log(mass_arr), cf)[ind_out]
# Distance modulus for apparent magnitude
if dist is not None:
mag_arr = mag_arr + 5*np.log10(dist/10)
return mass_arr, mag_arr
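# Minimal usage sketch chaining cond_table() and cond_filter(); the filter,
# age, and distance values are illustrative assumptions.
def _example_cond_filter():
    """Mass (MJup) vs. apparent F444W magnitude from COND models at 10 pc."""
    tbl = cond_table(age=100)
    return cond_filter(tbl, 'F444W', module='A', dist=10)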
###########################################################################
#
# Coronagraphic Disk Imaging Routines
#
###########################################################################
def nproc_use_convolve(fov_pix, oversample, npsf=None):
"""
Attempt to estimate a reasonable number of processes to use for multiple
simultaneous convolve_fft calculations.
Here we attempt to estimate how many such calculations can happen in
parallel without swapping to disk, with a mixture of empiricism and conservatism.
One really does not want to end up swapping to disk with huge arrays.
NOTE: Requires psutil package. Otherwise defaults to mp.cpu_count() / 2
Parameters
-----------
fov_pix : int
Square size in detector-sampled pixels of final PSF image.
oversample : int
        Oversampling factor of the PSF relative to detector sampling.
npsf : int
Number of PSFs. Sets maximum # of processes.
"""
try:
import psutil
except ImportError:
nproc = int(mp.cpu_count() // 2)
if nproc < 1: nproc = 1
_log.info("No psutil package available, cannot estimate optimal nprocesses.")
_log.info("Returning nproc=ncpu/2={}.".format(nproc))
return nproc
mem = psutil.virtual_memory()
avail_GB = mem.available / (1024**3) - 1.0 # Leave 1 GB
fov_pix_over = fov_pix * oversample
# Memory formulas are based on fits to memory usage stats for:
# fov_arr = np.array([16,32,128,160,256,320,512,640,1024,2048])
# os_arr = np.array([1,2,4,8])
# In MBytes
mem_total = 300*(fov_pix_over)**2 * 8 / (1024**2)
# Convert to GB
mem_total /= 1024
# How many processors to split into?
nproc = avail_GB // mem_total
nproc = np.min([nproc, mp.cpu_count(), poppy.conf.n_processes])
    if npsf is not None:
        nproc = np.min([nproc, npsf])
        # Resource optimization:
        # Split iterations evenly over processors to free up minimally used processors.
        # For example, if there are 5 processes only doing 1 iteration, but a single
        # processor doing 2 iterations, those 5 processors (and their memory) will not
        # get freed until the final processor is finished. So, to minimize the number
        # of idle resources, take the total iterations and divide by two (round up),
        # and that should be the final number of processors to use.
        np_max = np.ceil(npsf / nproc)
        nproc = int(np.ceil(npsf / np_max))
if nproc < 1: nproc = 1
return int(nproc)
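# Minimal usage sketch for nproc_use_convolve(); the FoV size, oversampling,
# and PSF count below are illustrative assumptions.
def _example_nproc_use_convolve():
    """Estimate a process count for 25 convolutions on a 320-pixel FoV at 2x."""
    return nproc_use_convolve(320, 2, npsf=25)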
###########################################################################
#
# Coronagraphic Mask Transmission
#
###########################################################################
def offset_bar(filt, mask):
"""Bar mask offset locations
Get the appropriate offset in the x-position to place a source on a bar mask.
Each bar is 20" long with edges and centers corresponding to::
SWB: [1.03, 2.10, 3.10] (um) => [-10, 0, +10] (asec)
LWB: [2.30, 4.60, 6.90] (um) => [+10, 0, -10] (asec)
"""
if (mask is not None) and ('WB' in mask):
# What is the effective wavelength of the filter?
#bp = pynrc.read_filter(filter)
#w0 = bp.avgwave() / 1e4
        w0 = float(filt[1:-1]) / 100
# Choose wavelength from dictionary
wdict = {'F182M': 1.84, 'F187N': 1.88, 'F210M': 2.09, 'F212N': 2.12,
'F250M': 2.50, 'F300M': 2.99, 'F335M': 3.35, 'F360M': 3.62,
'F410M': 4.09, 'F430M': 4.28, 'F460M': 4.63, 'F480M': 4.79,
'F200W': 2.23, 'F277W': 3.14, 'F356W': 3.97, 'F444W': 4.99}
w = wdict.get(filt, w0)
# Get appropriate x-offset
#xoff_asec = np.interp(w,wpos,xpos)
if 'SWB' in mask:
if filt[-1]=="W": xoff_asec = 6.83 * (w - 2.196)
else: xoff_asec = 7.14 * (w - 2.100)
elif 'LWB' in mask:
if filt[-1]=="W": xoff_asec = -3.16 * (w - 4.747)
else: xoff_asec = -3.26 * (w - 4.600)
#print(w, xoff_asec)
yoff_asec = 0.0
r, theta = xy_to_rtheta(xoff_asec, yoff_asec)
else:
r, theta = (0.0, 0.0)
# Want th_bar to be -90 so that r matches webbpsf
if theta>0:
r = -1 * r
theta = -1 * theta
#print(r, theta)
return r, theta
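# Minimal usage sketch for offset_bar(); the filter/mask pairing is chosen
# purely for illustration.
def _example_offset_bar():
    """Bar-mask offset (r, theta) for F460M behind the LW bar occulter."""
    return offset_bar('F460M', 'MASKLWB')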
def coron_trans(name, module='A', pixscale=None, fov=20, nd_squares=True):
"""
Build a transmission image of a coronagraphic mask spanning
the 20" coronagraphic FoV.
Pulled from WebbPSF
"""
import scipy.special
import scipy
if name=='MASK210R':
sigma = 5.253
pixscale = pixscale_SW if pixscale is None else pixscale
elif name=='MASK335R':
sigma=3.2927866
pixscale = pixscale_LW if pixscale is None else pixscale
elif name=='MASK430R':
sigma=2.58832
pixscale = pixscale_LW if pixscale is None else pixscale
elif name=='MASKSWB':
pixscale = pixscale_SW if pixscale is None else pixscale
elif name=='MASKLWB':
pixscale = pixscale_LW if pixscale is None else pixscale
#pixscale=0.03
s = int(fov/pixscale + 0.5)
shape = (s,s)
y, x = np.indices(shape, dtype=float)
y -= shape[0] / 2.0
x -= shape[1] / 2.0
y,x = (pixscale * y, pixscale * x)
### Wedge Masks
if 'WB' in name:
scalefact = (2 + (-x + 7.5) * 4 / 15).clip(2, 6)
wedgesign = 1 if name == 'MASKSWB' else -1
scalefact = (2 + (x * wedgesign + 7.5) * 4 / 15).clip(2, 6)
if name == 'MASKSWB':
polyfitcoeffs = np.array([2.01210737e-04, -7.18758337e-03, 1.12381516e-01,
-1.00877701e+00, 5.72538509e+00, -2.12943497e+01,
5.18745152e+01, -7.97815606e+01, 7.02728734e+01])
# scalefact = scalefact[:, ::-1] # flip orientation left/right for SWB mask
elif name == 'MASKLWB':
polyfitcoeffs = np.array([9.16195583e-05, -3.27354831e-03, 5.11960734e-02,
-4.59674047e-01, 2.60963397e+00, -9.70881273e+00,
2.36585911e+01, -3.63978587e+01, 3.20703511e+01])
sigma_func = np.poly1d(polyfitcoeffs)
sigmas = sigma_func(scalefact)
sigmar = sigmas * np.abs(y)
# clip sigma: The minimum is to avoid divide by zero
# the maximum truncates after the first sidelobe to match the hardware
sigmar.clip(min=np.finfo(sigmar.dtype).tiny, max=2*np.pi, out=sigmar)
transmission = (1 - (np.sin(sigmar) / sigmar) ** 2)
# the bar should truncate at +- 10 arcsec
woutside = np.where(np.abs(x) > 10)
transmission[woutside] = 1.0
### Circular Masks
else:
r = poppy.accel_math._r(x, y)
sigmar = sigma * r
# clip sigma: The minimum is to avoid divide by zero
# the maximum truncates after the first sidelobe to match the hardware
bessel_j1_zero2 = scipy.special.jn_zeros(1, 2)[1]
sigmar.clip(np.finfo(sigmar.dtype).tiny, bessel_j1_zero2, out=sigmar) # avoid divide by zero -> NaNs
if poppy.accel_math._USE_NUMEXPR:
import numexpr as ne
# jn1 = scipy.special.j1(sigmar)
jn1 = scipy.special.jv(1,sigmar)
transmission = ne.evaluate("(1 - (2 * jn1 / sigmar) ** 2)")
else:
# transmission = (1 - (2 * scipy.special.j1(sigmar) / sigmar) ** 2)
transmission = (1 - (2 * scipy.special.jv(1,sigmar) / sigmar) ** 2)
# r = np.sqrt(x ** 2 + y ** 2)
# sigmar = sigma * r
# #sigmar.clip(np.finfo(sigmar.dtype).tiny, 2*np.pi, out=sigmar) # avoid divide by zero -> NaNs
# sigmar.clip(np.finfo(sigmar.dtype).tiny, 7.1559, out=sigmar) # avoid divide by zero -> NaNs
# transmission = (1 - (2 * scipy.special.jn(1, sigmar) / sigmar) ** 2)
transmission[r==0] = 0 # special case center point (value based on L'Hopital's rule)
if nd_squares:
# add in the ND squares. Note the positions are not exactly the same in the two wedges.
# See the figures in Krist et al. of how the 6 ND squares are spaced among the 5
        # coronagraph regions
# Note: 180 deg rotation needed relative to Krist's figures for the flight SCI orientation:
# We flip the signs of X and Y here as a shortcut to avoid recoding all of the below...
x *= -1
y *= -1
#x = x[::-1, ::-1]
#y = y[::-1, ::-1]
if ((module == 'A' and name == 'MASKLWB') or
(module == 'B' and name == 'MASK210R')):
wnd_5 = np.where(
((y > 5) & (y < 10)) &
(
((x < -5) & (x > -10)) |
((x > 7.5) & (x < 12.5))
)
)
wnd_2 = np.where(
((y > -10) & (y < -8)) &
(
((x < -8) & (x > -10)) |
((x > 9) & (x < 11))
)
)
elif ((module == 'A' and name == 'MASK210R') or
(module == 'B' and name == 'MASKSWB')):
wnd_5 = np.where(
((y > 5) & (y < 10)) &
(
((x > -12.5) & (x < -7.5)) |
((x > 5) & (x < 10))
)
)
wnd_2 = np.where(
((y > -10) & (y < -8)) &
(
((x > -11) & (x < -9)) |
((x > 8) & (x < 10))
)
)
else:
wnd_5 = np.where(
((y > 5) & (y < 10)) &
(np.abs(x) > 7.5) &
(np.abs(x) < 12.5)
)
wnd_2 = np.where(
((y > -10) & (y < -8)) &
(np.abs(x) > 9) &
(np.abs(x) < 11)
)
transmission[wnd_5] = np.sqrt(1e-3)
transmission[wnd_2] = np.sqrt(1e-3)
# Add in the opaque border of the coronagraph mask holder.
if ((module=='A' and name=='MASKLWB') or
(module=='B' and name=='MASK210R')):
# left edge
woutside = np.where((x < -10) & (y < 11.5 ))
transmission[woutside] = 0.0
elif ((module=='A' and name=='MASK210R') or
(module=='B' and name=='MASKSWB')):
# right edge
woutside = np.where((x > 10) & (y < 11.5))
transmission[woutside] = 0.0
# mask holder edge
woutside = np.where(y < -10)
transmission[woutside] = 0.0
# edge of mask itself
# TODO the mask edge is complex and partially opaque based on CV3 images?
# edge of glass plate rather than opaque mask I believe. To do later.
# The following is just a temporary placeholder with no quantitative accuracy.
# but this is outside the coronagraph FOV so that's fine - this only would matter in
# modeling atypical/nonstandard calibration exposures.
wedge = np.where(( y > 11.5) & (y < 13))
transmission[wedge] = 0.7
if not np.isfinite(transmission.sum()):
_log.warn("There are NaNs in the BLC mask - correcting to zero. (DEBUG LATER?)")
transmission[np.where(np.isfinite(transmission) == False)] = 0
return transmission
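# Minimal usage sketch for coron_trans(); relies on the module-level LW pixel
# scale default, so the mask/module choice here is purely illustrative.
def _example_coron_trans():
    """Transmission image of the MASK430R occulter over the 20" FoV."""
    return coron_trans('MASK430R', module='A', fov=20)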
def build_mask(module='A', pixscale=0.03):
"""Create coronagraphic mask image
Return a truncated image of the full coronagraphic mask layout
for a given module.
+V3 is up, and +V2 is to the left.
"""
if module=='A':
names = ['MASK210R', 'MASK335R', 'MASK430R', 'MASKSWB', 'MASKLWB']
elif module=='B':
names = ['MASKSWB', 'MASKLWB', 'MASK430R', 'MASK335R', 'MASK210R']
allims = [coron_trans(name,module,pixscale) for name in names]
return np.concatenate(allims, axis=1)
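# Minimal usage sketch for build_mask(); Module A and the default 0.03"
# sampling are used purely for illustration.
def _example_build_mask():
    """Full Module A coronagraphic mask layout image."""
    return build_mask(module='A')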
def build_mask_detid(detid, oversample=1, ref_mask=None, pupil=None):
"""Create mask image for a given detector
Return a full coronagraphic mask image as seen by a given SCA.
+V3 is up, and +V2 is to the left.
Parameters
----------
detid : str
Name of detector, 'A1', A2', ... 'A5' (or 'ALONG'), etc.
oversample : float
How much to oversample output mask relative to detector sampling.
ref_mask : str or None
Reference mask for placement of coronagraphic mask elements.
If None, then defaults are chosen for each detector.
pupil : str or None
Which Lyot pupil stop is being used? This affects holder placement.
If None, then defaults based on ref_mask.
"""
names = ['A1', 'A2', 'A3', 'A4', 'A5',
'B1', 'B2', 'B3', 'B4', 'B5']
# In case input is 'NRC??'
if 'NRC' in detid:
detid = detid[3:]
# Convert ALONG to A5 name
module = detid[0]
detid = '{}5'.format(module) if 'LONG' in detid else detid
# Make sure we have a valid name
if detid not in names:
raise ValueError("Invalid detid: {0} \n Valid names are: {1}" \
.format(detid, ', '.join(names)))
# These detectors don't see any of the mask structure
names_ret0 = ['A1', 'A3', 'B2', 'B4']
if detid in names_ret0:
return None
pixscale = pixscale_LW if '5' in detid else pixscale_SW
pixscale_over = pixscale / oversample
# Build the full mask
xpix = ypix = 2048
xpix_over = int(xpix * oversample)
ypix_over = int(ypix * oversample)
if detid=='A2':
cnames = ['MASK210R', 'MASK335R', 'MASK430R']
ref_mask = 'MASK210R' if ref_mask is None else ref_mask
elif detid=='A4':
cnames = ['MASK430R', 'MASKSWB', 'MASKLWB']
ref_mask = 'MASKSWB' if ref_mask is None else ref_mask
elif detid=='A5':
cnames = ['MASK210R', 'MASK335R', 'MASK430R', 'MASKSWB', 'MASKLWB']
ref_mask = 'MASK430R' if ref_mask is None else ref_mask
elif detid=='B1':
cnames = ['MASK430R', 'MASK335R', 'MASK210R']
ref_mask = 'MASK210R' if ref_mask is None else ref_mask
elif detid=='B3':
cnames = ['MASKSWB', 'MASKLWB', 'MASK430R']
ref_mask = 'MASKSWB' if ref_mask is None else ref_mask
elif detid=='B5':
cnames = ['MASKSWB', 'MASKLWB', 'MASK430R', 'MASK335R', 'MASK210R']
ref_mask = 'MASK430R' if ref_mask is None else ref_mask
allims = [coron_trans(cname, module, pixscale_over) for cname in cnames]
if pupil is None:
pupil = 'WEDGELYOT' if 'WB' in ref_mask else 'CIRCLYOT'
channel = 'LW' if '5' in detid else 'SW'
cdict = coron_ap_locs(module, channel, ref_mask, pupil=pupil, full=False)
xdet, ydet = cdict['cen']
# Add an offset value before expanding to full size
cmask = np.concatenate(allims, axis=1) + 999
# A5 mask names need to be reversed for detector orientation
# along horizontal direction
if detid=='A5':
cnames = cnames[::-1]
xf_arr = np.arange(1,2*len(cnames)+1,2) / (2*len(cnames))
xf = xf_arr[np.array(cnames)==ref_mask][0]
xc = cmask.shape[1] * xf
xc += (ypix_over - cmask.shape[1]) / 2
yc = xpix_over / 2
# Cut to final image size
cmask = pad_or_cut_to_size(cmask, (ypix_over,xpix_over))
# Place cmask in detector coords
cmask = sci_to_det(cmask, detid)
# Shift cmask to appropriate location
# ie., move MASK430R from center
xdet_over, ydet_over = np.array([xdet,ydet]) * oversample
delx = xdet_over - xc
dely = ydet_over - yc
#print((xdet_over, ydet_over), (xc, yc), (delx, dely))
cmask = fshift(cmask, int(delx), int(dely), pad=True) + 1
cmask[cmask>10] = cmask[cmask>10] - 1000
# Place blocked region from coronagraph holder
if detid=='A2':
if 'CIRCLYOT' in pupil:
i1, i2 = [int(920*oversample), int(360*oversample)]
cmask[0:i1,0:i2]=0
i1 = int(220*oversample)
cmask[0:i1,:] = 0
else:
i1, i2 = [int(935*oversample), int(360*oversample)]
cmask[0:i1,0:i2]=0
i1 = int(235*oversample)
cmask[0:i1,:] = 0
elif detid=='A4':
if 'CIRCLYOT' in pupil:
i1, i2 = [int(920*oversample), int(1490*oversample)]
cmask[0:i1,i2:]=0
i1 = int(220*oversample)
cmask[0:i1,:] = 0
else:
i1, i2 = [int(935*oversample), int(1490*oversample)]
cmask[0:i1,i2:]=0
i1 = int(235*oversample)
cmask[0:i1,:] = 0
elif detid=='A5':
if 'CIRCLYOT' in pupil:
i1, i2 = [int(1480*oversample), int(260*oversample)]
cmask[i1:,0:i2]=0
i1, i2 = [int(1480*oversample), int(1890*oversample)]
cmask[i1:,i2:]=0
i1 = int(1825*oversample)
cmask[i1:,:] = 0
else:
i1, i2 = [int(1485*oversample), int(265*oversample)]
cmask[i1:,0:i2]=0
i1, i2 = [int(1485*oversample), int(1895*oversample)]
cmask[i1:,i2:]=0
i1 = int(1830*oversample)
cmask[i1:,:] = 0
elif detid=='B1':
if 'CIRCLYOT' in pupil:
i1, i2 = [int(910*oversample), int(1635*oversample)]
cmask[0:i1,i2:]=0
i1 = int(210*oversample)
cmask[0:i1,:] = 0
else:
i1, i2 = [int(905*oversample), int(1630*oversample)]
cmask[0:i1,i2:]=0
i1 = int(205*oversample)
cmask[0:i1,:] = 0
elif detid=='B3':
if 'CIRCLYOT' in pupil:
i1, i2 = [int(920*oversample), int(500*oversample)]
cmask[0:i1,0:i2]=0
i1 = int(210*oversample)
cmask[0:i1,:] = 0
else:
i1, i2 = [int(920*oversample), int(500*oversample)]
cmask[0:i1,0:i2]=0
i1 = int(210*oversample)
cmask[0:i1,:] = 0
elif detid=='B5':
if 'CIRCLYOT' in pupil:
i1, i2 = [int(560*oversample), int(185*oversample)]
cmask[0:i1,0:i2]=0
i1, i2 = [int(550*oversample), int(1830*oversample)]
cmask[0:i1,i2:]=0
i1 = int(215*oversample)
cmask[0:i1,:] = 0
else:
i1, i2 = [int(560*oversample), int(190*oversample)]
cmask[0:i1,0:i2]=0
i1, i2 = [int(550*oversample), int(1835*oversample)]
cmask[0:i1,i2:]=0
i1 = int(215*oversample)
cmask[0:i1,:] = 0
# Convert back to 'sci' orientation
cmask = det_to_sci(cmask, detid)
return cmask
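# Minimal usage sketch for build_mask_detid(); the detector and reference mask
# are illustrative assumptions.
def _example_build_mask_detid():
    """Mask image as seen by NRCA5 (ALONG), referenced to MASK430R."""
    return build_mask_detid('A5', oversample=1, ref_mask='MASK430R')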
def coron_ap_locs(module, channel, mask, pupil=None, full=False):
"""Coronagraph mask aperture locations and sizes
Returns a dictionary of the detector aperture sizes
and locations. Attributes `cen` and `loc` are in terms
of (x,y) detector pixels.
"""
if pupil is None:
pupil = 'WEDGELYOT' if 'WB' in mask else 'CIRCLYOT'
if module=='A':
if channel=='SW':
if '210R' in mask:
cdict_rnd = {'det':'A2', 'cen':(712,526), 'size':640}
cdict_bar = {'det':'A2', 'cen':(716,538), 'size':640}
elif '335R' in mask:
cdict_rnd = {'det':'A2', 'cen':(1368,525), 'size':640}
cdict_bar = {'det':'A2', 'cen':(1372,536), 'size':640}
elif '430R' in mask:
cdict_rnd = {'det':'A2', 'cen':(2025,525), 'size':640}
cdict_bar = {'det':'A2', 'cen':(2029,536), 'size':640}
elif 'SWB' in mask:
cdict_rnd = {'det':'A4', 'cen':(487,523), 'size':640}
cdict_bar = {'det':'A4', 'cen':(490,536), 'size':640}
elif 'LWB' in mask:
cdict_rnd = {'det':'A4', 'cen':(1141,523), 'size':640}
cdict_bar = {'det':'A4', 'cen':(1143,536), 'size':640}
else:
raise ValueError('Mask {} not recognized for {} channel'\
.format(mask, channel))
elif channel=='LW':
if '210R' in mask:
cdict_rnd = {'det':'A5', 'cen':(1720, 1670), 'size':320}
cdict_bar = {'det':'A5', 'cen':(1725, 1681), 'size':320}
elif '335R' in mask:
cdict_rnd = {'det':'A5', 'cen':(1397,1672), 'size':320}
cdict_bar = {'det':'A5', 'cen':(1402,1682), 'size':320}
elif '430R' in mask:
cdict_rnd = {'det':'A5', 'cen':(1074,1672), 'size':320}
cdict_bar = {'det':'A5', 'cen':(1078,1682), 'size':320}
elif 'SWB' in mask:
cdict_rnd = {'det':'A5', 'cen':(752,1672), 'size':320}
cdict_bar = {'det':'A5', 'cen':(757,1682), 'size':320}
elif 'LWB' in mask:
cdict_rnd = {'det':'A5', 'cen':(430,1672), 'size':320}
cdict_bar = {'det':'A5', 'cen':(435,1682), 'size':320}
else:
raise ValueError('Mask {} not recognized for {} channel'\
.format(mask, channel))
else:
raise ValueError('Channel {} not recognized'.format(channel))
elif module=='B':
if channel=='SW':
if '210R' in mask:
cdict_rnd = {'det':'B1', 'cen':(1293,515), 'size':640}
cdict_bar = {'det':'B1', 'cen':(1287,509), 'size':640}
elif '335R' in mask:
cdict_rnd = {'det':'B1', 'cen':(637,513), 'size':640}
cdict_bar = {'det':'B1', 'cen':(632,508), 'size':640}
elif '430R' in mask:
cdict_rnd = {'det':'B1', 'cen':(-20,513), 'size':640}
cdict_bar = {'det':'B1', 'cen':(-25,508), 'size':640}
elif 'SWB' in mask:
cdict_rnd = {'det':'B3', 'cen':(874,519), 'size':640}
cdict_bar = {'det':'B3', 'cen':(870,518), 'size':640}
elif 'LWB' in mask:
cdict_rnd = {'det':'B3', 'cen':(1532,519), 'size':640}
cdict_bar = {'det':'B3', 'cen':(1526,510), 'size':640}
else:
raise ValueError('Mask {} not recognized for {} channel'\
.format(mask, channel))
elif channel=='LW':
if '210R' in mask:
cdict_rnd = {'det':'B5', 'cen':(1656,359), 'size':320}
cdict_bar = {'det':'B5', 'cen':(1660,359), 'size':320}
elif '335R' in mask:
cdict_rnd = {'det':'B5', 'cen':(1334,360), 'size':320}
cdict_bar = {'det':'B5', 'cen':(1338,360), 'size':320}
elif '430R' in mask:
cdict_rnd = {'det':'B5', 'cen':(1012,362), 'size':320}
cdict_bar = {'det':'B5', 'cen':(1015,361), 'size':320}
elif 'SWB' in mask:
cdict_rnd = {'det':'B5', 'cen':(366,364), 'size':320}
cdict_bar = {'det':'B5', 'cen':(370,364), 'size':320}
elif 'LWB' in mask:
cdict_rnd = {'det':'B5', 'cen':(689,363), 'size':320}
cdict_bar = {'det':'B5', 'cen':(693,364), 'size':320}
else:
raise ValueError('Mask {} not recognized for {} channel'\
.format(mask, channel))
else:
raise ValueError('Channel {} not recognized'.format(channel))
else:
raise ValueError('Module {} not recognized'.format(module))
# Choose whether to use round or bar Lyot mask
cdict = cdict_rnd if 'CIRC' in pupil else cdict_bar
x0, y0 = np.array(cdict['cen']) - cdict['size']/2
cdict['loc'] = (int(x0), int(y0))
# Add in 'sci' coordinates (V2/V3 orientation)
# X is flipped for A5, Y is flipped for all others
cen = cdict['cen']
if cdict['det'] == 'A5':
cdict['cen_sci'] = (2048-cen[0], cen[1])
else:
cdict['cen_sci'] = (cen[0], 2048-cen[1])
if full:
cdict['size'] = 2048
cdict['loc'] = (0,0)
return cdict
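# Minimal usage sketch for coron_ap_locs(); the module/channel/mask combination
# is an illustrative assumption.
def _example_coron_ap_locs():
    """Aperture location dictionary for MASK335R on Module A, LW channel."""
    return coron_ap_locs('A', 'LW', 'MASK335R', pupil='CIRCLYOT')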
| mit |
genialis/resolwe-bio | resolwe_bio/tools/merge_chipqc_archive.py | 1 | 1850 | #!/usr/bin/env python3
"""Merge ChIP/ATAC-seq prepeak and postpeak QC reports."""
import argparse
from collections import defaultdict
import pandas as pd
parser = argparse.ArgumentParser(description="Merge ChIP/ATAC-Seq QC reports.")
parser.add_argument(
"-f",
"--file_path",
required=True,
nargs="+",
help="List with paths to QC report files.",
)
parser.add_argument(
"-n", "--sample_names", required=True, nargs="+", help="List of sample names."
)
parser.add_argument(
"-r", "--report_type", required=True, nargs="+", help="List of report types."
)
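# Example invocation sketch (file and sample names are assumptions); the three
# lists are matched element-wise, so a sample appears once per report file:
#   merge_chipqc_archive.py \
#       -f s1_prepeak_qc.txt s1_postpeak_qc.txt \
#       -n s1 s1 \
#       -r prepeak postpeak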
if __name__ == "__main__":
args = parser.parse_args()
data = defaultdict(dict)
for report_file, report_type, sample_name in zip(
args.file_path, args.report_type, args.sample_names
):
data[sample_name][report_type] = report_file
prepeak_list = []
postpeak_list = []
for sample in data:
if "prepeak" in data[sample]:
prepeak = pd.read_csv(data[sample]["prepeak"], sep="\t")
prepeak.index = [sample]
prepeak_list.append(prepeak)
if "postpeak" in data[sample]:
postpeak = pd.read_csv(data[sample]["postpeak"], sep="\t")
postpeak.index = [sample]
postpeak_list.append(postpeak)
if prepeak_list and postpeak_list:
prepeaks = pd.concat(prepeak_list)
postpeaks = pd.concat(postpeak_list)
report = pd.merge(
prepeaks, postpeaks, left_index=True, right_index=True, how="outer"
)
elif prepeak_list:
report = pd.concat(prepeak_list)
else:
report = pd.concat(postpeak_list)
report.to_csv(
"QC_report.txt",
sep="\t",
na_rep="N/A",
index_label="SAMPLE_NAME",
float_format="%.3f",
)
| apache-2.0 |
yosssi/scipy_2015_sklearn_tutorial | notebooks/figures/plot_kneighbors_regularization.py | 25 | 1363 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
def make_dataset(n_samples=100):
rnd = np.random.RandomState(42)
x = np.linspace(-3, 3, n_samples)
y_no_noise = np.sin(4 * x) + x
y = y_no_noise + rnd.normal(size=len(x))
return x, y
def plot_regression_datasets():
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
for n_samples, ax in zip([10, 100, 1000], axes):
x, y = make_dataset(n_samples)
ax.plot(x, y, 'o', alpha=.6)
def plot_kneighbors_regularization():
rnd = np.random.RandomState(42)
x = np.linspace(-3, 3, 100)
y_no_noise = np.sin(4 * x) + x
y = y_no_noise + rnd.normal(size=len(x))
X = x[:, np.newaxis]
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
x_test = np.linspace(-3, 3, 1000)
for n_neighbors, ax in zip([2, 5, 20], axes.ravel()):
kneighbor_regression = KNeighborsRegressor(n_neighbors=n_neighbors)
kneighbor_regression.fit(X, y)
ax.plot(x, y_no_noise, label="true function")
ax.plot(x, y, "o", label="data")
ax.plot(x_test, kneighbor_regression.predict(x_test[:, np.newaxis]),
label="prediction")
ax.legend()
ax.set_title("n_neighbors = %d" % n_neighbors)
if __name__ == "__main__":
plot_kneighbors_regularization()
plt.show()
| cc0-1.0 |
jshiv/turntable | test/lib/python2.7/site-packages/numpy/linalg/linalg.py | 35 | 67345 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t))
# QR decompostion
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
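A minimal sketch of the 'raw' mode described in the Notes; the names
``h`` and ``tau`` are only illustrative, and ``h`` comes back transposed
(shape (N, M)) for Fortran compatibility:
>>> h, tau = np.linalg.qr(a, mode='raw')
>>> h.shape, tau.shape
((6, 9), (6,))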
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t:
a = a.astype(result_t)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
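A minimal sketch of the broadcasting mentioned in the Notes, computing
eigenvalues for a stack of two matrices in a single call (the name
``stacked`` is only illustrative):
>>> stacked = np.array([np.diag((1., 2.)), np.diag((3., 4.))])
>>> LA.eigvals(stacked).shape
(2, 2)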
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _ssyevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288,  5.82842712])
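Since ``a`` is stored above as a full Hermitian matrix, its lower and
upper triangles carry the same information, so either UPLO choice
should give the same result:
>>> np.allclose(LA.eigvalsh(a, UPLO='L'), LA.eigvalsh(a, UPLO='U'))
True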
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t))
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero, in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t)
return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues, not necessarily ordered.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _ssyevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t))
vt = vt.astype(result_t)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
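A minimal sketch of ``compute_uv=False``, which returns only the
singular values (in the same descending order as ``s`` above):
>>> s2 = np.linalg.svd(a, compute_uv=False)
>>> np.allclose(s, s2)
True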
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t)
s = s.astype(_realType(result_t))
vt = vt.astype(result_t)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t))
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[0]/s[-1]
else:
return norm(x, p)*norm(inv(x), p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
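A minimal sketch of the tolerance discussion above, using a
hypothetical nearly rank-deficient matrix (the name ``J`` is only
illustrative):
>>> J = np.eye(4); J[-1, -1] = 1e-20  # numerically tiny, not exactly zero
>>> matrix_rank(J)                    # default tol treats 1e-20 as zero
3
>>> matrix_rank(J, tol=1e-25)         # an explicit, much smaller tolerance
4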
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
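A minimal sketch of the SVD construction described in the Notes,
assuming `a` has full column rank (true with probability 1 for the
random matrix above):
>>> u, s, vt = np.linalg.svd(a, full_matrices=False)
>>> np.allclose(B, np.dot(vt.T, np.dot(np.diag(1. / s), u.T)))
True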
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 1.6.0.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
return sign.astype(result_t), logdet.astype(real_t)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
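A quick consistency check against `slogdet`, using the stacked array
above:
>>> sign, logdet = np.linalg.slogdet(a)
>>> np.allclose(np.linalg.det(a), sign * np.exp(logdet))
True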
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return _umath_linalg.det(a, signature=signature).astype(result_t)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
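A minimal sketch relating the `residuals` return value to the fitted
solution (the names ``sol``, ``res``, and ``sv`` are only illustrative):
>>> sol, res, rank, sv = np.linalg.lstsq(A, y)
>>> np.allclose(res, np.sum((y - A.dot(sol))**2))
True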
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute the extreme singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
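A quick check of the Frobenius formula given in the Notes:
>>> np.allclose(LA.norm(b, 'fro'), np.sqrt(np.sum(np.abs(b)**2)))
True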
"""
x = asarray(x)
# Check the default case first and handle it immediately.
if ord is None and axis is None:
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
return sqrt(sqnorm)
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis)
elif ord == -Inf:
return abs(x).min(axis=axis)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
return _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
return _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
return sqrt(add.reduce((x.conj() * x).real, axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
else:
raise ValueError("Improper number of dimensions to norm.")
| mit |
mlyundin/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 65 | 50308 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided coef_init does not match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can use a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
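A minimal usage sketch on hypothetical toy data (the result of ``fit``
is assigned back to ``clf`` simply to keep the doctest output short):
>>> import numpy as np
>>> from sklearn.linear_model import SGDClassifier
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = SGDClassifier(loss="hinge", penalty="l2")
>>> clf = clf.fit(X, y)
>>> clf.predict([[-0.8, -1]])
array([1])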
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
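# Illustrative sketch (added for clarity, not part of the original module):
# enabling averaged SGD for the regressor, as described in the `average`
# parameter documentation above. X and y are hypothetical training arrays.
#
#     from sklearn.linear_model import SGDRegressor
#     reg = SGDRegressor(average=10)   # start averaging after 10 samples
#     reg.fit(X, y)
#     reg.coef_, reg.intercept_        # averaged weights and intercept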
| bsd-3-clause |
christianurich/DynaMind-ToolBox | DynaMind-Performance-Assessment/3rdparty/CD3Waterbalance/Simulator.py | 1 | 23114 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 12 14:12:52 2014
@author: Acer
"""
import subprocess
import csv
import os
import numpy as np
import pylab as pl
from datetime import datetime
from matplotlib.dates import date2num, num2date
from itertools import cycle
from numpy import size, asarray, ceil, floor, mean
Rainevapovector =[]
Outputvector =[]
Indoorvector=[]
area_fractions1=[]
total_area = 0.0
#killing the cd3 process (if necessary) and deleting old output .txt files
def Deleter(location_files1='C:\\Users\Acer\Documents\GitHub\CD3Waterbalance\simulationwithpatterns\outputfiles'):
os.system("taskkill /cd3.exe")
todelete = [ f for f in os.listdir(location_files1) if f.endswith(".txt") ]
for i in range(len(todelete)):
os.remove(location_files1+"\%s" % todelete[i])
return
#executing program
def runcd3(filename='simple_system_CwR_RT_indooruse.xml'):
cd3 = r'"""C:\Program Files (x86)\CityDrain3\bin\cd3.exe" C:\Users\Acer\Documents\GitHub\CD3Waterbalance\simulationwithpatterns\outputfiles\%s""' % filename
p = subprocess.Popen(cd3, shell=True)
p.wait()
return
#vector=[Area, perv_fraction, imperv_to_storage, imperv_to_stormw]
#Catchment_area_fractions for complex system = [[485.1, 0.18, 0.63, 0.19], [855.9, 0.28, 0.43, 0.29], [800, 0.1, 0.3, 0.6], [960, 0.46, 0.45, 0.09], [1200, 0, 0, 1]]
def Fractioncalculator(vector=[[485.1, 0.0, 1.0, 0.0]]):
global area_fractions1
global total_area
total_area = 0.0
area_fractions1_0 = 0.0
area_fractions1_1 = 0.0
area_fractions1_2 = 0.0
for i in range(len(vector)):
total_area += float(vector[i][0])
area_fractions1_0 += float(vector[i][0]*vector[i][1])
area_fractions1_1 += float(vector[i][0]*vector[i][2])
area_fractions1_2 += float(vector[i][0]*vector[i][3])
area_fractions1=[area_fractions1_0/total_area, area_fractions1_1/total_area, area_fractions1_2/total_area]
return
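#Worked example (illustrative values only, added for clarity): two catchments
#of 800 m^2 and 200 m^2 give total_area = 1000 and area-weighted fractions
#that still sum to 1.0.
#
# Fractioncalculator([[800, 0.4, 0.4, 0.2], [200, 0.0, 0.0, 1.0]])
# total_area      -> 1000.0
# area_fractions1 -> [0.32, 0.32, 0.36]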
#getting model outputdata
def getoutputdata(location_files1, totalarea=total_area):
#getting outputvector
#location_files1='C:\Users\Acer\Documents\GitHub\CD3Waterbalance\simulationwithpatterns\outputfiles'
file_names=os.listdir(str(location_files1)[0:])
alltogether = []
names = []
for i in range(len(file_names)):
if file_names[i][(len(file_names[i])-3):len(file_names[i])] == 'txt':
file_name=file_names[i]
csv_file = open(str(location_files1) +"\%s" % file_name, "r")
data = csv.reader(csv_file, delimiter='\t')
mylist = list(data)
csv_file.closed
alltogether.append(mylist)
names.append(file_names[i])
csv_file.close()
#creating vector right size
global Outputvector
Outputvector=[['error']*(len(alltogether)+1) for m in range(len(alltogether[0]))]
#writing header
Outputvector[0][0]='Time'
for i in range(len(alltogether)):
Outputvector[0][i+1]=names[i][:(len(names[i])-4)]
#writing time column
for i in range(len(alltogether[0]))[1:]:
Outputvector[i][0]=float(date2num(datetime.strptime(alltogether[1][i][0][:19],"%d.%m.%Y %H:%M:%S")))
#writing Values of inputfiles in vector
for i in range((len(alltogether)+1))[1:]:
for n in range(len(alltogether[0]))[1:]:
Outputvector[n][i]=float(alltogether[i-1][n][1])
for i in range(len(Outputvector[0])):
if Outputvector[0][i] == 'evapo_model':
for n in range(len(Outputvector))[1:]:
Outputvector[n][i]=float(Outputvector[n][i])/1000*totalarea
if Outputvector[0][i] == 'rain_model':
for n in range(len(Outputvector))[1:]:
Outputvector[n][i]=float(Outputvector[n][i])/1000*totalarea
#checks whether all values have been inserted
for i in range(len(Outputvector)):
if 'error' in Outputvector[i]:
print('A problem has occurred, please check the getoutputdata() function')
print(('The problem has occurred in line '+str(i)+' of the Outputvector'))
else:
pass
Outputvector=np.asarray(Outputvector)
print('Outputvector has been created')
return
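#Note on units (added for clarity): the rain_model and evapo_model columns are
#converted from a water depth in mm to a volume in m^3 via value/1000*area,
#e.g. 2 mm over a 1000 m^2 catchment -> 0.002 m * 1000 m^2 = 2 m^3.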
def getinputdata(location_files2, numberhh , totalarea=total_area, lenindoor=9000):
#getting inputvector
#location_files2='C:\Users\Acer\Documents\GitHub\CD3Waterbalance\simulationwithpatterns\inputfiles'
file_names=os.listdir(str(location_files2)[0:])
rainevapo=[]
namesrainevapo=[]
indoor=[]
namesindoor=[]
for i in range(len(file_names)):
if file_names[i][(len(file_names[i])-3):len(file_names[i])] == 'ixx':
file_name=file_names[i]
csv_file = open(str(location_files2) +"\%s" % file_name, "r")
data = csv.reader(csv_file, delimiter='\t')
mylist = list(data)
if len(mylist)>lenindoor:
rainevapo.append(mylist)
namesrainevapo.append(file_names[i])
else:
indoor.append(mylist)
namesindoor.append(file_names[i])
#creating vector right size
global Rainevapovector, Indoorvector
Rainevapovector=[['error']*(len(namesrainevapo)+1) for m in range(size(rainevapo[0],0)-1)]
Indoorvector=[['error']*(len(namesindoor)+1) for m in range(size(indoor[0],0))]
#writing time column
for i in range(size(rainevapo[0],0)-1):
Rainevapovector[i][0]=float(date2num(datetime.strptime(rainevapo[1][i+1][0]+" "+rainevapo[1][i+1][1],"%d.%m.%Y %H:%M:%S")))
for i in range(size(indoor[0],0)):
Indoorvector[i][0]=float(date2num(datetime.strptime(indoor[1][i][0]+" "+indoor[1][i][1],"%d.%m.%Y %H:%M:%S")))
#writing Values of inputfiles in vector
for i in range((len(namesrainevapo)+1))[1:]:
for n in range(size(rainevapo[0],0)-1):
Rainevapovector[n][i]=float(rainevapo[i-1][n+1][2])
for i in range((len(namesindoor)+1))[1:]:
for n in range(size(indoor[0],0)):
Indoorvector[n][i]=float(indoor[i-1][n][2])
#correcting unit and volume!
Rainevapovector=np.asarray(Rainevapovector)
Indoorvector=np.asarray(Indoorvector)
for i in range(len(namesrainevapo)+1)[1:]:
Rainevapovector[:,i]=Rainevapovector[:,i]/1000*totalarea
for i in range(len(namesindoor)+1)[1:]:
Indoorvector[:,i]=Indoorvector[:,i]/1000*numberhh*(float(Outputvector[2][0])-float(Outputvector[1][0]))*24
#giving header for future reference
Rainevapovector=Rainevapovector.tolist()
Rainevapovector.insert(0,['time']*(len(namesrainevapo)+1))
for i in range(len(namesrainevapo)+1)[1:]:
Rainevapovector[0][i]=namesrainevapo[i-1][:(len(namesrainevapo[i-1])-4)]
Indoorvector=Indoorvector.tolist()
Indoorvector.insert(0,['time']*(len(namesindoor)+1))
for i in range(len(namesindoor)+1)[1:]:
Indoorvector[0][i]=namesindoor[i-1][:(len(namesindoor[i-1])-4)]
Rainevapovector = np.asarray(Rainevapovector)
Indoorvector = np.asarray(Indoorvector)
print('Indoorvector and RainEvapovector have been created')
return
#tocheck (all, Evapo, Rain, Indooruse, Outdoordemand?, System)
#area_fractions = [perv, imperv_to_storage, imperv_to_stormw]
def Bilanz(Data, tocheck, wettingloss = 0.4, depressionloss=1.5, totalarea = total_area, area_fractions = area_fractions1):
#tocheck=['Evapo', 'Rain', 'System']
#Data=[Rainevapovector, Outputvector, Indoorvector]
colorred = "\033[01;31m{0}\033[00m"
for i in range(len(tocheck)):
#evapotranspiration check
if tocheck[i] == 'Evapo':
evapomodel = 0.0
evapoinput = 0.0
for i in range(len(Data)):
for n in range(len(Data[i][0])):
if Data[i][0][n] == 'evapo':
for m in range(len(Data[i][:,n]))[1:]:
evapoinput += float(Data[i][:,n][m])
elif Data[i][0][n] == 'evapo_model':
for m in range(len(Data[i][:,n]))[1:]:
evapomodel += float(Data[i][:,n][m])
ErrorFRPI=(1 - evapomodel/evapoinput) * 100
print(('The difference between given and produced Evapotranspiration calculated by the Pattern Implementer and Filereader due to rounding errors is '+ colorred.format(str(ErrorFRPI))+' %'))
#rain check
elif tocheck[i] == 'Rain':
rainmodel = 0.0
raininput = 0.0
for i in range(len(Data)):
for n in range(len(Data[i][0])):
if Data[i][0][n] == 'rain':
for m in range(len(Data[i][:,n]))[1:]:
raininput += float(Data[i][:,n][m])
elif Data[i][0][n] == 'rain_model':
for m in range(len(Data[i][:,n]))[1:]:
rainmodel += float(Data[i][:,n][m])
ErrorFR=(1 - rainmodel/raininput) * 100
print(('The difference between given and produced Rain calculated by the Filereader due to rounding errors is '+ colorred.format(str(ErrorFR))+' %'))
#total system
#Lists have to be in alphabetical order
elif tocheck[i] == 'System':
#filenames in lists
totalstorage = []
totalstoragelist = ['Greywatertanklevels_0', 'Rainwatertanklevels_0', 'Stormwaterreservoirlevels_0']
inputER=[]
inputERlist = ['evapo_model', 'rain_model']
outputISSP = []
outputISSPlist = ['Actual_Infiltration_0', 'Potable_Water_Resorvoir_Demand_0', 'Sewer_0', 'Stormwaterpipe_0']
outputOD = []
for i in range(len(Data)):
for n in range(len(Data[i][0])):
if Data[i][0][n] in totalstoragelist:
totalstorage.append(Data[i][:,n])
elif Data[i][0][n] in inputERlist:
inputER.append(Data[i][:,n])
elif Data[i][0][n] in outputISSPlist:
outputISSP.append(Data[i][:,n])
if str(repr(Data[i][0][n])[1:15]) == 'Outdoor_Demand':
outputOD.append(Data[i][:,n])
totalstoragescalar = 0.0
rainminusevapolosses = 0.0
SewerStormwInfiltr = 0.0
PWRonly = 0.0
OutdoorD = 0.0
#Storage tanks
for i in range(len(totalstorage)):
totalstoragescalar += float(totalstorage[i][-1])
#Potable_Water_Demand/Sewer,Infiltr.,Stormwater
for i in range(len(outputISSP)):
if outputISSP[i][0] == 'PotableWaterDemand':
for n in range(len(outputISSP[0]))[1:]:
PWRonly += float(outputISSP[i][n])
else:
for n in range(len(outputISSP[0]))[1:]:
SewerStormwInfiltr -= float(outputISSP[i][n])
#OutdoorDemand
for i in range(len(outputOD)):
for n in range(len(outputOD[0]))[1:]:
OutdoorD += float(outputOD[i][n])
#Rain and Evapo including losses
lossstorage_perv_impervreservoir = 0.0
lossstorage_imperstormw = 0.0
onlyrain=0.0
onlyevapo=0.0
rainminusevapo = 0.0
global effective_rain
effective_rain = ['effective_rain']
for i in range(len(inputER[0]))[1:]:
if float(inputER[1][i]) > float(inputER[0][i]):
lossstorage_perv_impervreservoir += (float(inputER[1][i]) - float(inputER[0][i]))/totalarea*1000
lossstorage_imperstormw += (float(inputER[1][i]) - float(inputER[0][i]))/totalarea*1000
if lossstorage_perv_impervreservoir > wettingloss:
rainminusevapolosses += (float(inputER[1][i])-float(inputER[0][i]))*(area_fractions[0]+area_fractions[1])
foreffectiverain1 = (float(inputER[1][i])-float(inputER[0][i]))*(area_fractions[0]+area_fractions[1])
lossstorage_perv_impervreservoir = wettingloss
else:
foreffectiverain1=0.0
if lossstorage_imperstormw > depressionloss + wettingloss:
rainminusevapolosses += (float(inputER[1][i])-float(inputER[0][i]))*area_fractions[2]
foreffectiverain2 = (float(inputER[1][i])-float(inputER[0][i]))*area_fractions[2]
lossstorage_imperstormw = depressionloss + wettingloss
else:
foreffectiverain2=0.0
#writing the effective rain height in a vector
effective_rain.append(foreffectiverain1+foreffectiverain2)
else:
#writing the effective rain height in a vector
effective_rain.append(0.0)
#simulate drying via evapotranspiration
if lossstorage_perv_impervreservoir > 0:
lossstorage_perv_impervreservoir += (float(inputER[1][i]) - float(inputER[0][i]))/totalarea*1000
if lossstorage_perv_impervreservoir < 0:
lossstorage_perv_impervreservoir = 0.0
else:
pass
else:
lossstorage_perv_impervreservoir = 0.0
if lossstorage_imperstormw > 0:
lossstorage_imperstormw += (float(inputER[1][i]) - float(inputER[0][i]))/totalarea*1000
if lossstorage_imperstormw < 0:
lossstorage_imperstormw = 0.0
else:
pass
else:
lossstorage_imperstormw = 0.0
onlyrain += float(inputER[1][i])
if float(inputER[1][i]) >= float(inputER[0][i]):
onlyevapo += float(inputER[0][i])
rainminusevapo += (float(inputER[1][i])-float(inputER[0][i]))
else:
onlyevapo += float(inputER[1][i])
print(('Fraction of Pervious Area: '+str(area_fractions[0])))
print(('Fraction of Impervious Area to Reservoir: '+str(area_fractions[1])))
print(('Fraction of Impervious Area to Stormdrain: '+str(area_fractions[2])))
print(('Wetting Loss: '+str( wettingloss)+' mm'))
print(('Depression Loss: '+str(depressionloss)+' mm'))
print(('Total Rain: '+str(onlyrain) + ' = '+str(onlyevapo+rainminusevapo)+' m^3'))
print(('Evaporated Rain: '+str(onlyevapo)+' m^3'))
print(('Initial Losses only: '+str(rainminusevapo-rainminusevapolosses)+' m^3'))
print(('Potable_Water_Demand: '+str(PWRonly)+' m^3'))
print(('Outdoor_Demand: '+str(OutdoorD)+' m^3'))
print(('Rain minus all Losses: '+str(rainminusevapolosses)+' m^3'))
print(('SewerStormwInfiltr: '+str(-1*SewerStormwInfiltr)+' m^3'))
print(('Still stored in tanks: ' +str(totalstoragescalar)+' m^3'))
print(('Absolute Error of entire balance: '+str(PWRonly-OutdoorD-totalstoragescalar+rainminusevapolosses+SewerStormwInfiltr)+' m^3'))
print(('Relative Error of entire balance: '+str(100*(PWRonly-OutdoorD+rainminusevapolosses+SewerStormwInfiltr-totalstoragescalar)*2/(PWRonly+totalstoragescalar+OutdoorD+onlyrain+onlyevapo+(rainminusevapo-rainminusevapolosses)-SewerStormwInfiltr))+' %'))
return
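#Simplified worked sketch of the initial-loss bookkeeping above (illustrative
#numbers only, assuming wettingloss = 0.4 mm): net rain first fills the
#wetting store; once the store exceeds the threshold, that step's net rain is
#counted as effective rain and the store is capped, while net
#evapotranspiration drains the store again on dry steps.
# step  net_rain[mm]  store_before  store_after   counted_as_effective
# 1     +0.3          0.0           0.3           no
# 2     +0.3          0.3           0.4 (capped)  yes
# 3     -0.2          0.4           0.2           no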
#Possible Input: Outdoor_Demand, Indoor_Demand, all (plots everything), all filenames (without endings)
def plotter(Vector1, Vector2, Vector3,limx=[0,365], limy=[0,1], toplot=['rain_model', 'Stormwater', 'evapo_model', 'effective_rain','Indoor_Demand','Raintank1','Outdoor_Demand'] ):
#create the list of items to plot
global listtoplot
listtoplot=[]
for i in range(len(toplot)):
#searching vector headers for inputstrings, writes in plotting list
if toplot[i] in Vector1[0]:
for n in range(len(Vector1[0])):
if toplot[i]==Vector1[0][n]:
listtoplot.append([Vector1[:,0], Vector1[:,n]])
elif toplot[i] in Vector2[0]:
for n in range(len(Vector2[0])):
if toplot[i]==Vector2[0][n]:
listtoplot.append([Vector2[:,0], Vector2[:,n]])
elif toplot[i] in Vector3[0]:
for n in range(len(Vector3[0])):
if toplot[i]==Vector3[0][n]:
listtoplot.append([Vector3[:,0], Vector3[:,n]])
elif toplot[i] == 'Outdoor_Demand':
allheaders=Vector1.tolist()[0]+Vector2.tolist()[0]+Vector3.tolist()[0]
for n in range(len(allheaders)):
if toplot[i]==repr(allheaders[n])[1:15]:
position=n
if position<=len(Vector1[0]):
a=1
elif position<=len(Vector2[0]):
a=2
else:
a=3
exec('variable=Vector'+str(a))
storageOD=asarray([0.0 for m in range(len(variable))])
for i in range(len(variable[0])):
if repr(variable[0][i])[1:15] == 'Outdoor_Demand':
for n in range(len(variable))[1:]:
storageOD[n] += float(variable[n][i])
storageOD=storageOD.tolist()
storageOD[0]='Outdoor_Demand'
listtoplot.append([variable[:,0], storageOD])
#while the timestamp lies within the same day, sum up; then append the daily total
outdoordemandsum=0.0
dailyoutdoordemand=[]
fulldaystart=ceil(float(variable[:,0][1]))
fulldayend=floor(float(variable[:,0][-1]))
i=1
for n in range(int(fulldayend-fulldaystart)+1):
if float(variable[:,0][i]) < (int(fulldaystart)):
while float(variable[:,0][i]) <= (int(fulldaystart)+ n):
i+=1
else:
while float(variable[:,0][i]) >= (int(fulldaystart) + n-1) and float(variable[:,0][i]) < (int(fulldaystart) + n):
outdoordemandsum += float(storageOD[i])
i += 1
dailyoutdoordemand.append(outdoordemandsum)
outdoordemandsum=0.0
dailyoutdoordemand_per_sm=mean(dailyoutdoordemand)/(area_fractions1[0]*total_area)
print(('The average Outdoordemand per square meter for the simulated time frame is: '+str(dailyoutdoordemand_per_sm)+' m³/(m²d)'))
elif toplot[i] == 'Indoor_Demand':
allheaders=Vector1.tolist()[0]+Vector2.tolist()[0]+Vector3.tolist()[0]
for n in range(len(allheaders)):
if 'toilet'==(allheaders[n]):
position=n
if position<=len(Vector1[0]):
a=1
elif position<=len(Vector2[0]):
a=2
else:
a=3
exec('variable=Vector'+str(a))
storageID=asarray([0.0 for m in range(len(variable))])
for i in range(len(variable[0]))[1:]:
for n in range(len(variable))[1:]:
storageID[n] += float(variable[n][i])
storageID=storageID.tolist()
storageID[0]='Indoor_Demand'
listtoplot.append([variable[:,0], storageID])
elif toplot[i] == 'all':
for n in range(len(Vector1[0]))[1:]:
listtoplot.append([Vector1[:,0], Vector1[:,n]])
for n in range(len(Vector2[0]))[1:]:
listtoplot.append([Vector2[:,0], Vector2[:,n]])
for n in range(len(Vector3[0]))[1:]:
listtoplot.append([Vector3[:,0], Vector3[:,n]])
elif toplot[i] == 'effective_rain':
if len(Vector1[0])==len(effective_rain):
listtoplot.append([Vector1[:,0], effective_rain])
elif len(Vector2[0])==len(effective_rain):
listtoplot.append([Vector2[:,0], effective_rain])
else :
listtoplot.append([Vector3[:,0], effective_rain])
else:
print('Error: Wrong input name!')
#LEGEND!!! save pic if wanted
pl.figure(figsize=(12, 6), dpi=80)
pl.xlim(float(Vector1[1][0])+float(limx[0]), float(Vector1[1][0]) + float(limx[1]))
pl.ylim(float(limy[0]), float(limy[1]))
lines = ["-","--","-.",":"]
linecycler = cycle(lines)
for i in range(len(listtoplot)):
exec('pl.plot(asarray(listtoplot['+str(i)+'])[0][1:],asarray(listtoplot['+str(i)+'])[1][1:], linewidth=2.5, linestyle = next(linecycler), label=listtoplot['+str(i)+'][1][0])')
pl.legend(loc='best')
pl.title('Model In - and Output', fontsize=20)
pl.xlabel('Time [d]')
pl.ylabel('Volume [m^3]')
pl.grid(True)
pl.show()
print(('t=0: '+str(float(Vector1[1][0]))))
print(('The time range plotted: '+str([num2date(float(Vector1[1][0]) + float(limx[0])).strftime("%d.%m.%Y %H:%M:%S"),
num2date(float(Vector1[1][0]) + float(limx[1])).strftime("%d.%m.%Y %H:%M:%S")])))
return
#[[485.1, 0.18, 0.63, 0.19], [855.9, 0.28, 0.43, 0.29], [800, 0.1, 0.3, 0.6], [960, 0.46, 0.45, 0.09], [1200, 0, 0, 1]]
def theholelot(outputfiles='C:\\Users\Acer\Documents\GitHub\CD3Waterbalance\simulationwithpatterns\outputfiles', inputfiles='C:\\Users\Acer\Documents\GitHub\CD3Waterbalance\simulationwithpatterns\inputfiles',
numberhh=1., wettingloss = 0.4, depressionloss=1.5):
#Deleter(outputfiles)
#runcd3('simple_system_CwR_RT.xml')
##Fractioncalculator input = [[total Area, perv, imperv_to_storage, imperv_to_stormw],...]
Fractioncalculator([[800,0.4,0.4,0.2],[10000,0.1,0.1,0.8],[900,0.4,0.3,0.3],[500,0.5,0.4,0.1],[1400,0.4,0.0,0.6], [20000,0.0,0.0,1]])
getoutputdata(outputfiles, total_area)
getinputdata(inputfiles, numberhh, total_area)
Bilanz([Rainevapovector, Outputvector, Indoorvector], ['Evapo', 'Rain', 'System'], wettingloss, depressionloss, total_area, area_fractions1)
plotter(Indoorvector, Rainevapovector, Outputvector,[0,365],[0,1], ['rain_model', 'Stormwater', 'evapo_model', 'effective_rain','Outdoor_Demand'])
print('done')
return
#theholelot()
#Deleter('C:\Users\Acer\Documents\GitHub\CD3Waterbalance\simulationwithpatterns\outputfiles')
#runcd3(filename='Test.xml')
[[800,0.4,0.4,0.2],[10000,0.1,0.1,0.8],[900,0.4,0.3,0.3],[500,0.5,0.4,0.1],[1400,0.4,0.0,0.6], [20000,0.0,0.0,1]]
| gpl-2.0 |
adam-rabinowitz/ngs_python | structure/analyseInteraction.py | 2 | 27305 | import collections
import gzip
import multiprocessing
import numpy as np
import numpy.ma as ma
import os
import pandas as pd
import re
import itertools
from statsmodels.nonparametric.smoothers_lowess import lowess
from statsmodels.sandbox.stats.multicomp import multipletests
from scipy.stats import mannwhitneyu
class analyse_interaction(object):
def __init__(self, matrixList):
# Store matrix list and associated parameters
self.matrixList = matrixList
# Extract parameters and store regions
regionDict = collections.defaultdict(list)
for count, matrix in enumerate(matrixList):
# Check existence of file
if not os.path.isfile(matrix):
raise IOError('Could not find input file')
# Extract sample data
fileName = os.path.basename(matrix)
if not fileName.endswith('.normMatrix.gz'):
raise ValueError('Unexpected input files')
sample, binSize, region, minCount = fileName.split('.')[:4]
regionDict[sample].append(region)
if count:
if (not int(binSize) == self.binSize
or not int(minCount) == self.minCount):
raise ValueError('Sample parameters are different')
else:
self.binSize = int(binSize)
self.minCount = int(minCount)
# Check regions and store
self.sampleList = regionDict.keys()
self.sampleList.sort()
for count, sample in enumerate(self.sampleList):
regions = regionDict[sample]
regions.sort()
if count:
if not regions == self.regionList:
raise ValueError('Regions absent for some samples')
else:
self.regionList = regions
def __distance_prob_generator(self, matrix):
# Extract bin names
with gzip.open(matrix) as inFile:
binNames = inFile.next().strip().split()
# Create distance matrix
start, end = zip(
*[re.split(':|-', x)[1:] for x in binNames])
centres = np.array([map(int, start), map(int, end)]).mean(axis=0)
distMatrix = np.abs(centres - centres[:,None])
# Read in matrix and remove columns
probMatrix = np.loadtxt(
matrix, dtype=np.float32, delimiter='\t', skiprows=1)
for index, (prob, dist) in enumerate(zip(probMatrix.T, distMatrix.T)):
binDF = pd.DataFrame()
binDF['prob'] = prob
binDF['dist'] = dist
yield((binNames[index], binDF))
def __calc_quantile_metrics(
self, inQueue, outQueue
):
# Loop through input queue
for matrix, quantile in iter(inQueue.get, None):
# Extract bin data
fileName = os.path.basename(matrix)
sample, binSize, region, minCount = fileName.split('.')[:4]
# Create output dataframe
with gzip.open(matrix) as inFile:
binNames = inFile.next().strip().split('\t')
outDF = pd.DataFrame(index = binNames)
outDF['sample'] = sample
outDF['region'] = region
outDF['bin'] = binNames
# Create name for output series
quantileName = quantile * 100
if quantileName % 1:
quantileName = 'Q' + str(quantileName)
else:
quantileName = 'Q' + str(int(quantileName))
# Extract bin data
for binName, binDF in self.__distance_prob_generator(matrix):
# Skip bins with low probabilities
if binDF['prob'].sum() < quantile:
continue
# Calculate quantile distance
binDF['absdist'] = binDF['dist'].abs()
binDF.sort_values('absdist', inplace=True)
binDF['cumsum'] = binDF['prob'].cumsum()
quantDist = binDF['dist'][binDF['cumsum'] >= quantile].iloc[0]
# Add value to output df
outDF.loc[binName, quantileName] = quantDist
# Add output dataframe to out queue
outQueue.put((sample, region, outDF))
def calc_quantile(
self, quantile, threads=1
):
# Check arguments
if not isinstance(quantile, float) or not 0 < quantile < 1:
raise ValueError('quantile must be float between 0 and 1')
# Create queues
inQueue = multiprocessing.Queue()
outQueue = multiprocessing.Queue()
# Create processes
processList = []
for _ in range(threads):
process = multiprocessing.Process(
target = self.__calc_quantile_metrics,
args = (inQueue, outQueue)
)
process.start()
processList.append(process)
# Add data to queue
for matrix in self.matrixList:
inQueue.put((matrix, quantile))
# Create ordered dictionary to store values
output = collections.OrderedDict()
for sample in self.sampleList:
output[sample] = collections.OrderedDict()
for region in self.regionList:
output[sample][region] = None
# Populate output dictionary with dataframes
for _ in self.matrixList:
sample, region, df = outQueue.get()
output[sample][region] = df
# Clean up
for _ in range(threads):
inQueue.put(None)
for process in processList:
process.join()
# Concatenate regions and samples and return output
for sample in output:
output[sample] = pd.concat(
output[sample].values(), axis = 0)
output = pd.concat(
output.values(), axis=0
)
output.index = np.arange(output.shape[0])
return(output)
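# Illustrative usage sketch (added for clarity, not part of the original
# module): computing the median (0.5 quantile) interaction distance per bin
# for a set of '.normMatrix.gz' files. `matrixPaths` is a hypothetical list of
# input file paths.
#
#     ai = analyse_interaction(matrixPaths)
#     medianDF = ai.calc_quantile(0.5, threads=4)
#     # returns a dataframe with columns: sample, region, bin, Q50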
class maskMatrix(object):
def __init__(self, matrix, regions='', overlap = False):
# Extract bin names
if matrix.endswith('.gz'):
with gzip.open(matrix) as inFile:
self.binNames = inFile.next().strip().split('\t')
else:
with open(matrix) as inFile:
self.binNames = inFile.next().strip().split('\t')
# Create bin dataframe
binDF = pd.DataFrame()
binDF['chr'], binDF['start'], binDF['end'] = zip(
*[re.split(':|-', x) for x in self.binNames])
binDF[['start', 'end']] = binDF[['start', 'end']].astype(int)
binDF['chr'] = binDF['chr'].astype(str)
binDF['name'] = self.binNames
binDF['centre'] = np.mean([binDF['start'], binDF['end']], axis=0)
self.binDF = binDF
# Open probability matrix and check it is square
probMatrix = np.loadtxt(matrix, skiprows = 1)
if probMatrix.shape[0] != probMatrix.shape[1]:
raise IOError('Matrix must be square')
# Create mask matrix and remove low values
chrArray = np.array(self.binDF['chr'])
maskMatrix = chrArray != chrArray[:,None]
lowValues = probMatrix.sum(axis=0) < 0.5
maskMatrix[lowValues,:] = True
maskMatrix[:,lowValues] = True
# Add group data to dataframe
groups = self.binDF['chr'].copy()
groups[lowValues] = np.nan
self.binDF['group'] = groups
# Create masked probability and distance matrices
self.probMatrix = ma.masked_array(probMatrix, mask = maskMatrix)
centreArray = np.array(self.binDF['centre'])
distMatrix = np.abs(centreArray - centreArray[:,None])
self.distMatrix = ma.masked_array(distMatrix, mask = maskMatrix)
def upDown(self, array, index):
if index == 0:
up = ma.masked_array([], mask=np.array([],dtype=bool))
else:
up = array[index-1::-1]
down = array[index+1:]
return(up, down)
def unmaskedPair(self, a1, a2, maxl):
''' Returns indices of unmasked array pairs.'''
maxl = min(len(a1), len(a2), maxl)
if maxl == 0:
return(np.array([]))
masked = np.logical_or(a1.mask[:maxl], a2.mask[:maxl])
indices = np.where(masked == False)[0]
return(indices)
def binDirection(self, maxl = 10):
''' Extract interaction direction data for bins '''
# Create dataframe to store data
df = pd.DataFrame(columns = ['self', 'inter', 'up', 'down', 'log2'])
self.binDF = pd.concat([self.binDF, df], axis=1)
# Loop through rows of the matrix
for rowNo, row in enumerate(self.probMatrix):
# Set none values if bin is entirely masked
if ma.count(row) == 0:
continue
# Else calculate values
else:
# Extract up and down arrays
up, down = self.upDown(row, rowNo)
# Extract probabilities
selfProb = row[rowNo].sum()
if up.count() == 0:
upProb = 0
else:
upProb = up.sum()
if down.count() == 0:
downProb = 0
else:
downProb = down.sum()
interProb = 1 - upProb - downProb - selfProb
# Extract paired bins for log2 calculations
indices = self.unmaskedPair(up, down, maxl)
if len(indices) > 0:
# Calculate sum of paired up and down bins
upSum = up[indices].sum()
downSum = down[indices].sum()
# Calculate log2 ratio
if upSum == 0:
if downSum == 0:
log2 = np.nan
else:
log2 = -np.inf
elif downSum == 0:
log2 = np.inf
else:
log2 = np.log2(upSum/downSum)
else:
log2 = np.nan
# Store results
self.binDF.loc[rowNo,['self','inter','up','down','log2']] = (
selfProb, interProb, upProb, downProb, log2)
def binDistance(self):
''' Extract weighted mean interaction distance for bins '''
# Create dataframe to store data
df = pd.DataFrame(columns = ['dist'])
self.binDF = pd.concat([self.binDF, df], axis=1)
# Loop through rows of the matrix
for rowNo, row in enumerate(self.distMatrix):
# Set none values if bin is entirely masked
if ma.count(row) == 0:
continue
# Else calculate values
else:
# Calculate distance and store results
dist = ma.average(row, weights = self.probMatrix[rowNo])
self.binDF.loc[rowNo,'dist'] = np.uint32(dist)
def combinedDistance(self):
''' Extract lowess smooth interaction frequency for dataset '''
# Extract probabilities
prob = self.probMatrix[~self.probMatrix.mask]
dist = self.distMatrix[~self.distMatrix.mask]
# Return data
return(np.array([dist, prob]).T)
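# Illustrative usage sketch (added for clarity, not part of the original
# module): per-bin direction and distance metrics from one normalised matrix.
# The file path is a hypothetical placeholder.
#
#     mm = maskMatrix('/path/to/sample.normMatrix.gz')
#     mm.binDirection(maxl=10)  # adds self/inter/up/down/log2 columns to binDF
#     mm.binDistance()          # adds the weighted mean 'dist' column
#     mm.binDF.head()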
class compare_paired_matrices(object):
def __init__(
self, samples1, samples2, conditions, indir,
suffix = 'normMatrix.gz'
):
# Check arguments
if not isinstance(samples1, list) or len(samples1) == 0:
raise TypeError('samples1 must be a list of length >= 1')
if not isinstance(samples2, list) or len(samples2) == 0:
raise TypeError('samples2 must be a list of length >= 1')
if not os.path.isdir(indir):
raise IOError('could not find indir')
if (not isinstance(conditions, list)
or len(conditions) != 2
or not isinstance(conditions[0], str)
or not isinstance(conditions[1], str)):
raise TypeError('conditions must be a list of two strings')
if not isinstance(suffix, str):
raise TypeError('suffix must be a string')
# Store supplied variables
self.samples1 = samples1
self.samples2 = samples2
self.conditions = conditions
self.indir = indir
self.suffix = suffix
# Create matrix dictionary and check pairings
self.matrices = self.__create_matrix_dictionary()
self.matrixnames = self.__check_matrix_pairing()
def __create_matrix_dictionary(self):
# Check that all prefixes are unique strings
for s1, s2 in itertools.permutations(self.samples1 + self.samples2, 2):
if not isinstance(s1, str) or not isinstance(s2, str):
raise TypeError('prefixes must be lists of strings')
if s1.startswith(s2):
raise ValueError('{} & {} prefixes not unique'.format(s1, s2))
# Create dictionary to store extracted matrices
matrixDict = collections.OrderedDict()
for condition, samples in zip(
self.conditions, [self.samples1, self.samples2]
):
matrixDict[condition] = collections.OrderedDict()
for s in samples:
matrixDict[condition][s] = []
# Extract matrices for each prefix
fileList = os.listdir(self.indir)
fileList = [f for f in fileList if f.endswith(self.suffix)]
for condition in matrixDict:
for sample in matrixDict[condition]:
for f in fileList:
if f.startswith(sample):
matrixDict[condition][sample].append(f)
# Sort matrix lists and add full path
for condition in matrixDict:
for sample in matrixDict[condition]:
matrixList = matrixDict[condition][sample]
matrixList.sort()
matrixList = [os.path.join(self.indir, m) for m in matrixList]
matrixDict[condition][sample] = matrixList
# Return data
return(matrixDict)
def __check_matrix_pairing(self):
# Check that all matrices are paired for all samples
reference = None
for condition in self.matrices:
for sample in self.matrices[condition]:
# Extract matrix list and check length
matrixList = self.matrices[condition][sample]
if len(matrixList) == 0:
raise ValueError('No matrix found for {}'.format(sample))
# Extract matrix names
regx = re.compile('^.*?/{}([^/]+){}$'.format(sample, self.suffix))
matrixNames = [regx.sub('\\1', x) for x in matrixList]
matrixNames = [x.strip('.') for x in matrixNames]
matrixNames.sort()
# Check names are consistent
if reference is None:
reference = matrixNames
if matrixNames != reference:
print('Reference: {}'.format(', '.join(reference)))
print('Comparison: {}'.format(', '.join(matrixList)))
raise ValueError('Matrix names do not match')
# Return data
return(reference)
def __prob_matrix(self, path):
# Read in matrix
matrix = np.loadtxt(path, delimiter='\t', skiprows=1,
dtype=np.float64)
# Check matrix is square
m, n = matrix.shape
if m != n:
raise ValueError('{} is not square'.format(path))
# Check matrix is symmetrical to six decimal places
if not np.allclose(matrix, matrix.T, atol=1.01e-6, rtol=0):
self.__extract_nonsymetrical_pairs(matrix)
# Return matrix
return(matrix)
def __dist_matrix(self, path):
# Extract bin names
with gzip.open(path) as inFile:
binNames = inFile.next().strip().split('\t')
# Extract and check bin data
binData = np.array([re.split('[:-]', x) for x in binNames])
if not np.alltrue(binData.T[0] == binData.T[0][1]):
raise ValueError('{} is not from single chromosome'.format(path))
# Generate distance matrix
centres = ((binData.T[1].astype(np.int64) +
binData.T[2].astype(np.int64)) / 2).reshape(len(binData), 1)
dist = abs(centres - centres.T)
return(dist)
def __prob_dist_matrix(self, path):
probMatrix = self.__prob_matrix(path)
distMatrix = self.__dist_matrix(path)
if probMatrix.shape != distMatrix.shape:
raise ValueError('{} has ambiguous bin numbers'.format(path))
return((probMatrix, distMatrix))
def __extract_nonsymetrical_pairs(self, matrix, maxno=10):
different = np.where(matrix != matrix.T)
count = 0
print('Non-symmetrical values found. Examples follow:')
for d1, d2 in zip(different[0], different[1]):
output = '{}\t{}\t{}\t{}'.format(d1, d2, matrix[d1, d2], matrix[d2, d1])
print(output)
count += 1
if count == maxno:
break
raise ValueError('Non-symmetrical matrix found')
def extract_dist_prob(self):
# Create output dataframe
outDF = pd.DataFrame(
columns = ['cond', 'repl', 'smpl', 'mtrx', 'dist', 'prob'])
# Extract probabilities for input matrices
for condition in self.matrices:
for replicate, sample in enumerate(self.matrices[condition]):
for path, name in zip(
self.matrices[condition][sample], self.matrixnames
):
# Create matrices
probMatrix, distMatrix = self.__prob_dist_matrix(path)
# Extract data from lower triangles
trilIndices = np.tril_indices(probMatrix.shape[0])
probData = probMatrix[trilIndices]
distData = distMatrix[trilIndices]
# Create dataframe and concat to output
pathDF = pd.DataFrame({
'cond' : pd.Series([condition] * len(probData)),
'repl' : pd.Series([replicate + 1] * len(probData)),
'smpl' : pd.Series([sample] * len(probData)),
'mtrx' : pd.Series([name] * len(probData)),
'dist' : distData,
'prob' : probData
}, columns = ['cond', 'repl', 'smpl', 'mtrx', 'dist',
'prob'])
outDF = pd.concat((outDF, pathDF), axis=0)
return(outDF)
def mean_matrix_dist_prob(self, rmzero=True):
# Check arguments:
if not isinstance(rmzero, bool):
raise TypeError('rmzero must be bool')
# Extract probabilities and remove zeros, if requested
probData = self.extract_dist_prob()
if rmzero:
probData = probData[probData['prob'] > 0]
# Split the data and create output dataframe
g = probData.groupby(['smpl', 'mtrx', 'dist'])
outDF = pd.DataFrame(index = g.groups.keys(), columns=[
'cond', 'repl', 'smpl', 'mtrx', 'dist', 'no', 'prob'])
outDF = outDF.sort_index()
# Populate dataframe
for key, data in g:
# Check all conditions and replicate data is identical
if (data['repl'] != data['repl'].iloc[0]).any():
raise ValueError('replicate not consistent across samples')
if (data['cond'] != data['cond'].iloc[0]).any():
raise ValueError('condition not consistent across samples')
# Create and store output
output = [data['cond'].iloc[0], data['repl'].iloc[0], key[0],
key[1], key[2], data['prob'].size, data['prob'].mean()]
outDF.loc[key] = output
# Reindex and return dataframe
outDF.index = np.arange(len(outDF))
return(outDF)
def calculate_dist_pvalue(self, rmzero=True, minvalues=10):
# Extract distances for input matrices
distProb = self.extract_dist_prob()
splitDist = distProb.groupby('dist')
# Create output columns
colNames = []
for condition in self.matrices:
for sample in self.matrices[condition]:
colNames.append('{}_{}_no'.format(condition, sample))
colNames.append('{}_{}_mean'.format(condition, sample))
for condition in self.matrices:
colNames.append('{}_no'.format(condition))
colNames.append('{}_mean'.format(condition))
colNames.extend(['pvalue', 'fdr'])
# Create output dataframe
outDF = pd.DataFrame(
columns = colNames, index = splitDist.groups.keys())
outDF = outDF.sort_index()
# Loop through data and calculate results
for dist, data in splitDist:
# Remove zero values
if rmzero:
data = data[data['prob'] > 0]
# Extract data for conditions and samples
condValues = []
for cond in self.matrices:
# Extract data for condition
condData = data[data['cond'] == cond]
condProb = condData['prob']
condValues.append(condProb)
# Add condition data to output
colPrefix = '{}_'.format(cond)
outDF.loc[dist, colPrefix + 'no'] = condProb.size
outDF.loc[dist, colPrefix + 'mean'] = condProb.mean()
for smpl in self.matrices[cond]:
# Extract data for sample
smplData = condData[condData['smpl'] == smpl]
smplProb = smplData['prob']
# Add sample data to output
colPrefix = '{}_{}_'.format(cond, smpl)
outDF.loc[dist, colPrefix + 'no'] = smplProb.size
outDF.loc[dist, colPrefix + 'mean'] = smplProb.mean()
# Calculate pvalues
prob1, prob2 = condValues
if prob1.size >= minvalues and prob2.size >= minvalues:
outDF.loc[dist, 'pvalue'] = mannwhitneyu(prob1, prob2)[1]
# Sort data, add fdr and return
pvalueIndex = outDF.index[~outDF['pvalue'].isnull()]
outDF.loc[pvalueIndex, 'fdr'] = multipletests(
outDF.loc[pvalueIndex, 'pvalue'], method='fdr_bh')[1]
return(outDF)
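# Worked sketch (illustrative, made-up p-values; added for clarity): the
# Benjamini-Hochberg adjustment applied above via statsmodels' multipletests.
#
#     from statsmodels.sandbox.stats.multicomp import multipletests
#     pvals = [0.001, 0.02, 0.04, 0.30]
#     multipletests(pvals, method='fdr_bh')[1]
#     # -> adjusted p-values approx. [0.004, 0.04, 0.053, 0.30]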
def extract_dist_quantile(
self, quantile
):
# Create output dataframe
outDF = pd.DataFrame(columns=['cond', 'smpl', 'quan', 'dist'])
# Loop through input queue
for cond in self.matrices:
for smpl in self.matrices[cond]:
for path in self.matrices[cond][smpl]:
# Create matrices
probMatrix, distMatrix = self.__prob_dist_matrix(path)
dfList = []
# Loop through columns
for dist, prob in zip(distMatrix.T, probMatrix.T):
# Create dataframe listing distances
distDF = pd.DataFrame()
distDF['dist'] = dist
distDF['prob'] = prob
if distDF['prob'].sum() == 0:
continue
if not 1.05 > distDF['prob'].sum() > 0.95:
raise ValueError('Columns must add to 0 or ~1')
# Sort by distance and find cumulative frequence
distDF.sort_values('dist', inplace=True)
distDF['cumsum'] = distDF['prob'].cumsum()
# Create dataframe to store data
quantDF = pd.DataFrame(index = quantile)
quantDF['cond'] = [cond] * len(quantile)
quantDF['smpl'] = [smpl] * len(quantile)
quantDF['quan'] = quantile
# Calculate quantile distances and store
for q in quantile:
subsetDF = distDF[distDF['cumsum'] >= q]
quantDF.loc[q, 'dist'] = subsetDF['dist'].iloc[0]
dfList.append(quantDF)
# Append results to output
outDF = pd.concat([outDF] + dfList, axis=0)
# Return data
return(outDF)
def calculate_quantile_pvalue(
self, quantile, minvalues=10
):
# Check arguments
if isinstance(quantile, float):
quantile = [quantile]
elif isinstance(quantile, list):
for q in quantile:
if not isinstance(q, float):
raise TypeError('quantile list must contain floats')
else:
raise TypeError('quantile must be float or list of floats')
# Create colnames for output dataframe
colNames = []
for condition in self.matrices:
for sample in self.matrices[condition]:
colNames.append('{}_{}_no'.format(condition, sample))
colNames.append('{}_{}_mean'.format(condition, sample))
for condition in self.matrices:
colNames.append('{}_no'.format(condition))
colNames.append('{}_mean'.format(condition))
colNames.extend(['pvalue', 'fdr'])
# Create output dataframe
outDF = pd.DataFrame(index=quantile, columns=colNames)
outDF = outDF.sort_index()
# Extract quantile distance data
quantData = self.extract_dist_quantile(quantile)
splitQuant = quantData.groupby('quan')
for q, data in splitQuant:
# Extract data for conditions and samples
condValues = []
for cond in self.matrices:
# Extract data for condition
condData = data[data['cond'] == cond]
condDist = condData['dist']
condValues.append(condDist)
# Add condition data to output
colPrefix = '{}_'.format(cond)
outDF.loc[q, colPrefix + 'no'] = condDist.size
outDF.loc[q, colPrefix + 'mean'] = condDist.mean()
for smpl in self.matrices[cond]:
# Extract data for sample
smplData = condData[condData['smpl'] == smpl]
smplDist = smplData['dist']
# Add sample data to output
colPrefix = '{}_{}_'.format(cond, smpl)
outDF.loc[q, colPrefix + 'no'] = smplDist.size
outDF.loc[q, colPrefix + 'mean'] = smplDist.mean()
# Calculate pvalues
dist1, dist2 = condValues
if dist1.size >= minvalues and dist2.size >= minvalues:
outDF.loc[q, 'pvalue'] = mannwhitneyu(dist1, dist2)[1]
# Add fdr and return
pvalueIndex = outDF.index[~outDF['pvalue'].isnull()]
outDF.loc[pvalueIndex, 'fdr'] = multipletests(
outDF.loc[pvalueIndex, 'pvalue'], method='fdr_bh')[1]
        return outDF
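# Hypothetical usage sketch (assumption: the class definition and its constructor
# sit above this excerpt; the instance name below is illustrative only):
#
#   analysis = ...  # object whose self.matrices maps condition -> sample -> paths
#   quantDF = analysis.calculate_quantile_pvalue([0.25, 0.5, 0.75], minvalues=10)
#   hits = quantDF[quantDF['fdr'] < 0.05]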
| gpl-2.0 |
ManuSchmi88/landlab | landlab/ca/examples/turbulent_suspension_with_settling.py | 8 | 5487 | #!/usr/bin/env python
"""
isotropic_turbulent_suspension.py
Example of a continuous-time, stochastic, pair-based cellular automaton model,
which simulates the diffusion of suspended, neutrally buoyant particles in a
turbulent fluid.
Written by Greg Tucker, February 2015
"""
import time
import matplotlib
from pylab import figure, plot, show
from numpy import where, zeros, mean
from landlab import RasterModelGrid
from landlab.components.cellular_automata.celllab_cts import Transition, CAPlotter
from landlab.components.cellular_automata.oriented_raster_cts import OrientedRasterCTS
def setup_transition_list():
"""
Creates and returns a list of Transition() objects to represent state
transitions for a biased random walk, in which the rate of downward
motion is greater than the rate in the other three directions.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
State 0 represents fluid and state 1 represents a particle (such as a
sediment grain, tea leaf, or dissolved heavy particle).
The states and transitions are as follows:
Pair state Transition to Process Rate (cells/s)
========== ============= ======= ==============
0 (0-0) (none) - -
1 (0-1) 2 (1-0) left motion 10.0
2 (1-0) 1 (0-1) right motion 10.0
3 (1-1) (none) - -
4 (0-0) (none) - -
5 (0-1) 2 (1-0) down motion 10.55
6 (1-0) 1 (0-1) up motion 9.45
7 (1-1) (none) - -
"""
# Create an empty transition list
xn_list = []
# Append four transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left cell, right cell, orientation [0=horizontal])
# - Tuple representing new pair state
# (bottom cell, top cell, orientation [1=vertical])
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
xn_list.append( Transition((0,1,0), (1,0,0), 10., 'left motion') )
xn_list.append( Transition((1,0,0), (0,1,0), 10., 'right motion') )
xn_list.append( Transition((0,1,1), (1,0,1), 10.55, 'down motion') )
xn_list.append( Transition((1,0,1), (0,1,1), 9.45, 'up motion') )
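    # The down/up rate asymmetry (10.55 vs 9.45 cells/s) is what biases the walk
    # and gives the suspended particles their net settling velocity.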
return xn_list
def main():
# INITIALIZE
# User-defined parameters
nr = 100 # number of rows in grid
nc = 64 # number of columns in grid
plot_interval = 0.5 # time interval for plotting, sec
run_duration = 20.0 # duration of run, sec
report_interval = 10.0 # report interval, in real-time seconds
# Remember the clock time, and calculate when we next want to report
# progress.
current_real_time = time.time()
next_report = current_real_time + report_interval
# Create grid
mg = RasterModelGrid(nr, nc, 1.0)
# Make the boundaries be walls
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
# Set up the states and pair transitions.
ns_dict = { 0 : 'fluid', 1 : 'particle' }
xn_list = setup_transition_list()
# Create the node-state array and attach it to the grid
node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=int)
# Initialize the node-state array: here, the initial condition is a pile of
# resting grains at the bottom of a container.
bottom_rows = where(mg.node_y<0.1*nr)[0]
node_state_grid[bottom_rows] = 1
# For visual display purposes, set all boundary nodes to fluid
node_state_grid[mg.closed_boundary_nodes] = 0
# Create the CA model
ca = OrientedRasterCTS(mg, ns_dict, xn_list, node_state_grid)
grain = '#5F594D'
fluid = '#D0E4F2'
clist = [fluid,grain]
my_cmap = matplotlib.colors.ListedColormap(clist)
# Create a CAPlotter object for handling screen display
ca_plotter = CAPlotter(ca, cmap=my_cmap)
# Plot the initial grid
ca_plotter.update_plot()
# RUN
current_time = 0.0
while current_time < run_duration:
# Once in a while, print out simulation and real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= next_report:
            print('Current sim time %.2f (%.1f%%)' % (current_time, 100 * current_time / run_duration))
next_report = current_real_time + report_interval
# Run the model forward in time until the next output step
ca.run(current_time+plot_interval, ca.node_state,
plot_each_transition=False)
current_time += plot_interval
# Plot the current grid
ca_plotter.update_plot()
# FINALIZE
# Plot
ca_plotter.finalize()
# Calculate concentration profile
c = zeros(nr)
for r in range(nr):
c[r] = mean(node_state_grid[r*nc:(r+1)*nc])
figure(2)
plot(c, range(nr), 'o')
show()
# If user runs this file, activate the main() function
if __name__ == "__main__":
main()
| mit |
ingelectronicadj/FisicaConPython | FisicaCuantica/Ley de Planck/leyDePlanck.py | 1 | 3089 | # Author: Diego Javier Mena Amado
## Have fun...
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
import scipy.constants
from pylab import plot,xlabel,ylabel,show
from sympy import *
# Workstation / figure setup
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.1, bottom=0.25)
t = np.arange(0, 0.99, 0.001)
a0 = 0.7
#s = a0*np.sin(2*np.pi*t)
#print (t,a0) # Debugging division-by-zero errors that were found
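# Dimensionless Planck curve (same form as in update() below):
# B(x) ~ 1 / (x**5 * (exp(1/x) - 1)), with the slider value rescaling x.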
PLANCK = ((t**5)*(np.exp(1/(t*a0))-1))**(-1)
l, = plt.plot(t, PLANCK, lw=2, color='red')
# Define sweep limits
#plt.xlim((0.0008, 1))
#plt.ylim((0, 30))
plt.axis([0.01, 1, 0, 30])
x = np.linspace(0.01, 1, 1000)
y = np.linspace(0, 30, 1000)
# Label the coordinate axes
xlabel("t")
ylabel("x(t)")
# Physical constants defined by hand (not used further below) instead of via scipy.constants
k=1.38*10**(-23)
h=6.62*10**(-34) # Planck constant
c=3*10**8
# Load the styles for the curves
style = {'family' : 'bold italic','color' : 'blue','weight' : 'normal','size' : 14}
style1 = {'family' : 'bold italic','color' : 'green','weight' : 'normal','size' : 14}
style2 = {'family' : 'bold italic','color' : 'red','weight' : 'normal','size' : 14}
style3 = {'family' : 'bold italic','color' : 'black','weight' : 'normal','size' : 14}
style4 = {'family' : 'bold italic','color' : 'purple','weight' : 'normal','size' : 14}
# Add labels identifying each curve and the developers
plt.title('Fisica de Semiconductores', fontdict=style2)
plt.text(0.53, 28, r'$\ Diego \ Javier \ Mena $', fontdict=style3)
plt.text(0.23, 20, r'$\ Ley \ de \ Planck $', fontdict=style)
plt.text(0.52, 18, r'$\ Ley \ de \ Rayleigh-Jeans $', fontdict=style4)
plt.text(0.185, 25, r'$\ Limite \ de \ Wien $', fontdict=style3)
# Planck's law equation
plt.plot(x, ((x**5)*(np.exp(1/x)-1))**(-1),
x, ((x**5)*(np.exp(1/(0.9*x))-1))**(-1),
x, ((x**5)*(np.exp(1/(0.8*x))-1))**(-1))
# Rayleigh-Jeans equation
plt.plot(x, 1/(x**4), x,1/(0.9*x**4),x,1/(0.8*x**4) )
# Wien limit equation
plt.plot(x,np.exp(((1)/(x)))*10**(-0.87))
# Slider widget to vary Planck's law
axcolor = 'lightgoldenrodyellow'
axamp = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
samp = Slider(axamp, 'Amp', 0.1, 1.3, valinit=a0)
# Set the function varied by the slider
def update(val):
amp = samp.val
l.set_ydata(((t**5)*(np.exp(1/(t*amp))-1))**(-1))
fig.canvas.draw_idle()
samp.on_changed(update)
# Reset button to clear the color and amp variables
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
samp.reset()
button.on_clicked(reset)
# Color selector radio buttons
rax = plt.axes([0.025, 0.05, 0.15, 0.15], axisbg=axcolor)
radio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)
def colorfunc(label):
l.set_color(label)
fig.canvas.draw_idle()
radio.on_clicked(colorfunc)
# Show the plot
plt.show() | gpl-3.0 |
zhenv5/scikit-learn | sklearn/naive_bayes.py | 70 | 28476 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
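    # Consistency sketch (not part of the estimator): for unweighted batches the
    # online update reproduces the pooled statistics of old and new data, e.g.
    #   old, new = np.random.rand(3, 2), np.random.rand(5, 2)
    #   mu, var = GaussianNB._update_mean_variance(3, old.mean(0), old.var(0), new)
    #   np.allclose(mu, np.vstack([old, new]).mean(0))  # True
    #   np.allclose(var, np.vstack([old, new]).var(0))  # True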
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
epsilon = 1e-9
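        # Small variance offset: it is stripped before each incremental update
        # (below) and added back afterwards, keeping self.sigma_ strictly positive.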
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
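        # In full: log P(X|c) = sum_i [x_i*log(p_i) + (1 - x_i)*log(1 - p_i)]
        #                     = X @ (log(p) - log(1 - p)) + sum_i log(1 - p_i),
        # which are exactly the two terms accumulated below.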
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
| bsd-3-clause |
cauchycui/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
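# gamma sets the RBF kernel width; fit_inverse_transform=True additionally learns
# an approximate pre-image map (a ridge fit back to input space), which is what
# inverse_transform() uses below to produce X_back.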
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
homeslike/OpticalTweezer | scripts/data/vCOMhistogramMass.py | 27 | 3006 | import math
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from subprocess import call
from scipy.stats import norm
# proc = call("ls *.dat",shell=True)
# datetime = "170123_2033_"
datetime = sys.argv[1]+"_"
gasTempDataIn = np.genfromtxt(datetime+"gasTempData.dat",usecols=0,skip_header=100)
gasTempDataOut = np.genfromtxt(datetime+"gasTempData.dat",usecols=1,skip_header=100)
vCOMData_x = np.genfromtxt(datetime+"vCOMData.dat",usecols=0,skip_header=100)
vCOMData_y = np.genfromtxt(datetime+"vCOMData.dat",usecols=1,skip_header=100)
vCOMData_z = np.genfromtxt(datetime+"vCOMData.dat",usecols=2,skip_header=100)
internalTempData = np.genfromtxt(datetime+"temperature_internal.dat",skip_header=200)
N = 32
internalTemp = np.mean(internalTempData)
vSqd = []
for i in range(0,len(vCOMData_x)):
    vSqd.append(N*(vCOMData_x[i]**2+vCOMData_y[i]**2+vCOMData_z[i]**2)*0.5)
vSqdMean = np.mean(vSqd)
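# Each vSqd entry is the centre-of-mass kinetic energy N*v_COM^2/2 in the
# simulation's reduced units (k_B = 1 assumed here), so the T_COM written out
# below follows from equipartition as (2/3) * <vSqd>.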
# histogram_x,bins_x = np.histogram(vCOMData_x,bins=100,normed=False)
# histogram_y,bins_y = np.histogram(vCOMData_y,bins=100,normed=False)
# histogram_z,bins_z = np.histogram(vCOMData_z,bins=100,normed=False)
histogram_x,bins_x = np.histogram(vCOMData_x,bins=100,normed=True)
histogram_y,bins_y = np.histogram(vCOMData_y,bins=100,normed=True)
histogram_z,bins_z = np.histogram(vCOMData_z,bins=100,normed=True)
inTemp = np.mean(gasTempDataIn)
outTemp = np.mean(gasTempDataOut)
statistics = open(datetime+"statistics_mass.dat","w")
statistics.write("GasIn: " + str(inTemp)+" +- " + str(np.std(gasTempDataIn)) + "\n")
statistics.write("GasOut: " + str(outTemp)+" +- " +str(np.std(gasTempDataOut)) + "\n")
statistics.write("T_COM: " + str(2./3. * vSqdMean)+" +- " +str(np.std(vSqd)) + "\n")
statistics.write("T_INT: " + str(internalTemp)+" +- " +str(np.std(internalTempData)) + "\n")
statistics.write("Mu_x " + str(np.mean(vCOMData_x))+"\n")
statistics.write("Sigma_x: " + str(np.std(vCOMData_x))+"\n")
statistics.write("Mu_y " + str(np.mean(vCOMData_y))+"\n")
statistics.write("Sigma_y: " + str(np.std(vCOMData_y))+"\n")
statistics.write("Mu_z " + str(np.mean(vCOMData_z))+"\n")
statistics.write("Sigma_z: " + str(np.std(vCOMData_z))+"\n")
histogram_x_file = open(datetime+"histogram_mass_vx.dat","w")
histogram_y_file = open(datetime+"histogram_mass_vy.dat","w")
histogram_z_file = open(datetime+"histogram_mass_vz.dat","w")
for i in range(0,len(histogram_x)):
histogram_x_file.write(str(bins_x[i]) + "\t" + str(histogram_x[i]) + "\n")
histogram_y_file.write(str(bins_y[i]) + "\t" + str(histogram_y[i]) + "\n")
histogram_z_file.write(str(bins_z[i]) + "\t" + str(histogram_z[i]) + "\n")
# plt.figure(1)
# plt.hist(vCOMData_x,bins=100)
# plt.figure(2)
# plt.hist(vCOMData_y,bins=100)
# plt.figure(3)
# plt.hist(vCOMData_z,bins=100)
# plt.show()
# plt.figure(1)
# plt.plot(vSqd)
# plt.plot((0,700),(vSqdMean,vSqdMean))
# plt.figure(2)
# plt.hist(vCOMData_x,bins=100,normed=True)
# plt.plot(x,gasInPDF)
# plt.show()
| mit |
witcxc/scipy | scipy/spatial/_plotutils.py | 53 | 4034 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.gca()
was_held = ax.ishold()
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
ptp_bound = points.ptp(axis=0)
ax.set_xlim(points[:,0].min() - 0.1*ptp_bound[0],
points[:,0].max() + 0.1*ptp_bound[0])
ax.set_ylim(points[:,1].min() - 0.1*ptp_bound[1],
points[:,1].max() + 0.1*ptp_bound[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
ax.plot(tri.points[:,0], tri.points[:,1], 'o')
ax.triplot(tri.points[:,0], tri.points[:,1], tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
"""
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
for simplex in hull.simplices:
ax.plot(hull.points[simplex,0], hull.points[simplex,1], 'k-')
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
"""
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
ax.plot(vor.points[:,0], vor.points[:,1], '.')
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
for simplex in vor.ridge_vertices:
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
ax.plot(vor.vertices[simplex,0], vor.vertices[simplex,1], 'k-')
ptp_bound = vor.points.ptp(axis=0)
center = vor.points.mean(axis=0)
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.any(simplex < 0):
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
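            # This ridge extends to infinity: draw a dashed segment from the finite
            # vertex, directed away from the centroid of the input points, and long
            # enough (ptp_bound.max()) to leave the plotted region.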
far_point = vor.vertices[i] + direction * ptp_bound.max()
ax.plot([vor.vertices[i,0], far_point[0]],
[vor.vertices[i,1], far_point[1]], 'k--')
_adjust_bounds(ax, vor.points)
return ax.figure
| bsd-3-clause |
pratapvardhan/pandas | pandas/tests/indexes/multi/test_partial_indexing.py | 6 | 3298 | import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, MultiIndex, date_range
def test_partial_string_timestamp_multiindex():
# GH10331
dr = pd.date_range('2016-01-01', '2016-01-03', freq='12H')
abc = ['a', 'b', 'c']
ix = pd.MultiIndex.from_product([dr, abc])
df = pd.DataFrame({'c1': range(0, 15)}, index=ix)
idx = pd.IndexSlice
# c1
# 2016-01-01 00:00:00 a 0
# b 1
# c 2
# 2016-01-01 12:00:00 a 3
# b 4
# c 5
# 2016-01-02 00:00:00 a 6
# b 7
# c 8
# 2016-01-02 12:00:00 a 9
# b 10
# c 11
# 2016-01-03 00:00:00 a 12
# b 13
# c 14
# partial string matching on a single index
for df_swap in (df.swaplevel(),
df.swaplevel(0),
df.swaplevel(0, 1)):
df_swap = df_swap.sort_index()
just_a = df_swap.loc['a']
result = just_a.loc['2016-01-01']
expected = df.loc[idx[:, 'a'], :].iloc[0:2]
expected.index = expected.index.droplevel(1)
tm.assert_frame_equal(result, expected)
# indexing with IndexSlice
result = df.loc[idx['2016-01-01':'2016-02-01', :], :]
expected = df
tm.assert_frame_equal(result, expected)
# match on secondary index
result = df_swap.loc[idx[:, '2016-01-01':'2016-01-01'], :]
expected = df_swap.iloc[[0, 1, 5, 6, 10, 11]]
tm.assert_frame_equal(result, expected)
# Even though this syntax works on a single index, this is somewhat
# ambiguous and we don't want to extend this behavior forward to work
# in multi-indexes. This would amount to selecting a scalar from a
# column.
with pytest.raises(KeyError):
df['2016-01-01']
# partial string match on year only
result = df.loc['2016']
expected = df
tm.assert_frame_equal(result, expected)
# partial string match on date
result = df.loc['2016-01-01']
expected = df.iloc[0:6]
tm.assert_frame_equal(result, expected)
# partial string match on date and hour, from middle
result = df.loc['2016-01-02 12']
expected = df.iloc[9:12]
tm.assert_frame_equal(result, expected)
# partial string match on secondary index
result = df_swap.loc[idx[:, '2016-01-02'], :]
expected = df_swap.iloc[[2, 3, 7, 8, 12, 13]]
tm.assert_frame_equal(result, expected)
# tuple selector with partial string match on date
result = df.loc[('2016-01-01', 'a'), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# Slicing date on first level should break (of course)
with pytest.raises(KeyError):
df_swap.loc['2016-01-01']
# GH12685 (partial string with daily resolution or below)
dr = date_range('2013-01-01', periods=100, freq='D')
ix = MultiIndex.from_product([dr, ['a', 'b']])
df = DataFrame(np.random.randn(200, 1), columns=['A'], index=ix)
result = df.loc[idx['2013-03':'2013-03', :], :]
expected = df.iloc[118:180]
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/model_selection/_validation.py | 4 | 53401 | """
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.deprecation import DeprecationDict
from ..utils.validation import _is_arraylike, _num_samples
from ..utils.metaestimators import _safe_split
from ..externals.joblib import Parallel, delayed, logger
from ..externals.six.moves import zip
from ..metrics.scorer import check_scoring, _check_multimetric_scoring
from ..exceptions import FitFailedWarning
from ._split import check_cv
from ..preprocessing import LabelEncoder
__all__ = ['cross_validate', 'cross_val_score', 'cross_val_predict',
'permutation_test_score', 'learning_curve', 'validation_curve']
def cross_validate(estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', return_train_score="warn"):
"""Evaluate metric(s) by cross-validation and also record fit/score times.
Read more in the :ref:`User Guide <multimetric_cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be for example a list, or an array.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable, list/tuple, dict or None, default: None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's default scorer (if available) is used.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
return_train_score : boolean, optional
Whether to include train scores.
Current default is ``'warn'``, which behaves as ``True`` in addition
to raising a warning when a training score is looked up.
That default will be changed to ``False`` in 0.21.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
Returns
-------
scores : dict of float arrays of shape=(n_splits,)
Array of scores of the estimator for each run of the cross validation.
A dict of arrays containing the score/time arrays for each scorer is
returned. The possible keys for this ``dict`` are:
``test_score``
The score array for test scores on each cv split.
``train_score``
The score array for train scores on each cv split.
This is available only if ``return_train_score`` parameter
is ``True``.
``fit_time``
The time for fitting the estimator on the train
set for each cv split.
``score_time``
The time for scoring the estimator on the test set for each
cv split. (Note time for scoring on the train set is not
included even if ``return_train_score`` is set to ``True``
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_validate
>>> from sklearn.metrics.scorer import make_scorer
>>> from sklearn.metrics import confusion_matrix
>>> from sklearn.svm import LinearSVC
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
Single metric evaluation using ``cross_validate``
>>> cv_results = cross_validate(lasso, X, y, return_train_score=False)
>>> sorted(cv_results.keys()) # doctest: +ELLIPSIS
['fit_time', 'score_time', 'test_score']
>>> cv_results['test_score'] # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
array([ 0.33..., 0.08..., 0.03...])
Multiple metric evaluation using ``cross_validate``
(please refer the ``scoring`` parameter doc for more information)
>>> scores = cross_validate(lasso, X, y,
... scoring=('r2', 'neg_mean_squared_error'))
>>> print(scores['test_neg_mean_squared_error']) # doctest: +ELLIPSIS
[-3635.5... -3573.3... -6114.7...]
>>> print(scores['train_r2']) # doctest: +ELLIPSIS
[ 0.28... 0.39... 0.22...]
See Also
---------
:func:`sklearn.model_selection.cross_val_score`:
Run cross-validation for single metric evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorers, _ = _check_multimetric_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(
delayed(_fit_and_score)(
clone(estimator), X, y, scorers, train, test, verbose, None,
fit_params, return_train_score=return_train_score,
return_times=True)
for train, test in cv.split(X, y, groups))
if return_train_score:
train_scores, test_scores, fit_times, score_times = zip(*scores)
train_scores = _aggregate_score_dicts(train_scores)
else:
test_scores, fit_times, score_times = zip(*scores)
test_scores = _aggregate_score_dicts(test_scores)
# TODO: replace by a dict in 0.21
ret = DeprecationDict() if return_train_score == 'warn' else {}
ret['fit_time'] = np.array(fit_times)
ret['score_time'] = np.array(score_times)
for name in scorers:
ret['test_%s' % name] = np.array(test_scores[name])
if return_train_score:
key = 'train_%s' % name
ret[key] = np.array(train_scores[name])
if return_train_score == 'warn':
message = (
'You are accessing a training score ({!r}), '
'which will not be available by default '
'any more in 0.21. If you need training scores, '
'please set return_train_score=True').format(key)
# warn on key access
ret.add_warning(key, message, FutureWarning)
return ret
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be for example a list, or an array.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.model_selection.cross_validate`:
To run cross-validation on multiple metrics and also to return
train scores, fit times and score times.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
# To ensure multimetric format is not supported
scorer = check_scoring(estimator, scoring=scoring)
cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups,
scoring={'score': scorer}, cv=cv,
return_train_score=False,
n_jobs=n_jobs, verbose=verbose,
fit_params=fit_params,
pre_dispatch=pre_dispatch)
return cv_results['test_score']
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
    return_n_test_samples : boolean, optional, default: False
        Whether to return the number of test samples, ``n_test_samples``.
return_times : boolean, optional, default: False
Whether to return the fit/score times.
Returns
-------
train_scores : dict of scorer name -> float, optional
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float, optional
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
test_scores = {}
train_scores = {}
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
is_multimetric = not callable(scorer)
n_scorers = len(scorer.keys()) if is_multimetric else 1
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
if is_multimetric:
test_scores = dict(zip(scorer.keys(),
[error_score, ] * n_scorers))
if return_train_score:
train_scores = dict(zip(scorer.keys(),
[error_score, ] * n_scorers))
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
            warnings.warn("Estimator fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
# _score will return dict if is_multimetric is True
test_scores = _score(estimator, X_test, y_test, scorer, is_multimetric)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = _score(estimator, X_train, y_train, scorer,
is_multimetric)
if verbose > 2:
if is_multimetric:
for scorer_name, score in test_scores.items():
msg += ", %s=%s" % (scorer_name, score)
else:
msg += ", score=%s" % test_scores
if verbose > 1:
total_time = score_time + fit_time
end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_scores, test_scores] if return_train_score else [test_scores]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([fit_time, score_time])
if return_parameters:
ret.append(parameters)
return ret
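# A minimal sketch of how _fit_and_score is typically invoked by the helpers
# in this module. The dataset, estimator and hand-made split below are
# illustrative assumptions; ``check_scoring`` and ``np`` are already imported
# at the top of this module:
#
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.tree import DecisionTreeClassifier
#   >>> X, y = load_iris(return_X_y=True)
#   >>> est = DecisionTreeClassifier(random_state=0)
#   >>> scorer = check_scoring(est, scoring='accuracy')
#   >>> train, test = np.arange(0, 150, 2), np.arange(1, 150, 2)
#   >>> out = _fit_and_score(est, X, y, scorer, train, test, verbose=0,
#   ...                      parameters=None, fit_params=None,
#   ...                      return_times=True)
#   >>> test_score, fit_time, score_time = out
#
# With a single callable scorer, ``test_score`` is a plain float; with a dict
# of scorers it becomes a dict of floats (see _score below).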
def _score(estimator, X_test, y_test, scorer, is_multimetric=False):
"""Compute the score(s) of an estimator on a given test set.
    Returns a single float if ``is_multimetric`` is False, and a dict of floats
    if ``is_multimetric`` is True.
"""
if is_multimetric:
return _multimetric_score(estimator, X_test, y_test, scorer)
else:
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) "
"instead. (scorer=%r)"
% (str(score), type(score), scorer))
return score
def _multimetric_score(estimator, X_test, y_test, scorers):
    """Return a dict of scores for multimetric scoring."""
scores = {}
for name, scorer in scorers.items():
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
scores[name] = score
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) "
"instead. (scorer=%s)"
% (str(score), type(score), name))
return scores
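# A small sketch of the scorer-dict format consumed by _multimetric_score and
# _score when ``is_multimetric`` is True. The metric choices are illustrative
# assumptions; any fitted estimator and held-out data could be used:
#
#   >>> from sklearn.metrics import make_scorer, accuracy_score, f1_score
#   >>> scorers = {'acc': make_scorer(accuracy_score),
#   ...            'f1_macro': make_scorer(f1_score, average='macro')}
#   >>> # _multimetric_score(fitted_est, X_test, y_test, scorers)
#   >>> # -> {'acc': ..., 'f1_macro': ...}  (one float per scorer name)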
def cross_val_predict(estimator, X, y=None, groups=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator. For
method='predict_proba', the columns correspond to the classes
in sorted order.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Notes
-----
In the case that one or more classes are absent in a training portion, a
default score needs to be assigned to all instances for that class if
``method`` produces columns per class, as in {'decision_function',
'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is
0. In order to ensure finite output, we approximate negative infinity by
the minimum finite float value for the dtype in other cases.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
le = LabelEncoder()
y = le.fit_transform(y)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, y, groups))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
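# A hedged usage sketch for the ``method`` parameter: with
# method='predict_proba' each row holds out-of-fold probability estimates,
# one column per class in sorted order. The classifier and dataset below are
# illustrative assumptions:
#
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.linear_model import LogisticRegression
#   >>> X, y = load_iris(return_X_y=True)
#   >>> proba = cross_val_predict(LogisticRegression(), X, y, cv=3,
#   ...                           method='predict_proba')
#   >>> proba.shape
#   (150, 3)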
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
n_classes = len(set(y))
if n_classes != len(estimator.classes_):
recommendation = (
'To fix this, use a cross-validation '
'technique resulting in properly '
'stratified folds')
warnings.warn('Number of classes in training fold ({}) does '
'not match total number of classes ({}). '
'Results may not be appropriate for your use case. '
'{}'.format(len(estimator.classes_),
n_classes, recommendation),
RuntimeWarning)
if method == 'decision_function':
if (predictions.ndim == 2 and
predictions.shape[1] != len(estimator.classes_)):
# This handles the case when the shape of predictions
# does not match the number of classes used to train
# it with. This case is found when sklearn.svm.SVC is
# set to `decision_function_shape='ovo'`.
raise ValueError('Output shape {} of {} does not match '
'number of classes ({}) in fold. '
'Irregular decision_function outputs '
'are not currently supported by '
'cross_val_predict'.format(
predictions.shape, method,
len(estimator.classes_),
recommendation))
if len(estimator.classes_) <= 2:
# In this special case, `predictions` contains a 1D array.
raise ValueError('Only {} class/es in training fold, this '
'is not supported for decision_function '
'with imbalanced folds. {}'.format(
len(estimator.classes_),
recommendation))
float_min = np.finfo(predictions.dtype).min
default_values = {'decision_function': float_min,
'predict_log_proba': float_min,
'predict_proba': 0}
predictions_for_all_classes = np.full((_num_samples(predictions),
n_classes),
default_values[method])
predictions_for_all_classes[:, estimator.classes_] = predictions
predictions = predictions_for_all_classes
return predictions, test
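# Worked micro-example (assumed numbers) of the class-padding step above: if a
# training fold only contains classes [0, 2] out of three classes and
# method='predict_proba', the fold's 2-column output is scattered into a
# 3-column array via ``estimator.classes_``; the missing class 1 keeps the
# default value (0 for predict_proba, the minimum finite float for
# decision_function / predict_log_proba).
#
#   fold output (columns for classes 0, 2):    [[0.9, 0.1],
#                                               [0.2, 0.8]]
#   padded output (columns for classes 0-2):   [[0.9, 0.0, 0.1],
#                                               [0.2, 0.0, 0.8]]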
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
        True iff sorted(indices) is equal to np.arange(n_samples)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
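# A small sketch of the check above (``np`` is imported at module level):
#
#   >>> _check_is_permutation(np.array([2, 0, 1]), 3)
#   True
#   >>> _check_is_permutation(np.array([0, 0, 1]), 3)   # index 2 is never hit
#   False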
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
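# A minimal sketch of per-fold slicing of fit parameters: array-like values
# whose length matches X (e.g. sample_weight) are indexed with the training
# indices, anything else passes through untouched. The numbers below are
# illustrative assumptions:
#
#   >>> X = np.zeros((4, 2))
#   >>> sample_weight = np.array([1., 2., 3., 4.])
#   >>> w_train = _index_param_value(X, sample_weight, np.array([0, 2]))
#   >>> # w_train is array([1., 3.]): the weights for the training indices
#   >>> _index_param_value(X, 5, np.array([0, 2]))   # scalar: passed through
#   5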
def permutation_test_score(estimator, X, y, groups=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : string, callable or None, optional, default: None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
If None the estimator's default scorer, if available, is used.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
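# Worked example (assumed numbers) for the p-value formula documented above,
# pvalue = (C + 1) / (n_permutations + 1): with n_permutations=100 and C=3
# permutation scores greater than or equal to the unpermuted score,
# pvalue = (3 + 1) / (100 + 1) ~= 0.0396. A minimal usage sketch, where the
# estimator and dataset are illustrative assumptions:
#
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.svm import SVC
#   >>> X, y = load_iris(return_X_y=True)
#   >>> score, perm_scores, pvalue = permutation_test_score(
#   ...     SVC(kernel='linear'), X, y, cv=5, n_permutations=100)
#   >>> perm_scores.shape
#   (100,)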
def _permutation_test_score(estimator, X, y, groups, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, groups):
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
estimator.fit(X_train, y_train)
avg_score.append(scorer(estimator, X_test, y_test))
return np.mean(avg_score)
def _shuffle(y, groups, random_state):
    """Return a shuffled copy of y, optionally shuffling within groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return safe_indexing(y, indices)
def learning_curve(estimator, X, y, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0, shuffle=False,
random_state=None):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be large enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
shuffle : boolean, optional
Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``shuffle`` is True.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Store it as list as we will be iterating over the list multiple times
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv_iter)
else:
train_test_proportions = []
for train, test in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in train_test_proportions)
out = np.array(out)
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
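# A hedged usage sketch for learning_curve; the estimator, dataset and the
# averaging over CV folds are illustrative assumptions:
#
#   >>> from sklearn.datasets import load_digits
#   >>> from sklearn.naive_bayes import GaussianNB
#   >>> X, y = load_digits(return_X_y=True)
#   >>> sizes, train_scores, test_scores = learning_curve(
#   ...     GaussianNB(), X, y, cv=5, train_sizes=np.linspace(0.1, 1.0, 5))
#   >>> train_scores.shape, test_scores.shape   # (n_ticks, n_cv_folds)
#   ((5, 5), (5, 5))
#   >>> mean_test = test_scores.mean(axis=1)    # one mean score per size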
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.floating):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
        warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
                      "of ticks will be less than the size of "
                      "'train_sizes' (%d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
# NOTE do not change order of iteration to allow one time cv splitters
for train, test in cv.split(X, y, groups) for v in param_range)
out = np.asarray(out)
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
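# A hedged usage sketch for validation_curve; the estimator and parameter
# range below are illustrative assumptions:
#
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.svm import SVC
#   >>> X, y = load_iris(return_X_y=True)
#   >>> param_range = np.logspace(-6, -1, 5)
#   >>> train_scores, test_scores = validation_curve(
#   ...     SVC(), X, y, param_name='gamma', param_range=param_range, cv=5)
#   >>> train_scores.shape, test_scores.shape   # (n_values, n_cv_folds)
#   ((5, 5), (5, 5))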
def _aggregate_score_dicts(scores):
    """Aggregate a list of dicts into a dict of np ndarrays
    The aggregated output of _fit_and_score is a list of dicts
    of the form [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...].
    Convert it to a dict of arrays {'prec': np.array([0.1 ...]), ...}.
Parameters
----------
scores : list of dict
List of dicts of the scores for all scorers. This is a flat list,
assumed originally to be of row major order.
Example
-------
>>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3},
... {'a': 10, 'b': 10}] # doctest: +SKIP
>>> _aggregate_score_dicts(scores) # doctest: +SKIP
{'a': array([1, 2, 3, 10]),
'b': array([10, 2, 3, 10])}
"""
out = {}
for key in scores[0]:
out[key] = np.asarray([score[key] for score in scores])
return out
| bsd-3-clause |
teese/pytoxr | setup.py | 1 | 1467 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pytoxr contains tools for the analysis of data from ToxR experiments
Copyright (C) 2016 Mark George Teese
This software is licensed under the permissive MIT License.
"""
from setuptools import setup, find_packages
from os import path
from codecs import open
# grab the long_description from the readme file
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "readme.rst")) as f:
long_description = f.read()
classifiers = """\
Intended Audience :: Science/Research
License :: OSI Approved :: MIT License
Programming Language :: Python :: 3
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Scientific/Engineering :: Chemistry
"""
setup(name='pytoxr',
author="Mark Teese",
url="https://github.com/teese/pytoxr",
download_url = 'https://github.com/teese/pytoxr/archive/0.0.7.tar.gz',
author_email="[email protected]",
description = "Tools for the analysis of data from ToxR experiments.",
long_description=long_description,
long_description_content_type='text/x-rst',
license='MIT',
packages=find_packages(),
classifiers=classifiers.splitlines(),
keywords="ToxR transmembrane TOXCAT TMDhomodimer GALLEX AraTM BacTH",
project_urls={'LangoschLab':'http://cbp.wzw.tum.de/index.php?id=9', "TU_Munich":"https://www.tum.de"},
install_requires=["pandas", "numpy", "matplotlib", "scipy", "seaborn", "eccpy"],
version='0.0.7') | mit |
jpzk/evopy | evopy/examples/experiments/constraints_cmaesrsvc/plot.py | 1 | 3556 | '''
This file is part of evopy.
Copyright 2012 - 2013, Jendrik Poloczek
evopy is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
evopy is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with
evopy. If not, see <http://www.gnu.org/licenses/>.
'''
from sys import path
path.append("../../../..")
from pickle import load
from copy import deepcopy
from numpy import matrix, log10, array
from scipy.stats import wilcoxon
from itertools import chain
from pylab import errorbar
from matplotlib.backends.backend_pdf import PdfPages
from evopy.problems.sphere_problem_origin_r1 import SphereProblemOriginR1
from evopy.problems.sphere_problem_origin_r2 import SphereProblemOriginR2
from evopy.problems.schwefels_problem_26 import SchwefelsProblem26
from evopy.problems.tr_problem import TRProblem
from evopy.metamodel.dses_svc_linear_meta_model import DSESSVCLinearMetaModel
from sklearn.cross_validation import KFold
from evopy.operators.scaling.scaling_standardscore import ScalingStandardscore
from evopy.operators.scaling.scaling_dummy import ScalingDummy
from evopy.metamodel.cv.svc_cv_sklearn_grid_linear import SVCCVSkGridLinear
from evopy.operators.termination.or_combinator import ORCombinator
from evopy.operators.termination.accuracy import Accuracy
from evopy.operators.termination.generations import Generations
from evopy.operators.termination.convergence import Convergence
from evopy.helper.timeseries_aggregator import TimeseriesAggregator
import matplotlib.pyplot as plt
from setup import *
cfcs_file = open("output/cfcs_file.save", "rb")
cfcs = load(cfcs_file)
o_colors = {
TRProblem: "#044977",\
SphereProblemOriginR1: "k",\
SphereProblemOriginR2: "g",\
SchwefelsProblem26: "g"}
o_markers = {
TRProblem: ".",\
SphereProblemOriginR1: "x",\
SphereProblemOriginR2: "+",\
SchwefelsProblem26: "."}
figure_accs = plt.figure(figsize=(8,6), dpi=10, facecolor="w", edgecolor="k")
plt.xlabel("Generation")
plt.ylabel("Restriktionsaufrufe")
plt.ylim([100, 200])
plt.xlim([0, 60])
for problem in problems:
cfcs_agg, errors_agg =\
TimeseriesAggregator(cfcs[problem][optimizers[problem][0]]).get_aggregate()
eb = errorbar(range(0, len(cfcs_agg)),\
cfcs_agg,\
marker=o_markers[problem],
color=o_colors[problem],\
ecolor="#CCCCCC",\
linestyle="none",
yerr=errors_agg)
pp = PdfPages("output/cfcs_cmaes.pdf")
plt.savefig(pp, format='pdf')
pp.close()
figure_accs = plt.figure(figsize=(8,6), dpi=10, facecolor="w", edgecolor="k")
plt.xlabel("Generation")
plt.ylabel("Restriktionsaufrufe")
plt.ylim([100, 200])
plt.xlim([0, 60])
for problem in problems:
cfcs_agg, errors_agg =\
TimeseriesAggregator(cfcs[problem][optimizers[problem][1]]).get_aggregate()
eb = errorbar(range(0, len(cfcs_agg)),\
cfcs_agg,\
marker=o_markers[problem],
color=o_colors[problem],\
ecolor="#CCCCCC",\
linestyle="none",
yerr=errors_agg)
pp = PdfPages("output/cfcs_cmaessvc.pdf")
plt.savefig(pp, format='pdf')
pp.close()
| gpl-3.0 |
spragunr/echolocation | ros_run.py | 1 | 8646 | #!/usr/bin/env python
"""Ros node for for reconstructing depth in real-time using a
pre-trained network. This is a hacked-together proof-of-concept.
"""
import time
import subprocess
import h5py
import threading
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import message_filters
from sound_play.libsoundplay import SoundClient
from pyaudio_utils import AudioPlayer, AudioRecorder
from keras.models import load_model
import align_audio
import tensorflow as tf
from keras.backend import floatx
class Recorder(object):
def __init__(self):
rospy.init_node('ros_record')
self.parse_command_line()
model_file = 'stereo/model_100k3_raw_short.h5'
self.model = load_model(model_file,
custom_objects={'adjusted_mse':adjusted_mse})
        self.image_pub = rospy.Publisher("predicted_depth", Image, queue_size=1)
subprocess.call(["amixer", "-D", "pulse", "sset",
"Master", "{}%".format(self.volume)])
subprocess.call(["amixer", "-D", "pulse", "sset",
"Capture", "{}%".format(self.mic_level)])
self.bridge = CvBridge()
self.audio_player = AudioPlayer(self.chirp_file)
self.audio_recorder = AudioRecorder(channels=self.channels)
rate = rospy.Rate(self.rate)
line = None
im = None
# MAIN LOOP
while not rospy.is_shutdown():
# Play and record audio
self.audio_player.play()
            rospy.sleep(self.delay)  # hack: it takes the sound a while to play...
self.audio_recorder.record(self.record_duration)
audio = self.record()
# Align and shape the audio for the network
aligned = align_audio.align_clip(audio)
aligned = aligned[0:3328,:]
aligned = np.append(aligned[:,0], aligned[:,1])
aligned = aligned / 32000.
aligned = np.reshape(aligned, (1,aligned.size, 1))
# Get the depth prediction from the network
predictions = self.model.predict(aligned, batch_size=1)
predictions = np.exp(np.reshape(predictions, (12,16))) - 1
self.image_pub.publish(self.bridge.cv2_to_imgmsg(predictions))
# Use matplotlib to show the audio and predicted depth
plt.ion()
plt.show()
axes0 = plt.subplot(2,1,1)
axes1 = plt.subplot(2,1,2)
if line is not None:
line.remove()
im.remove()
line, = axes0.plot(aligned[0,:,0])
im = axes1.imshow(predictions, clim=(300, 7000),
interpolation='none')
plt.draw()
plt.pause(1e-17)
rate.sleep()
self.audio_player.shutdown()
self.audio_recorder.shutdown()
def record(self):
self.audio_recorder.record(self.record_duration)
while not self.audio_recorder.done_recording():
rospy.sleep(.005)
audio = self.audio_recorder.get_data()[1]
# Reshape mono to be consistent with stereo
if (len(audio.shape) == 1):
audio = audio.reshape((-1, 1))
return audio
def parse_command_line(self):
parser = argparse.ArgumentParser(
description='Sonar/image/depth data collection tool')
parser.add_argument('--num-channels', type=int,
dest='channels',
metavar="NUM_CHANNELS",default=2,
help='number of audio channels to record')
parser.add_argument('--rate', type=int, metavar="RATE",
default=10, help='rate to record chirps')
parser.add_argument('--duration', type=float, metavar="DURATION",
dest='record_duration',
default=.11, help='duration of audio recordings')
parser.add_argument('--delay', type=float, metavar="DELAY",
default=.0, help=('time in seconds to wait' +
'start of playback and record'))
parser.add_argument('--volume', type=int, metavar="VOLUME",
default=75, help='volume (0-100)')
parser.add_argument('--mic-level', type=int, metavar="MIC_LEVEL",
dest='mic_level',
default=100, help='mic_level (0-100)')
parser.add_argument('-c', '--chirp-file', type=str, metavar="CHIRP_FILE",
default='data/16000to8000.02s.wav',
help='Location of .wav file.')
parser.parse_args(namespace=self)
def init_data_sets(self):
self.h5_file = h5py.File(self.out, 'w')
test_audio = self.record()
self.audio_set = self.h5_file.create_dataset('audio',
(1, test_audio.shape[0], self.channels),
maxshape=(None,
test_audio.shape[0],
self.channels),
dtype=np.int16)
depth_shape = self.latest_depth.shape
self.depth_set = self.h5_file.create_dataset('depth', (10,
depth_shape[0],
depth_shape[1]),
maxshape=(None,
depth_shape[0],
depth_shape[1]),
dtype=self.latest_depth.dtype)
if self.record_rgb:
rgb_shape = self.latest_rgb.shape
self.rgb_set = self.h5_file.create_dataset('rgb', (10,
rgb_shape[0],
rgb_shape[1],
rgb_shape[2]),
maxshape=(None,
rgb_shape[0],
rgb_shape[1],
rgb_shape[2]),
dtype=self.latest_rgb.dtype)
self.time_set = self.h5_file.create_dataset('time', (1,),
maxshape=(None,),
compression="lzf",
dtype=np.float64)
def close_file(self, num_recorded):
self.audio_set.resize(tuple([num_recorded] +
list(self.audio_set.shape[1:])))
self.depth_set.resize(tuple([num_recorded] +
list(self.depth_set.shape[1:])))
if self.record_rgb:
self.rgb_set.resize(tuple([num_recorded] +
list(self.rgb_set.shape[1:])))
self.time_set.resize((num_recorded,))
self.h5_file.close()
def h5_append(self, dset, index, item):
if index == dset.shape[0]:
dset.resize(tuple([index*2] + list(dset.shape[1:])))
dset[index, ...] = item
def depth_callback(self, depth_image):
self.latest_depth = self.bridge.imgmsg_to_cv2(depth_image)
def depth_rgb_callback(self, depth_image, rgb_image):
self.lock.acquire()
self.latest_depth = self.bridge.imgmsg_to_cv2(depth_image)
self.latest_rgb = self.bridge.imgmsg_to_cv2(rgb_image,
"rgb8")
self.lock.release()
def adjusted_mse(y_true, y_pred):
zero = tf.constant(0, dtype=floatx())
ok_entries = tf.not_equal(y_true, zero)
safe_targets = tf.where(ok_entries, y_true, y_pred)
sqr = tf.square(y_pred - safe_targets)
valid = tf.cast(ok_entries, floatx())
num_ok = tf.reduce_sum(valid, axis=-1) # count OK entries
num_ok = tf.maximum(num_ok, tf.ones_like(num_ok)) # avoid divide by zero
return tf.reduce_sum(sqr, axis=-1) / num_ok
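# Worked micro-example (assumed numbers) for adjusted_mse above: target pixels
# equal to 0 are treated as missing depth and excluded from the loss. For a
# single row with y_true = [0, 2, 4] and y_pred = [9, 3, 5], only the last two
# entries count, so the loss is ((3 - 2)**2 + (5 - 4)**2) / 2 = 1.0; the first
# entry contributes nothing no matter what is predicted there.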
if __name__ == "__main__":
Recorder()
| mit |
cxhernandez/msmbuilder | msmbuilder/cluster/minibatchkmedoids.py | 9 | 8242 | # Author: Robert McGibbon <[email protected]>
# Contributors: Brooke Husic <[email protected]>
# Copyright (c) 2016, Stanford University
# All rights reserved.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, division
from operator import itemgetter
import numpy as np
from sklearn.utils import check_random_state
from sklearn.base import ClusterMixin, TransformerMixin
from . import MultiSequenceClusterMixin
from . import _kmedoids
from .. import libdistance
from ..base import BaseEstimator
class _MiniBatchKMedoids(ClusterMixin, TransformerMixin):
"""Mini-Batch K-Medoids clustering.
This method finds a set of cluster centers that are themselves data points,
attempting to minimize the mean-squared distance from the datapoints to
their assigned cluster centers using only mini-batches of the dataset.
Mini batches of the dataset are selected, and augmented to include each
of the cluster centers. Then, standard KMedoids clustering is performed
on the batch, using code based on the C clustering library [1]. The memory
requirement scales as the square ``batch_size`` instead of the square of
the size of the dataset.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default=5
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
batch_size : int, optional, default: 100
Size of the mini batches.
    metric : {"euclidean", "sqeuclidean", "cityblock", "chebyshev", "canberra",
              "braycurtis", "hamming", "jaccard", "rmsd"}
The distance metric to use. metric = "rmsd" requires that sequences
passed to ``fit()`` be ```md.Trajectory```; other distance metrics
require ``np.ndarray``s.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
batches that do not lead to any modified assignments.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
References
----------
.. [1] de Hoon, Michiel JL, et al. "Open source clustering software."
Bioinformatics 20.9 (2004): 1453-1454.
See Also
--------
KMedoids:
        Batch version, requiring O(N^2) memory.
Attributes
----------
cluster_ids_ : array, [n_clusters]
Index of the data point that each cluster label corresponds to.
cluster_centers_ : array, [n_clusters, n_features] or md.Trajectory
Coordinates of cluster centers.
labels_ : array, [n_samples,]
The label of each point is an integer in [0, n_clusters).
inertia_ : float
Sum of distances of samples to their closest cluster center.
"""
def __init__(self, n_clusters=8, max_iter=5, batch_size=100,
metric='euclidean', max_no_improvement=10, random_state=None):
self.n_clusters = n_clusters
self.batch_size = batch_size
self.max_iter = max_iter
self.max_no_improvement = max_no_improvement
self.metric = metric
self.random_state = random_state
def fit(self, X, y=None):
if isinstance(X, np.ndarray):
if not (X.dtype == 'float32' or X.dtype == 'float64'):
X = X.astype('float64')
n_samples = len(X)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
random_state = check_random_state(self.random_state)
cluster_ids_ = random_state.randint(0, n_samples, size=self.n_clusters)
labels_ = random_state.randint(0, self.n_clusters, size=n_samples)
n_iters_no_improvement = 0
for kk in range(n_iter):
# each minibatch includes the random indices AND the
# current cluster centers
minibatch_indices = np.concatenate([
cluster_ids_,
random_state.randint(0, n_samples, self.batch_size),
])
dmat = libdistance.pdist(X, metric=self.metric, X_indices=np.array(minibatch_indices, dtype=np.intp))
minibatch_labels = np.array(np.concatenate([
np.arange(self.n_clusters),
labels_[minibatch_indices[self.n_clusters:]]
]), dtype=np.intp)
            ids, inertia, _ = _kmedoids.kmedoids(
self.n_clusters, dmat, 0, minibatch_labels,
random_state=random_state)
minibatch_labels, m = _kmedoids.contigify_ids(ids)
# Copy back the new cluster_ids_ for the centers
minibatch_cluster_ids = np.array(
sorted(m.items(), key=itemgetter(1)))[:, 0]
cluster_ids_ = minibatch_indices[minibatch_cluster_ids]
# Copy back the new labels for the elements
n_changed = np.sum(labels_[minibatch_indices] != minibatch_labels)
if n_changed == 0:
n_iters_no_improvement += 1
else:
labels_[minibatch_indices] = minibatch_labels
n_iters_no_improvement = 0
if n_iters_no_improvement >= self.max_no_improvement:
break
self.cluster_ids_ = cluster_ids_
self.cluster_centers_ = X[cluster_ids_]
self.labels_, self.inertia_ = libdistance.assign_nearest(
X, self.cluster_centers_, metric=self.metric)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
New data to predict.
Returns
-------
Y : array, shape [n_samples,]
Index of the closest center each sample belongs to.
"""
if isinstance(X, np.ndarray):
if not (X.dtype == 'float32' or X.dtype == 'float64'):
X = X.astype('float64')
labels, inertia = libdistance.assign_nearest(
X, self.cluster_centers_, metric=self.metric)
return labels
def fit_predict(self, X, y=None):
return self.fit(X, y).labels_
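# A hedged usage sketch for the single-sequence estimator above; the random
# data and parameter choices are illustrative assumptions (``np`` is imported
# at module level):
#
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(1000, 5)
#   >>> mbkm = _MiniBatchKMedoids(n_clusters=8, batch_size=100,
#   ...                           random_state=0)
#   >>> labels = mbkm.fit_predict(X)
#   >>> labels.shape, mbkm.cluster_centers_.shape
#   ((1000,), (8, 5))
#
# The public MiniBatchKMedoids class below instead takes a list of sequences
# (or md.Trajectory objects) and splits the fitted labels back per sequence.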
class MiniBatchKMedoids(MultiSequenceClusterMixin, _MiniBatchKMedoids, BaseEstimator):
_allow_trajectory = True
__doc__ = _MiniBatchKMedoids.__doc__[: _MiniBatchKMedoids.__doc__.find('Attributes')] + \
'''
Attributes
----------
`cluster_centers_` : array, [n_clusters, n_features]
Coordinates of cluster centers
`labels_` : list of arrays, each of shape [sequence_length, ]
`labels_[i]` is an array of the labels of each point in
sequence `i`. The label of each point is an integer in
[0, n_clusters).
'''
def fit(self, sequences, y=None):
"""Fit the kcenters clustering on the data
Parameters
----------
sequences : list of array-like, each of shape [sequence_length, n_features]
A list of multivariate timeseries, or ``md.Trajectory``. Each
sequence may have a different length, but they all must have the
same number of features, or the same number of atoms if they are
``md.Trajectory``s.
Returns
-------
self
"""
MultiSequenceClusterMixin.fit(self, sequences)
self.cluster_ids_ = self._split_indices(self.cluster_ids_)
return self
def summarize(self):
return """MiniBatchKMedoids clustering
----------------------------
n_clusters : {n_clusters}
metric : {metric}
Inertia : {inertia_}
""".format(**self.__dict__)
| lgpl-2.1 |
jblackburne/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
joernhees/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that much iters
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
automl/paramsklearn | tests/components/feature_preprocessing/test_feature_agglomeration.py | 1 | 1946 | import unittest
from sklearn.ensemble import RandomForestClassifier
from ParamSklearn.components.feature_preprocessing.feature_agglomeration import FeatureAgglomeration
from ParamSklearn.util import _test_preprocessing, PreprocessingTestCase, \
get_dataset
import sklearn.metrics
class FeatureAgglomerationComponentTest(PreprocessingTestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(FeatureAgglomeration)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertFalse((transformation == 0).all())
def test_default_configuration_classify(self):
for i in range(3):
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=False)
configuration_space = FeatureAgglomeration.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = FeatureAgglomeration(random_state=1,
**{hp_name: default[hp_name] for
hp_name in default})
preprocessor.fit(X_train, Y_train)
X_train_trans = preprocessor.transform(X_train)
X_test_trans = preprocessor.transform(X_test)
# fit a classifier on top
classifier = RandomForestClassifier(random_state=1)
predictor = classifier.fit(X_train_trans, Y_train)
predictions = predictor.predict(X_test_trans)
accuracy = sklearn.metrics.accuracy_score(predictions, Y_test)
self.assertAlmostEqual(accuracy, 0.8026715)
def test_preprocessing_dtype(self):
super(FeatureAgglomerationComponentTest,
self)._test_preprocessing_dtype(FeatureAgglomeration,
test_sparse=False)
| bsd-3-clause |
lsbardel/zipline | tests/test_data_util.py | 4 | 3092 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import deque
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from zipline.utils.data import RollingPanel
class TestRollingPanel(unittest.TestCase):
def test_basics(self):
items = ['foo', 'bar', 'baz']
minor = ['A', 'B', 'C', 'D']
window = 10
rp = RollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=30, tz='utc')
major_deque = deque()
frames = {}
for i in range(30):
frame = pd.DataFrame(np.random.randn(3, 4), index=items,
columns=minor)
date = dates[i]
rp.add_frame(date, frame)
frames[date] = frame
major_deque.append(date)
if i >= window:
major_deque.popleft()
result = rp.get_current()
expected = pd.Panel(frames, items=list(major_deque),
major_axis=items, minor_axis=minor)
tm.assert_panel_equal(result, expected.swapaxes(0, 1))
def f(option='clever', n=500, copy=False):
items = range(5)
minor = range(20)
window = 100
periods = n
dates = pd.date_range('2000-01-01', periods=periods, tz='utc')
frames = {}
if option == 'clever':
rp = RollingPanel(window, items, minor, cap_multiple=2)
major_deque = deque()
dummy = pd.DataFrame(np.random.randn(len(items), len(minor)),
index=items, columns=minor)
for i in range(periods):
frame = dummy * (1 + 0.001 * i)
date = dates[i]
rp.add_frame(date, frame)
frames[date] = frame
major_deque.append(date)
if i >= window:
del frames[major_deque.popleft()]
result = rp.get_current()
if copy:
result = result.copy()
else:
major_deque = deque()
dummy = pd.DataFrame(np.random.randn(len(items), len(minor)),
index=items, columns=minor)
for i in range(periods):
frame = dummy * (1 + 0.001 * i)
date = dates[i]
frames[date] = frame
major_deque.append(date)
if i >= window:
del frames[major_deque.popleft()]
result = pd.Panel(frames, items=list(major_deque),
major_axis=items, minor_axis=minor)
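def _compare_options(n=500, repeat=3):
    # Illustrative benchmarking sketch (not part of the original module): time
    # the 'clever' RollingPanel path against the naive Panel rebuild done by
    # f() above. The use of timeit and the parameter values are assumptions.
    import timeit
    clever = min(timeit.repeat(lambda: f(option='clever', n=n),
                               number=1, repeat=repeat))
    naive = min(timeit.repeat(lambda: f(option='naive', n=n),
                              number=1, repeat=repeat))
    return clever, naive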
| apache-2.0 |
openfisca/legislation-ipp-to-code | parser_old.py | 1 | 7280 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import datetime
import math
import os
import numpy as np
import pandas as pd
# Architecture:
# an xlsx file contains sheets, which contain variables; each sheet has a vector of dates
def clean_date(date_time):
''' Convert dates specified as a bare year to the year/01/01 format
and reset the day of full dates to the first of the month '''
if len(str(date_time)) == 4 :
return datetime.date(date_time, 1, 1)
else:
return date_time.date().replace(day = 1)
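# Worked examples for clean_date (added as an illustrative sketch; the input
# values are hypothetical):
#   clean_date(2004) -> datetime.date(2004, 1, 1)
#   clean_date(datetime.datetime(2004, 7, 15)) -> datetime.date(2004, 7, 1)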
def clean_sheet(xls_file, sheet_name):
''' Clean Excel sheets and create a small database '''
sheet = xls_file.parse(sheet_name, index_col = None)
# Keep the right columns: drop every "Unnamed" column
for col in sheet.columns.values:
if col[0:7] == 'Unnamed':
sheet = sheet.drop([col], 1)
# For now, also drop all the ref_leg, jorf and notes columns
for var_to_drop in ['ref_leg', 'jorf', 'Notes', 'notes', 'date_ir'] :
if var_to_drop in sheet.columns.values:
sheet = sheet.drop(var_to_drop, axis = 1)
# For the income tax, there are date_IR and date_rev: we use date_rev, renamed to date for consistency
if 'date_rev' in sheet.columns.values:
sheet = sheet.rename(columns={'date_rev':u'date'})
# Keep the right rows: drop rows whose date column contains text or NaN
def is_var_nan(row,col):
return isinstance(sheet.iloc[row, col], float) and math.isnan(sheet.iloc[row, col])
sheet['date_absente'] = False
for i in range(0,sheet.shape[0]):
sheet.loc[i,['date_absente']] = isinstance(sheet.iat[i,0], basestring) or is_var_nan(i,0)
sheet = sheet[sheet.date_absente == False]
sheet = sheet.drop(['date_absente'], axis = 1)
# If there is text in the middle of the table (explanations, for instance) => turn it into NaN
for col in range(0, sheet.shape[1]):
for row in range(0,sheet.shape[0]):
if isinstance(sheet.iloc[row,col], unicode):
sheet.iat[row,col] = np.nan
# Handle the progressive removal and introduction of schemes
sheet.iloc[0, :].fillna('-', inplace = True)
# TODO: Handle currencies (problem: only amounts and monetary values should be divided, not rates or conditions).
# TODO: Use the rows dropped at the top to build labels
# TODO: Use the rows dropped at the bottom and on the right to provide information about the legislation (references, notes...)
assert 'date' in sheet.columns, "Aucune colonne date dans la feuille : {}".format(sheet)
sheet['date'] =[ clean_date(d) for d in sheet['date']]
return sheet
def sheet_to_dic(xls_file, sheet):
dic = {}
sheet = clean_sheet(xls_file, sheet)
sheet.index = sheet['date']
for var in sheet.columns.values:
dic[var] = sheet[var]
for var in sheet.columns:
print sheet[var]
return dic
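# Sketch of the structure returned by sheet_to_dic (illustrative; the variable
# name below is hypothetical): a dict mapping each column of the cleaned sheet,
# including 'date', to the corresponding pandas Series indexed by date, e.g.
# {'date': <Series>, 'smic_horaire': <Series>}.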
def dic_of_same_variable_names(xls_file, sheet_names):
dic = {}
all_variables = np.zeros(1)
multiple_names = []
for sheet_name in sheet_names:
dic[sheet_name]= clean_sheet(xls_file, sheet_name)
sheet = clean_sheet(xls_file, sheet_name)
columns = np.delete(sheet.columns.values,0)
all_variables = np.append(all_variables,columns)
for i in range(0,len(all_variables)):
var = all_variables[i]
new_variables = np.delete(all_variables,i)
if var in new_variables:
multiple_names.append(str(var))
multiple_names = list(set(multiple_names))
dic_var_to_sheet={}
for sheet_name in sheet_names:
sheet = clean_sheet(xls_file, sheet_name)
columns = np.delete(sheet.columns.values,0)
for var in multiple_names:
if var in columns:
if var in dic_var_to_sheet.keys():
dic_var_to_sheet[var].append(sheet_name)
else:
dic_var_to_sheet[var] = [sheet_name]
return dic_var_to_sheet
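# Sketch of the expected output of dic_of_same_variable_names (illustrative;
# the sheet and variable names are hypothetical): if 'taux_plein' appears in
# both the 'retraite' and 'chomage' sheets, the function returns
# {'taux_plein': ['retraite', 'chomage']}; an empty dict means no duplicates.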
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument('-d', '--dir', default = u"P:/Legislation/Barèmes IPP/", help = 'path of IPP XLS directory')
parser.add_argument('-d', '--dir', default = u"/Users/malkaguillot/Documents/Baremes_IPP/", help = 'path of IPP XLS directory')
args = parser.parse_args()
baremes = [u'Prestations', u'prélèvements sociaux', u'Impôt Revenu']
forbiden_sheets = {u'Impôt Revenu' : (u'Barème IGR',),
u'prélèvements sociaux' : (u'Abréviations', u'ASSIETTE PU', u'AUBRYI')}
for bareme in baremes :
xls_path = os.path.join(args.dir, u"Barèmes IPP - {0}.xlsx".format(bareme))
xls_file = pd.ExcelFile(xls_path)
# Remove the sheets that we do not want to import
sheets_to_remove = (u'Sommaire', u'Outline')
if bareme in forbiden_sheets.keys():
sheets_to_remove += forbiden_sheets[bareme]
sheet_names = [
sheet_name
for sheet_name in xls_file.sheet_names
if not sheet_name.startswith(sheets_to_remove)
]
# Check whether two variables share the same name
test_duplicate = dic_of_same_variable_names(xls_file, sheet_names)
assert not test_duplicate, u'Au moins deux variables ont le même nom dans le classeur {} : u{}'.format(
bareme,test_duplicate)
# Build the dictionary: key = 'variable name' / value = 'vector of values indexed by date'
mega_dic = {}
for sheet_name in sheet_names:
mega_dic.update(sheet_to_dic(xls_file, sheet_name))
date_list = [
datetime.date(year, month, 1)
for year in range(1914, 2021)
for month in range(1, 13)
]
table = pd.DataFrame(index = date_list)
for var_name, v in mega_dic.iteritems():
table[var_name] = np.nan
table.loc[v.index.values, var_name] = v.values
table = table.fillna(method = 'pad')
table = table.dropna(axis = 0, how = 'all')
table.to_csv(bareme + '.csv')
print u"Voilà, la table agrégée de {} est créée !".format(bareme)
# sheet = xls_file.parse('majo_excep', index_col = None)
| agpl-3.0 |
pratapvardhan/pandas | pandas/plotting/_tools.py | 5 | 12864 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import warnings
from math import ceil
import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.index import Index
from pandas.compat import range
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
try:
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(rot)
fig = ax.get_figure()
fig.subplots_adjust(bottom=0.2)
except Exception: # pragma: no cover
pass
def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
"""
Helper function to convert DataFrame and Series to matplotlib.table
Parameters
----------
`ax`: Matplotlib axes object
`data`: DataFrame or Series
data for table contents
`kwargs`: keywords, optional
keyword arguments which are passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column
name will be used.
Returns
-------
matplotlib table object
"""
from pandas import DataFrame
if isinstance(data, ABCSeries):
data = DataFrame(data, columns=[data.name])
elif isinstance(data, DataFrame):
pass
else:
raise ValueError('Input data must be DataFrame or Series')
if rowLabels is None:
rowLabels = data.index
if colLabels is None:
colLabels = data.columns
cellText = data.values
import matplotlib.table
table = matplotlib.table.table(ax, cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels, **kwargs)
return table
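def _example_table_usage():  # pragma: no cover
    # Minimal usage sketch for ``table`` above, added for illustration only;
    # the DataFrame contents and the ``loc`` keyword value are assumptions,
    # not part of the original module.
    import matplotlib.pyplot as plt
    from pandas import DataFrame
    fig, ax = plt.subplots()
    ax.axis('off')  # hide the axes so only the table is visible
    data = DataFrame({'a': [1, 2], 'b': [3, 4]})
    return table(ax, data, loc='center')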
def _get_layout(nplots, layout=None, layout_type='box'):
if layout is not None:
if not isinstance(layout, (tuple, list)) or len(layout) != 2:
raise ValueError('Layout must be a tuple of (rows, columns)')
nrows, ncols = layout
# Python 2 compat
ceil_ = lambda x: int(ceil(x))
if nrows == -1 and ncols > 0:
layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols)
elif ncols == -1 and nrows > 0:
layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
if nrows * ncols < nplots:
raise ValueError('Layout of {nrows}x{ncols} must be larger '
'than required size {nplots}'.format(
nrows=nrows, ncols=ncols, nplots=nplots))
return layout
if layout_type == 'single':
return (1, 1)
elif layout_type == 'horizontal':
return (1, nplots)
elif layout_type == 'vertical':
return (nplots, 1)
layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}
try:
return layouts[nplots]
except KeyError:
k = 1
while k ** 2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
return k, k
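# Worked examples for _get_layout (illustrative sketch):
#   _get_layout(3)                       -> (2, 2)   # from the lookup table
#   _get_layout(5, layout=(-1, 3))       -> (2, 3)   # ceil(5 / 3) rows
#   _get_layout(4, layout_type='single') -> (1, 1)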
# copied from matplotlib/pyplot.py and modified for pandas.plotting
def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, layout=None, layout_type='box',
**fig_kw):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
naxes : int
Number of required axes. Any axes in excess are set invisible. Default is
nrows * ncols.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharey : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
array of Axis objects.
- NxM subplots with N>1 and M>1 are returned as a 2-d array.
If False, no squeezing is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
ax : Matplotlib axis object, optional
layout : tuple
Number of rows and columns of the subplot grid.
If not specified, calculated from naxes and layout_type
layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
Specify how to layout the subplot grid.
fig_kw : Other keyword arguments to be passed to the figure() call.
Note that all keywords not recognized above will be
automatically included here.
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
if is_list_like(ax):
ax = _flatten(ax)
if layout is not None:
warnings.warn("When passing multiple axes, layout keyword is "
"ignored", UserWarning)
if sharex or sharey:
warnings.warn("When passing multiple axes, sharex and sharey "
"are ignored. These settings must be specified "
"when creating axes", UserWarning,
stacklevel=4)
if len(ax) == naxes:
fig = ax[0].get_figure()
return fig, ax
else:
raise ValueError("The number of passed axes must be {0}, the "
"same as the output plot".format(naxes))
fig = ax.get_figure()
# if ax is passed and a number of subplots is 1, return ax as it is
if naxes == 1:
if squeeze:
return fig, ax
else:
return fig, _flatten(ax)
else:
warnings.warn("To output multiple subplots, the figure containing "
"the passed axes is being cleared", UserWarning,
stacklevel=4)
fig.clear()
nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)
nplots = nrows * ncols
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then reshape it at the end
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw['sharex'] = ax0
if sharey:
subplot_kw['sharey'] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
kwds = subplot_kw.copy()
# Set sharex and sharey to None for blank/dummy axes, these can
# interfere with proper axis limits on the visible axes if
# they share axes e.g. issue #7528
if i >= naxes:
kwds['sharex'] = None
kwds['sharey'] = None
ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)
axarr[i] = ax
if naxes != nplots:
for ax in axarr[naxes:]:
ax.set_visible(False)
_handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
axes = axarr.reshape(nrows, ncols)
return fig, axes
def _remove_labels_from_axis(axis):
for t in axis.get_majorticklabels():
t.set_visible(False)
try:
# set_visible will not be effective if
# minor axis has NullLocator and NullFormatter (default)
import matplotlib.ticker as ticker
if isinstance(axis.get_minor_locator(), ticker.NullLocator):
axis.set_minor_locator(ticker.AutoLocator())
if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):
axis.set_minor_formatter(ticker.FormatStrFormatter(''))
for t in axis.get_minorticklabels():
t.set_visible(False)
except Exception: # pragma: no cover
raise
axis.get_label().set_visible(False)
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if nplots > 1:
if nrows > 1:
try:
# first find out the ax layout,
# so that we can correctly handle "gaps"
layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool)
for ax in axarr:
layout[ax.rowNum, ax.colNum] = ax.get_visible()
for ax in axarr:
# only the last row of subplots should get x labels -> all
# others off; the layout check handles the case where the subplot is
# the last in its column, because there is no subplot/gap below it.
if not layout[ax.rowNum + 1, ax.colNum]:
continue
if sharex or len(ax.get_shared_x_axes()
.get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
except IndexError:
# if gridspec is used, ax.rowNum and ax.colNum may different
# from layout shape. in this case, use last_row logic
for ax in axarr:
if ax.is_last_row():
continue
if sharex or len(ax.get_shared_x_axes()
.get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
if ncols > 1:
for ax in axarr:
# only the first column should get y labels -> set all others to
# off; as we only have labels in the first column and we always
# have a subplot there, we can skip the layout test
if ax.is_first_col():
continue
if sharey or len(ax.get_shared_y_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.yaxis)
def _flatten(axes):
if not is_list_like(axes):
return np.array([axes])
elif isinstance(axes, (np.ndarray, Index)):
return axes.ravel()
return np.array(axes)
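# Behaviour sketch for _flatten (illustrative): a single Axes object comes back
# wrapped in a 1-element array, an (n, m) ndarray of Axes is raveled to length
# n * m, and a plain list is converted to a numpy array as-is.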
def _get_all_lines(ax):
lines = ax.get_lines()
if hasattr(ax, 'right_ax'):
lines += ax.right_ax.get_lines()
if hasattr(ax, 'left_ax'):
lines += ax.left_ax.get_lines()
return lines
def _get_xlim(lines):
left, right = np.inf, -np.inf
for l in lines:
x = l.get_xdata(orig=False)
left = min(np.nanmin(x), left)
right = max(np.nanmax(x), right)
return left, right
def _set_ticks_props(axes, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None):
import matplotlib.pyplot as plt
for ax in _flatten(axes):
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
return axes
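def _example_set_ticks_props():  # pragma: no cover
    # Illustrative sketch (not part of the original module): rotate and shrink
    # the x tick labels on a 2x2 grid of subplots; the figure and the parameter
    # values are assumptions.
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(2, 2)
    return _set_ticks_props(axes, xlabelsize=8, xrot=45)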
| bsd-3-clause |
lbishal/scikit-learn | sklearn/tests/test_cross_validation.py | 20 | 46586 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be more than 2d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Check that the folds keep the class proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit: indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (classes are balanced so f1_score should be equal to the zero/one
# score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
rajul/ginga | ginga/web/pgw/Plot.py | 3 | 4306 | #
# Plot.py -- Plotting widget canvas wrapper.
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from io import BytesIO
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from ginga.web.pgw import Widgets
class PlotWidget(Widgets.Canvas):
"""
This class implements the server-side backend of the surface for a
web-based plot viewer. It uses a web socket to connect to an HTML5
canvas with javascript callbacks in a web browser on the client.
The viewer is created separately on the backend and connects to this
surface via the set_viewer() method.
"""
def __init__(self, plot, width=500, height=500):
super(PlotWidget, self).__init__(width=width, height=height)
self.widget = FigureCanvas(plot.get_figure())
self.logger = plot.logger
self._configured = False
self.refresh_delay = 0.010
self.set_plot(plot)
def set_plot(self, plot):
self.logger.debug("set_plot called")
self.plot = plot
self._dispatch_event_table = {
"activate": self.ignore_event,
"setbounds": self.map_event_cb,
"mousedown": self.ignore_event,
"mouseup": self.ignore_event,
"mousemove": self.ignore_event,
"mouseout": self.ignore_event,
"mouseover": self.ignore_event,
"mousewheel": self.ignore_event,
"wheel": self.ignore_event,
"click": self.ignore_event,
"dblclick": self.ignore_event,
"keydown": self.ignore_event,
"keyup": self.ignore_event,
"keypress": self.ignore_event,
"resize": self.resize_event,
"focus": self.ignore_event,
"focusout": self.ignore_event,
"blur": self.ignore_event,
"drop": self.ignore_event,
"paste": self.ignore_event,
# Hammer.js events
"pinch": self.ignore_event,
"pinchstart": self.ignore_event,
"pinchend": self.ignore_event,
"rotate": self.ignore_event,
"rotatestart": self.ignore_event,
"rotateend": self.ignore_event,
"tap": self.ignore_event,
"pan": self.ignore_event,
"panstart": self.ignore_event,
"panend": self.ignore_event,
"swipe": self.ignore_event,
}
self.plot.add_callback('draw-canvas', self.draw_cb)
self.add_timer('refresh', self.refresh_cb)
def get_plot(self):
return self.plot
def ignore_event(self, event):
pass
def refresh_cb(self):
app = self.get_app()
app.do_operation('refresh_canvas', id=self.id)
self.reset_timer('refresh', self.refresh_delay)
def get_rgb_buffer(self, plot):
buf = BytesIO()
fig = plot.get_figure()
fig.canvas.print_figure(buf, format='png')
wd, ht = self.width, self.height
return (wd, ht, buf.getvalue())
def draw_cb(self, plot):
self.logger.debug("getting RGB buffer")
wd, ht, buf = self.get_rgb_buffer(plot)
#self.logger.debug("clear_rect")
#self.clear_rect(0, 0, wd, ht)
self.logger.debug("drawing %dx%d image" % (wd, ht))
self.draw_image(buf, 0, 0, wd, ht)
self.reset_timer('refresh', self.refresh_delay)
def configure_window(self, wd, ht):
self.logger.debug("canvas resized to %dx%d" % (wd, ht))
fig = self.plot.get_figure()
fig.set_size_inches(float(wd) / fig.dpi, float(ht) / fig.dpi)
def map_event_cb(self, event):
wd, ht = event.width, event.height
self.configure_window(wd, ht)
self.plot.draw()
def resize_event(self, event):
wd, ht = event.x, event.y
self.configure_window(wd, ht)
self.plot.draw()
def _cb_redirect(self, event):
method = self._dispatch_event_table[event.type]
try:
method(event)
except Exception as e:
self.logger.error("error redirecting '%s' event: %s" % (
event.type, str(e)))
# TODO: dump traceback to debug log
#END
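# --- Hedged usage sketch (comment form only; not part of the original module).
# --- Running it needs the pgw web application context, and the import location
# --- and constructor signature of the backend Plot class are assumptions, so
# --- the example is left as comments rather than executable statements.
#
#     import logging
#     from ginga.util import plots                    # assumed location of Plot
#
#     logger = logging.getLogger('example')
#     plot = plots.Plot(logger=logger, width=600, height=600)  # assumed signature
#     widget = PlotWidget(plot, width=600, height=600)         # defined above
#     assert widget.get_plot() is plot
#     # plot.draw() fires the 'draw-canvas' callback registered in set_plot(),
#     # which renders the figure to PNG and pushes it to the HTML5 canvas.
#     plot.draw()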
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/matplotlib/backend_managers.py | 10 | 11976 | """
`ToolManager`
Class that makes the bridge between user interaction (key press,
toolbar clicks, ..) and the actions in response to the user inputs.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import matplotlib.cbook as cbook
import matplotlib.widgets as widgets
from matplotlib.rcsetup import validate_stringlist
import matplotlib.backend_tools as tools
class ToolEvent(object):
"""Event for tool manipulation (add/remove)"""
def __init__(self, name, sender, tool, data=None):
self.name = name
self.sender = sender
self.tool = tool
self.data = data
class ToolTriggerEvent(ToolEvent):
"""Event to inform that a tool has been triggered"""
def __init__(self, name, sender, tool, canvasevent=None, data=None):
ToolEvent.__init__(self, name, sender, tool, data)
self.canvasevent = canvasevent
class ToolManagerMessageEvent(object):
"""
Event carrying messages from toolmanager
Messages usually get displayed to the user by the toolbar
"""
def __init__(self, name, sender, message):
self.name = name
self.sender = sender
self.message = message
class ToolManager(object):
"""
Helper class that groups all the user interactions for a FigureManager
Attributes
----------
manager: `FigureManager`
keypresslock: `widgets.LockDraw`
`LockDraw` object to know if the `canvas` key_press_event is locked
messagelock: `widgets.LockDraw`
`LockDraw` object to know if the message is available to write
"""
def __init__(self, canvas):
warnings.warn('Treat the new Tool classes introduced in v1.5 as ' +
'experimental for now, the API will likely change in ' +
'version 2.1 and perhaps the rcParam as well')
self.canvas = canvas
self._key_press_handler_id = self.canvas.mpl_connect(
'key_press_event', self._key_press)
self._tools = {}
self._keys = {}
self._toggled = {}
self._callbacks = cbook.CallbackRegistry()
# to process keypress event
self.keypresslock = widgets.LockDraw()
self.messagelock = widgets.LockDraw()
def toolmanager_connect(self, s, func):
"""
Connect event with string *s* to *func*.
Parameters
----------
s : String
Name of the event
The following events are recognized
- 'tool_message_event'
- 'tool_removed_event'
- 'tool_added_event'
For every tool added a new event is created
- 'tool_trigger_TOOLNAME`
Where TOOLNAME is the id of the tool.
func : function
Function to be called with signature
def func(event)
"""
return self._callbacks.connect(s, func)
def toolmanager_disconnect(self, cid):
"""
Disconnect callback id *cid*
Example usage::
cid = toolmanager.toolmanager_connect('tool_trigger_zoom',
on_press)
#...later
toolmanager.toolmanager_disconnect(cid)
"""
return self._callbacks.disconnect(cid)
def message_event(self, message, sender=None):
""" Emit a `ToolManagerMessageEvent`"""
if sender is None:
sender = self
s = 'tool_message_event'
event = ToolManagerMessageEvent(s, sender, message)
self._callbacks.process(s, event)
@property
def active_toggle(self):
"""Currently toggled tools"""
return self._toggled
def get_tool_keymap(self, name):
"""
Get the keymap associated with the specified tool
Parameters
----------
name : string
Name of the Tool
Returns
-------
list : list of keys associated with the Tool
"""
keys = [k for k, i in six.iteritems(self._keys) if i == name]
return keys
def _remove_keys(self, name):
for k in self.get_tool_keymap(name):
del self._keys[k]
def update_keymap(self, name, *keys):
"""
Set the keymap to associate with the specified tool
Parameters
----------
name : string
Name of the Tool
keys : keys to associate with the Tool
"""
if name not in self._tools:
raise KeyError('%s not in Tools' % name)
self._remove_keys(name)
for key in keys:
for k in validate_stringlist(key):
if k in self._keys:
warnings.warn('Key %s changed from %s to %s' %
(k, self._keys[k], name))
self._keys[k] = name
def remove_tool(self, name):
"""
Remove tool from `ToolManager`
Parameters
----------
name : string
Name of the Tool
"""
tool = self.get_tool(name)
tool.destroy()
# If is a toggle tool and toggled, untoggle
if getattr(tool, 'toggled', False):
self.trigger_tool(tool, 'toolmanager')
self._remove_keys(name)
s = 'tool_removed_event'
event = ToolEvent(s, self, tool)
self._callbacks.process(s, event)
del self._tools[name]
def add_tool(self, name, tool, *args, **kwargs):
"""
Add *tool* to `ToolManager`
If successful adds a new event `tool_trigger_name` where **name** is
the **name** of the tool, this event is fired everytime
the tool is triggered.
Parameters
----------
name : str
Name of the tool, treated as the ID, has to be unique
tool : class_like, i.e. str or type
Reference to find the class of the Tool to added.
Notes
-----
args and kwargs get passed directly to the tools constructor.
See Also
--------
matplotlib.backend_tools.ToolBase : The base class for tools.
"""
tool_cls = self._get_cls_to_instantiate(tool)
if not tool_cls:
raise ValueError('Impossible to find class for %s' % str(tool))
if name in self._tools:
warnings.warn('A "Tool class" with the same name already exists, '
'not added')
return self._tools[name]
tool_obj = tool_cls(self, name, *args, **kwargs)
self._tools[name] = tool_obj
if tool_cls.default_keymap is not None:
self.update_keymap(name, tool_cls.default_keymap)
# For toggle tools init the radio_group in self._toggled
if isinstance(tool_obj, tools.ToolToggleBase):
# None group is not mutually exclusive, a set is used to keep track
# of all toggled tools in this group
if tool_obj.radio_group is None:
self._toggled.setdefault(None, set())
else:
self._toggled.setdefault(tool_obj.radio_group, None)
self._tool_added_event(tool_obj)
return tool_obj
def _tool_added_event(self, tool):
s = 'tool_added_event'
event = ToolEvent(s, self, tool)
self._callbacks.process(s, event)
def _handle_toggle(self, tool, sender, canvasevent, data):
"""
Toggle tools, need to untoggle prior to using other Toggle tool
Called from trigger_tool
Parameters
----------
tool: Tool object
sender: object
Object that wishes to trigger the tool
canvasevent : Event
Original Canvas event or None
data : Object
Extra data to pass to the tool when triggering
"""
radio_group = tool.radio_group
# radio_group None is not mutually exclusive
# just keep track of toggled tools in this group
if radio_group is None:
if tool.toggled:
self._toggled[None].remove(tool.name)
else:
self._toggled[None].add(tool.name)
return
# If the tool already has a toggled state, untoggle it
if self._toggled[radio_group] == tool.name:
toggled = None
# If no tool was toggled in the radio_group
# toggle it
elif self._toggled[radio_group] is None:
toggled = tool.name
# Other tool in the radio_group is toggled
else:
# Untoggle previously toggled tool
self.trigger_tool(self._toggled[radio_group],
self,
canvasevent,
data)
toggled = tool.name
# Keep track of the toggled tool in the radio_group
self._toggled[radio_group] = toggled
def _get_cls_to_instantiate(self, callback_class):
# Find the class that corresponds to the tool
if isinstance(callback_class, six.string_types):
# FIXME: make more complete searching structure
if callback_class in globals():
callback_class = globals()[callback_class]
else:
mod = 'backend_tools'
current_module = __import__(mod,
globals(), locals(), [mod], 1)
callback_class = getattr(current_module, callback_class, False)
if callable(callback_class):
return callback_class
else:
return None
def trigger_tool(self, name, sender=None, canvasevent=None,
data=None):
"""
Trigger a tool and emit the tool_trigger_[name] event
Parameters
----------
name : string
Name of the tool
sender: object
Object that wishes to trigger the tool
canvasevent : Event
Original Canvas event or None
data : Object
Extra data to pass to the tool when triggering
"""
tool = self.get_tool(name)
if tool is None:
return
if sender is None:
sender = self
self._trigger_tool(name, sender, canvasevent, data)
s = 'tool_trigger_%s' % name
event = ToolTriggerEvent(s, sender, tool, canvasevent, data)
self._callbacks.process(s, event)
def _trigger_tool(self, name, sender=None, canvasevent=None, data=None):
"""
Trigger on a tool
Method to actually trigger the tool
"""
tool = self.get_tool(name)
if isinstance(tool, tools.ToolToggleBase):
self._handle_toggle(tool, sender, canvasevent, data)
# Important!!!
# This is where the Tool object gets triggered
tool.trigger(sender, canvasevent, data)
def _key_press(self, event):
if event.key is None or self.keypresslock.locked():
return
name = self._keys.get(event.key, None)
if name is None:
return
self.trigger_tool(name, canvasevent=event)
@property
def tools(self):
"""Return the tools controlled by `ToolManager`"""
return self._tools
def get_tool(self, name, warn=True):
"""
Return the tool object, also accepts the actual tool for convenience
Parameters
----------
name : str, ToolBase
Name of the tool, or the tool itself
warn : bool, optional
If this method should give warnings.
"""
if isinstance(name, tools.ToolBase) and name.name in self._tools:
return name
if name not in self._tools:
if warn:
warnings.warn("ToolManager does not control tool %s" % name)
return None
return self._tools[name]
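# --- Hedged usage sketch (appended for illustration; not part of the original
# module). It drives the ToolManager defined above directly on an Agg canvas,
# so no GUI backend is needed; the 'announce' tool name and AnnounceTool class
# are assumptions made only for this example.
if __name__ == '__main__':
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg

    class AnnounceTool(tools.ToolBase):
        """Minimal non-toggle tool: reports whenever it is triggered."""
        default_keymap = 'a'
        description = 'Announce'

        def trigger(self, sender, canvasevent, data=None):
            print('AnnounceTool triggered with data:', data)

    def on_trigger(event):
        # callback for the per-tool event registered by add_tool()
        print('tool_trigger_announce fired by', type(event.sender).__name__)

    canvas = FigureCanvasAgg(Figure())
    manager = ToolManager(canvas)
    manager.add_tool('announce', AnnounceTool)

    cid = manager.toolmanager_connect('tool_trigger_announce', on_trigger)
    manager.trigger_tool('announce', data={'reason': 'demo'})
    manager.toolmanager_disconnect(cid)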
| bsd-2-clause |
sumitb/nuclei-analysis | hackrpi/plot_dbscan.py | 2 | 3735 | import numpy as np
from scipy.spatial import distance
from sklearn.cluster import DBSCAN
from sklearn import metrics
#from os import getcwd
##############################################################################
# Generate sample data
#centers = [[1, 1], [-1, -1], [1, -1]]
#X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4)
#fileNum = '01'
#dataDir = getcwd()+ '/../data/path-image-1' + str(fileNum) + '.tif/'
def clabels(featureNum):
if featureNum == 0:
label = "Area"
elif featureNum == 1:
label = "Perimeter"
elif featureNum == 2:
label = "Compactness"
elif featureNum == 3:
label = "Asymmetry"
elif featureNum == 4:
label = "BoundaryIndex"
elif featureNum == 5:
label = "Compactness"
elif featureNum == 6:
label = "Contrast"
elif featureNum == 7:
label = "Dissimilarity"
elif featureNum == 8:
label = "Angular Second moment"
elif featureNum == 9:
label = "Energy"
elif featureNum == 10:
label = "Homegeneity"
return label
def load_data(fName):
#fName = dataDir + fi
fp = open(fName)
X = np.loadtxt(fp)
fp.close()
return X
def start_dbscan(fi,fo,featureIndexList=[0,1]):
##############################################################################
# Compute similarities
X = load_data(fi)
D = distance.squareform(distance.pdist(X))
S = 1 - (D / np.max(D))
#print X
#print labels_true
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(S)
core_samples = db.core_sample_indices_
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print 'Estimated number of clusters: %d' % n_clusters_
if n_clusters_ ==0:
return
#print "Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels)
#print "Completeness: %0.3f" % metrics.completeness_score(labels_true, labels)
#print "V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels)
#print "Adjusted Rand Index: %0.3f" % \
# metrics.adjusted_rand_score(labels_true, labels)
#print "Adjusted Mutual Information: %0.3f" % \
# metrics.adjusted_mutual_info_score(labels_true, labels)
print ("Silhouette Coefficient: %0.3f" %
metrics.silhouette_score(D, labels, metric='precomputed'))
##############################################################################
# Plot result
import pylab as pl
from itertools import cycle
pl.close('all')
pl.figure(1)
pl.clf()
# Black removed and is used for noise instead.
colors = cycle('bgrcmybgrcmybgrcmybgrcmy')
for k, col in zip(set(labels), colors):
if k == -1:
# Black used for noise.
col = 'k'
markersize = 6
class_members = [index[0] for index in np.argwhere(labels == k)]
cluster_core_samples = [index for index in core_samples
if labels[index] == k]
for index in class_members:
x = X[index]
if index in core_samples and k != -1:
markersize = 14
else:
markersize = 6
pl.plot(x[0], x[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=markersize)
pl.title('Estimated number of clusters: %d' % n_clusters_)
#pl.savefig(dataDir + "dbscan/"+fo )
    # set axis labels before saving so they appear in the written image
    pl.xlabel(clabels(featureIndexList[0]))
    pl.ylabel(clabels(featureIndexList[1]))
    pl.savefig(fo)
pl.ion()
#for testing
#start_dbscan("path-image-100.seg.000000.000000.csv","myfilter_test.png")
| mit |
ankurankan/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
louispotok/pandas | pandas/tests/tslibs/test_libfrequencies.py | 1 | 4195 | # -*- coding: utf-8 -*-
import pandas.util.testing as tm
from pandas.tseries import offsets
from pandas._libs.tslibs.frequencies import (get_rule_month,
_period_str_to_code,
_INVALID_FREQ_ERROR,
is_superperiod, is_subperiod)
def assert_aliases_deprecated(freq, expected, aliases):
assert isinstance(aliases, list)
assert (_period_str_to_code(freq) == expected)
for alias in aliases:
with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
_period_str_to_code(alias)
def test_get_rule_month():
result = get_rule_month('W')
assert (result == 'DEC')
result = get_rule_month(offsets.Week())
assert (result == 'DEC')
result = get_rule_month('D')
assert (result == 'DEC')
result = get_rule_month(offsets.Day())
assert (result == 'DEC')
result = get_rule_month('Q')
assert (result == 'DEC')
    result = get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert (result == 'DEC')
result = get_rule_month('Q-JAN')
assert (result == 'JAN')
result = get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert (result == 'JAN')
result = get_rule_month('A-DEC')
assert (result == 'DEC')
result = get_rule_month('Y-DEC')
assert (result == 'DEC')
result = get_rule_month(offsets.YearEnd())
assert (result == 'DEC')
result = get_rule_month('A-MAY')
assert (result == 'MAY')
result = get_rule_month('Y-MAY')
assert (result == 'MAY')
result = get_rule_month(offsets.YearEnd(month=5))
assert (result == 'MAY')
def test_period_str_to_code():
assert (_period_str_to_code('A') == 1000)
assert (_period_str_to_code('A-DEC') == 1000)
assert (_period_str_to_code('A-JAN') == 1001)
assert (_period_str_to_code('Y') == 1000)
assert (_period_str_to_code('Y-DEC') == 1000)
assert (_period_str_to_code('Y-JAN') == 1001)
assert (_period_str_to_code('Q') == 2000)
assert (_period_str_to_code('Q-DEC') == 2000)
assert (_period_str_to_code('Q-FEB') == 2002)
assert_aliases_deprecated("M", 3000, ["MTH", "MONTH", "MONTHLY"])
assert (_period_str_to_code('W') == 4000)
assert (_period_str_to_code('W-SUN') == 4000)
assert (_period_str_to_code('W-FRI') == 4005)
assert_aliases_deprecated("B", 5000, ["BUS", "BUSINESS",
"BUSINESSLY", "WEEKDAY"])
assert_aliases_deprecated("D", 6000, ["DAY", "DLY", "DAILY"])
assert_aliases_deprecated("H", 7000, ["HR", "HOUR", "HRLY", "HOURLY"])
assert_aliases_deprecated("T", 8000, ["minute", "MINUTE", "MINUTELY"])
assert (_period_str_to_code('Min') == 8000)
assert_aliases_deprecated("S", 9000, ["sec", "SEC", "SECOND", "SECONDLY"])
assert_aliases_deprecated("L", 10000, ["MILLISECOND", "MILLISECONDLY"])
assert (_period_str_to_code('ms') == 10000)
assert_aliases_deprecated("U", 11000, ["MICROSECOND", "MICROSECONDLY"])
assert (_period_str_to_code('US') == 11000)
assert_aliases_deprecated("N", 12000, ["NANOSECOND", "NANOSECONDLY"])
assert (_period_str_to_code('NS') == 12000)
def test_is_superperiod_subperiod():
# input validation
assert not (is_superperiod(offsets.YearEnd(), None))
assert not (is_subperiod(offsets.MonthEnd(), None))
assert not (is_superperiod(None, offsets.YearEnd()))
assert not (is_subperiod(None, offsets.MonthEnd()))
assert not (is_superperiod(None, None))
assert not (is_subperiod(None, None))
assert (is_superperiod(offsets.YearEnd(), offsets.MonthEnd()))
assert (is_subperiod(offsets.MonthEnd(), offsets.YearEnd()))
assert (is_superperiod(offsets.Hour(), offsets.Minute()))
assert (is_subperiod(offsets.Minute(), offsets.Hour()))
assert (is_superperiod(offsets.Second(), offsets.Milli()))
assert (is_subperiod(offsets.Milli(), offsets.Second()))
assert (is_superperiod(offsets.Milli(), offsets.Micro()))
assert (is_subperiod(offsets.Micro(), offsets.Milli()))
assert (is_superperiod(offsets.Micro(), offsets.Nano()))
assert (is_subperiod(offsets.Nano(), offsets.Micro()))
| bsd-3-clause |
NewKnowledge/punk | punk/aggregator/numeric.py | 1 | 1786 | import pandas as pd
import numpy as np
def range_groups(df, number_headers, bins=None):
max_bins = 20
df_desc = df[number_headers].describe().reset_index()
df_nums = df[number_headers]
df_nums = df_nums.dropna()
if not bins:
lowest_min_header = None
lowest_min_value = None
highest_max_header = None
highest_max_value = None
for number_header in number_headers:
min_val = df_desc.loc[df_desc['index'] == 'min'][number_header].values[0]
max_val = df_desc.loc[df_desc['index'] == 'max'][number_header].values[0]
if not lowest_min_value or min_val < lowest_min_value:
lowest_min_header = number_header
lowest_min_value = min_val
if not highest_max_value or max_val > highest_max_value:
highest_max_header = number_header
highest_max_value = max_val
high_low = np.concatenate([df_nums[lowest_min_header].values,df_nums[highest_max_header].values])
high_low = high_low[~np.isnan(high_low)]
counts,bins = np.histogram(high_low,bins='auto')
if len(counts) > max_bins:
bins = max_bins
ys = {}
x_values = None
for number_header in number_headers:
count,division = np.histogram(df_nums[number_header].values,bins=bins)
if not x_values:
x_values = []
for i,d in enumerate(division):
if i == len(division) - 1:
break
x_values.append('{0:.2f}'.format(d) + u' to ' + '{0:.2f}'.format(division[i+1]))
y_values = list(count)
ys[number_header] = y_values
output = np.array([
x_values,
y_values
],dtype='O')
return output
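# --- Hedged usage sketch (appended for illustration; not part of the original
# module). The toy DataFrame below is an assumption made purely to show how
# range_groups() bins several numeric columns against shared histogram edges.
if __name__ == '__main__':
    df_demo = pd.DataFrame({
        'height': np.random.normal(170.0, 10.0, size=200),
        'weight': np.random.normal(70.0, 15.0, size=200),
    })
    binned = range_groups(df_demo, ['height', 'weight'])
    print(binned[0])  # shared bin labels such as '52.31 to 63.90'
    print(binned[1])  # counts for the last column processed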
| mit |
karlafej/WebAppEx | spyre/spyre_wordcloud.py | 1 | 2179 | from spyre import server
from wordcloud import WordCloud#, ImageColorGenerator
import matplotlib.pyplot as plt
import re
import numpy as np
from nltk.corpus import stopwords
from PIL import Image
class WCApp(server.App):
title = "Word Cloud"
controls = [{
"type": "upload",
"id": "ubutton"
}, {
"type": "button",
"label": "refresh",
"id": "update_data"
}]
inputs = [{
"type": 'slider',
"label": 'Number of words',
"min": 100, "max": 1000, "value": 500, "step": 100,
"key": 'lim',
"action_id": 'plot'
}]
tabs = ["Text", "WordCloud"]
outputs = [{
"type": "plot",
"id": "plot",
"control_id": "update_data",
"tab": "WordCloud",
"on_page_load": False
}, {
"type": "html",
"id": "html1",
"control_id": "update_data",
"tab": "Text"
}]
def __init__(self):
self.upload_data = None
self.upload_file = None
self.english_stopwords = set(stopwords.words('english'))
self.regex = re.compile(r"(\b[-']\b)|[\W_]+")
self.wc_mask = np.array(Image.open("../data/python.png"))
def storeUpload(self, file):
self.upload_file = file
self.upload_data = file.read()
def html1(self, params):
text = (
"Upload a text file and press refresh."
)
if self.upload_data is not None:
text = self.upload_data
return text
def getPlot(self, params):
if self.upload_data is not None:
txt = str(self.upload_data.decode("utf-8"))
txt = self.regex.sub(" ", txt).lower()
limit = int(params['lim'])
wordcloud = WordCloud(
max_words=limit,
stopwords=self.english_stopwords,
mask=self.wc_mask,
).generate(txt)
fig = plt.figure()
fig.set_figwidth(8)
fig.set_figheight(8)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis('off')
return fig
if __name__ == '__main__':
app = WCApp()
app.launch()
| mit |
ainafp/nilearn | plot_haxby_stimuli.py | 1 | 1030 | """
Show stimuli of Haxby et al. dataset
===============================================================================
In this script we plot an overview of the stimuli used in "Distributed
and Overlapping Representations of Faces and Objects in Ventral Temporal
Cortex" (Science 2001)
"""
from scipy.misc import imread
import matplotlib.pyplot as plt
from nilearn.datasets import fetch_haxby
stimulus_information = fetch_haxby(n_subjects=0,
fetch_stimuli=True).stimuli
for stim_type in sorted(stimulus_information.keys()):
if stim_type == "controls":
# skip control images, there are too many
continue
file_names = stimulus_information[stim_type]
plt.figure()
for i in range(48):
plt.subplot(6, 8, i + 1)
try:
plt.imshow(imread(file_names[i]), cmap=plt.cm.gray)
except:
# just go to the next one if the file is not present
pass
plt.axis("off")
plt.suptitle(stim_type)
plt.show()
| bsd-3-clause |
cxhernandez/msmbuilder | msmbuilder/example_datasets/base.py | 9 | 8471 | import numbers
import shutil
import sys
import time
from functools import wraps
from io import BytesIO
from os import environ
from os import makedirs
from os.path import exists
from os.path import expanduser
from os.path import join
from zipfile import ZipFile
import numpy as np
from six.moves.urllib.error import HTTPError
from six.moves.urllib.request import urlopen
from sklearn.utils import check_random_state
def retry(max_retries=1):
""" Retry a function `max_retries` times. """
def retry_func(func):
@wraps(func)
def wrapper(*args, **kwargs):
num_retries = 0
while num_retries <= max_retries:
try:
ret = func(*args, **kwargs)
break
except HTTPError:
if num_retries == max_retries:
raise
num_retries += 1
time.sleep(5)
return ret
return wrapper
return retry_func
class Dataset(object):
@classmethod
def description(cls):
"""Get a description from the Notes section of the docstring."""
lines = [s.strip() for s in cls.__doc__.splitlines()]
note_i = lines.index("Notes")
return "\n".join(lines[note_i + 2:])
def cache(self):
raise NotImplementedError
def get(self):
raise NotImplementedError
def get_cached(self):
raise NotImplementedError
class _MDDataset(Dataset):
target_directory = "" # set in subclass
data_url = "" # set in subclass
def __init__(self, data_home=None, verbose=True):
self.data_home = get_data_home(data_home)
self.data_dir = join(self.data_home, self.target_directory)
self.cached = False
self.verbose = verbose
def _msmbdata_cache(self):
if self.verbose:
print("Copying {} from msmb_data package to {}"
.format(self.target_directory, self.data_home))
msmb_data = has_msmb_data()
assert msmb_data is not None
shutil.copytree("{}/{}".format(msmb_data, self.target_directory),
self.data_dir)
@retry(3)
def _figshare_cache(self):
if self.verbose:
print('downloading {} from {} to {}'
.format(self.target_directory, self.data_url,
self.data_home))
fhandle = urlopen(self.data_url)
buf = BytesIO(fhandle.read())
zip_file = ZipFile(buf)
makedirs(self.data_dir)
for name in zip_file.namelist():
zip_file.extract(name, path=self.data_dir)
@retry(3)
def cache(self):
if not exists(self.data_home):
makedirs(self.data_home)
if not exists(self.data_dir):
if has_msmb_data() is not None:
self._msmbdata_cache()
else:
self._figshare_cache()
elif self.verbose:
print("{} already is cached".format(self.target_directory))
self.cached = True
def get_cached(self):
raise NotImplementedError
def get(self):
if not self.cached:
self.cache()
return self.get_cached()
class _NWell(Dataset):
"""Base class for brownian dynamics on a potential
Parameters
----------
data_home : optional, default: None
Specify another cache folder for the datasets. By default
all MSMBuilder data is stored in '~/msmbuilder_data' subfolders.
random_state : {int, None}, default: None
Seed the psuedorandom number generator to generate trajectories. If
seed is None, the global numpy PRNG is used. If random_state is an
int, the simulations will be cached in ``data_home``, or loaded from
``data_home`` if simulations with that seed have been performed already.
With random_state=None, new simulations will be performed and the
trajectories will not be cached.
"""
target_name = "" # define in subclass
n_trajectories = 0 # define in subclass
version = 1 # override in subclass if parameters are updated
def __init__(self, data_home=None, random_state=None):
self.data_home = get_data_home(data_home)
self.data_dir = join(self.data_home, self.target_name)
self.random_state = random_state
self.cache_path = self._get_cache_path(random_state)
def _get_cache_path(self, random_state):
path = "{}/version-{}/randomstate-{}".format(self.data_dir,
self.version,
self.random_state)
return path
def _load(self, path):
return [np.load("{}/{}.npy".format(path, i))
for i in range(self.n_trajectories)]
def _save(self, path, trajectories):
assert len(trajectories) == self.n_trajectories
if not exists(path):
makedirs(path)
for i, traj in enumerate(trajectories):
np.save("{}/{}.npy".format(path, i), traj)
def cache(self):
random = check_random_state(self.random_state)
if not exists(self.data_dir):
makedirs(self.data_dir)
if self.random_state is None:
trajectories = self.simulate_func(random)
return trajectories
if not isinstance(self.random_state, numbers.Integral):
raise TypeError('random_state must be an int')
if exists(self.cache_path):
return self._load(self.cache_path)
trajectories = self.simulate_func(random)
self._save(self.cache_path, trajectories)
return trajectories
def get_cached(self):
if self.cache_path is None:
raise ValueError("You must specify a random state to get "
"cached trajectories.")
trajectories = self._load(self.cache_path)
return Bunch(trajectories=trajectories, DESCR=self.description())
def get(self):
trajectories = self.cache()
return Bunch(trajectories=trajectories, DESCR=self.description())
def simulate_func(self, random):
# Implement in subclass
raise NotImplementedError
def potential(self, x):
# Implement in subclass
raise NotImplementedError
class Bunch(dict):
"""Container object for datasets: dictionary-like object that
exposes its keys as attributes."""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
def has_msmb_data():
"""We provide a conda package containing the saved data.
This package was introduced because the figshare downloads could
be 'iffy' at times.
Returns
-------
path : str or None
The path (if it exists). otherwise None
"""
msmb_data_dir = join(sys.prefix, 'share', 'msmb_data')
if exists(msmb_data_dir):
return msmb_data_dir
else:
return None
def _expand_and_makedir(data_home):
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def get_data_home(data_home=None):
"""Return the path of the msmbuilder data dir.
As of msmbuilder v3.6, this function will prefer data downloaded via
the msmb_data conda package (and located within the python installation
directory). If this package exists, we will use its data directory as
the data home. Otherwise, we use the old logic:
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'msmbuilder_data'
in the user's home folder.
Alternatively, it can be set by the 'MSMBUILDER_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is not None:
return _expand_and_makedir(data_home)
msmb_data = has_msmb_data()
if msmb_data is not None:
return _expand_and_makedir(msmb_data)
data_home = environ.get('MSMBUILDER_DATA', join('~', 'msmbuilder_data'))
return _expand_and_makedir(data_home)
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
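# --- Hedged usage sketch (appended for illustration; not part of the original
# module). RandomWalk is a made-up trivial subclass used only to exercise the
# caching contract of _NWell; the /tmp data_home path is an assumption.
if __name__ == '__main__':
    class RandomWalk(_NWell):
        """Trivial dataset: independent 1-D Brownian trajectories.

        Notes
        -----
        Ten independent random walks of 1000 Gaussian steps each.
        """
        target_name = "randomwalk_example"
        n_trajectories = 10

        def simulate_func(self, random):
            # cumulative sums of Gaussian steps -> Brownian trajectories
            return [np.cumsum(random.randn(1000))
                    for _ in range(self.n_trajectories)]

        def potential(self, x):
            # flat potential, present only to satisfy the base-class interface
            return np.zeros_like(x)

    dataset = RandomWalk(data_home='/tmp/msmbuilder_example_data',
                         random_state=42)
    bunch = dataset.get()          # simulates once and caches to disk
    print("n_trajectories: %d" % len(bunch.trajectories))
    print(bunch.DESCR)

    # a second instance with the same seed loads straight from the cache
    cached = RandomWalk(data_home='/tmp/msmbuilder_example_data',
                        random_state=42).get_cached()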
| lgpl-2.1 |
PhloxAR/math3 | math3/objects/basecls.py | 1 | 7026 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
class NpProxy(object):
def __init__(self, index):
self._index = index
def __get__(self, obj, cls):
return obj[self._index]
def __set__(self, obj, value):
obj[self._index] = value
class BaseObject(np.ndarray):
_shape = None
def __new__(cls, obj):
# ensure the object matches the required shape
obj.shape = cls._shape
return obj
def _unsupported_type(self, method, other):
raise ValueError('Cannot {} a {} to a {}'.format(method, type(other).__name__, type(self).__name__))
# Redirect assignment operators
def __iadd__(self, other):
self[:] = self.__add__(other)
return self
def __isub__(self, other):
self[:] = self.__sub__(other)
return self
def __imul__(self, other):
self[:] = self.__mul__(other)
return self
def __idiv__(self, other):
self[:] = self.__div__(other)
return self
class BaseMatrix(BaseObject):
@classmethod
def identity(cls, dtype=None):
"""
Creates an identity Matrix.
"""
pass
@classmethod
def from_euler(cls, euler, dtype=None):
"""
Creates a Matrix from the specified Euler angles.
"""
pass
@classmethod
def from_quaternion(cls, quaternion, dtype=None):
"""
Creates a Matrix from a Quaternion.
"""
pass
@classmethod
def from_inverse_of_quaternion(cls, quaternion, dtype=None):
"""
Creates a Matrix from the inverse of the specified Quaternion.
"""
pass
@classmethod
def from_scale(cls, scale, dtype=None):
pass
@classmethod
def from_x_rotation(cls, theta, dtype=None):
"""
Creates a Matrix with a rotation around the X-axis.
"""
pass
@classmethod
def from_y_rotation(cls, theta, dtype=None):
"""
Creates a Matrix with a rotation around the Y-axis.
"""
pass
@classmethod
def from_z_rotation(cls, theta, dtype=None):
"""
Creates a Matrix with a rotation around the Z-axis.
"""
pass
@property
def inverse(self):
"""
Returns the inverse of this matrix.
"""
return None
class BaseVector(BaseObject):
@classmethod
def from_matrix44_translation(cls, matrix, dtype=None):
return cls(cls._module.create_from_matrix44_translation(matrix, dtype))
def normalize(self):
self[:] = self.normalised
@property
def normalized(self):
"""
Normalizes an Nd list of vectors or a single vector to unit length.
The vector is **not** changed in place.
For zero-length vectors, the result will be np.nan.
numpy.array([ x, y, z ])
Or an NxM array::
numpy.array([
[x1, y1, z1],
[x2, y2, z2]
]).
:rtype: A numpy.array the normalised value
"""
return type(self)(self.T / np.sqrt(np.sum(self ** 2, axis=-1)))
@property
def squared_length(self):
"""
Calculates the squared length of a vector.
Useful when trying to avoid the performance
penalty of a square root operation.
:rtype: If one vector is supplied, the result with be a scalar.
Otherwise the result will be an array of scalars with shape
vec.ndim with the last dimension being size 1.
"""
lengths = np.sum(self ** 2., axis=-1)
return lengths
@property
def length(self):
"""
Returns the length of an Nd list of vectors
or a single vector.
Single vector::
numpy.array([ x, y, z ])
Nd array::
numpy.array([
[x1, y1, z1],
[x2, y2, z2]
]).
:rtype: If a 1d array was passed, it will be a scalar.
Otherwise the result will be an array of scalars with shape
vec.ndim with the last dimension being size 1.
"""
return np.sqrt(np.sum(self ** 2, axis=-1))
@length.setter
def length(self, length):
"""
Resize a Nd list of vectors or a single vector to 'length'.
The vector is changed in place.
Single vector::
numpy.array([ x, y, z ])
Nd array::
numpy.array([
[x1, y1, z1],
[x2, y2, z2]
]).
"""
# calculate the length
# this is a duplicate of length(vec) because we
# always want an array, even a 0-d array.
self[:] = (self.T / np.sqrt(np.sum(self ** 2, axis=-1)) * length).T
def dot(self, other):
"""Calculates the dot product of two vectors.
:param numpy.array other: an Nd array with the final dimension
being size 3 (a vector)
:rtype: If a 1d array was passed, it will be a scalar.
Otherwise the result will be an array of scalars with shape
vec.ndim with the last dimension being size 1.
"""
return np.sum(self * other, axis=-1)
def cross(self, other):
return type(self)(np.cross(self[:3], other[:3]))
def interpolate(self, other, delta):
"""
Interpolates between 2 arrays of vectors (shape = N,3)
by the specified delta (0.0 <= delta <= 1.0).
:param numpy.array other: an Nd array with the final dimension
being size 3. (a vector)
:param float delta: The interpolation percentage to apply,
where 0.0 <= delta <= 1.0.
When delta is 0.0, the result will be v1.
When delta is 1.0, the result will be v2.
Values inbetween will be an interpolation.
:rtype: A numpy.array with shape v1.shape.
"""
# scale the difference based on the time
# we must do it this 'unreadable' way to avoid
# loss of precision.
# the 'readable' method (f_now = f_0 + (f1 - f0) * delta)
# causes floating point errors due to the small values used
# in md2 files and the values become corrupted.
        # this horrible code courtesy of this comment:
# http://stackoverflow.com/questions/5448322/temporal-interpolation-in-numpy-matplotlib
return self + ((other - self) * delta)
# return v1 * (1.0 - delta ) + v2 * delta
t = delta
t0 = 0.0
t1 = 1.0
delta_t = t1 - t0
return (t1 - t) / delta_t * v1 + (t - t0) / delta_t * v2
class BaseQuaternion(BaseObject):
pass
# pre-declarations to prevent circular imports
class BaseMatrix3(BaseMatrix):
pass
class BaseMatrix4(BaseMatrix):
pass
class BaseVector3(BaseVector):
pass
class BaseVector4(BaseVector):
pass
| bsd-3-clause |
gfyoung/pandas | pandas/tests/indexes/ranges/test_join.py | 4 | 6098 | import numpy as np
from pandas import Index, Int64Index, RangeIndex
import pandas._testing as tm
class TestJoin:
def test_join_outer(self):
# join with Int64Index
index = RangeIndex(start=0, stop=20, step=2)
other = Int64Index(np.arange(25, 14, -1))
res, lidx, ridx = index.join(other, how="outer", return_indexers=True)
noidx_res = index.join(other, how="outer")
tm.assert_index_equal(res, noidx_res)
eres = Int64Index(
[0, 2, 4, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
)
elidx = np.array(
[0, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, 9, -1, -1, -1, -1, -1, -1, -1],
dtype=np.intp,
)
eridx = np.array(
[-1, -1, -1, -1, -1, -1, -1, -1, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
dtype=np.intp,
)
assert isinstance(res, Int64Index)
assert not isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# join with RangeIndex
other = RangeIndex(25, 14, -1)
res, lidx, ridx = index.join(other, how="outer", return_indexers=True)
noidx_res = index.join(other, how="outer")
tm.assert_index_equal(res, noidx_res)
assert isinstance(res, Int64Index)
assert not isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
# Join with non-RangeIndex
index = RangeIndex(start=0, stop=20, step=2)
other = Int64Index(np.arange(25, 14, -1))
res, lidx, ridx = index.join(other, how="inner", return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([16, 18])
elidx = np.array([8, 9], dtype=np.intp)
eridx = np.array([9, 7], dtype=np.intp)
assert isinstance(res, Int64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# Join two RangeIndex
other = RangeIndex(25, 14, -1)
res, lidx, ridx = index.join(other, how="inner", return_indexers=True)
assert isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
# Join with Int64Index
index = RangeIndex(start=0, stop=20, step=2)
other = Int64Index(np.arange(25, 14, -1))
res, lidx, ridx = index.join(other, how="left", return_indexers=True)
eres = index
eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 9, 7], dtype=np.intp)
assert isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
        # Join with RangeIndex
other = Int64Index(np.arange(25, 14, -1))
res, lidx, ridx = index.join(other, how="left", return_indexers=True)
assert isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_right(self):
# Join with Int64Index
index = RangeIndex(start=0, stop=20, step=2)
other = Int64Index(np.arange(25, 14, -1))
res, lidx, ridx = index.join(other, how="right", return_indexers=True)
eres = other
elidx = np.array([-1, -1, -1, -1, -1, -1, -1, 9, -1, 8, -1], dtype=np.intp)
assert isinstance(other, Int64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
assert ridx is None
        # Join with RangeIndex
other = RangeIndex(25, 14, -1)
res, lidx, ridx = index.join(other, how="right", return_indexers=True)
eres = other
assert isinstance(other, RangeIndex)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
assert ridx is None
def test_join_non_int_index(self):
index = RangeIndex(start=0, stop=20, step=2)
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = index.join(other, how="outer")
outer2 = other.join(index, how="outer")
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18])
tm.assert_index_equal(outer, outer2)
tm.assert_index_equal(outer, expected)
inner = index.join(other, how="inner")
inner2 = other.join(index, how="inner")
expected = Index([6, 8, 10])
tm.assert_index_equal(inner, inner2)
tm.assert_index_equal(inner, expected)
left = index.join(other, how="left")
tm.assert_index_equal(left, index.astype(object))
left2 = other.join(index, how="left")
tm.assert_index_equal(left2, other)
right = index.join(other, how="right")
tm.assert_index_equal(right, other)
right2 = other.join(index, how="right")
tm.assert_index_equal(right2, index.astype(object))
def test_join_non_unique(self):
index = RangeIndex(start=0, stop=20, step=2)
other = Index([4, 4, 3, 3])
res, lidx, ridx = index.join(other, return_indexers=True)
eres = Int64Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18])
elidx = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp)
eridx = np.array([-1, -1, 0, 1, -1, -1, -1, -1, -1, -1, -1], dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_self(self, join_type):
index = RangeIndex(start=0, stop=20, step=2)
joined = index.join(index, how=join_type)
assert index is joined
| bsd-3-clause |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/core/magics/pylab.py | 2 | 5482 | """Implementation of magic functions for matplotlib/pylab support.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from IPython.config.application import Application
from IPython.core import magic_arguments
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils.warn import warn
from IPython.core.pylabtools import backends
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
magic_gui_arg = magic_arguments.argument(
'gui', nargs='?',
help="""Name of the matplotlib backend to use %s.
If given, the corresponding matplotlib backend is used,
otherwise it will be matplotlib's default
(which you can set in your matplotlib config file).
""" % str(tuple(sorted(backends.keys())))
)
@magics_class
class PylabMagics(Magics):
"""Magics related to matplotlib's pylab support"""
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
@magic_gui_arg
def matplotlib(self, line=''):
"""Set up matplotlib to work interactively.
This function lets you activate matplotlib interactive support
at any point during an IPython session.
It does not import anything into the interactive namespace.
If you are using the inline matplotlib backend for embedded figures,
you can adjust its behavior via the %config magic::
# enable SVG figures, necessary for SVG+XHTML export in the qtconsole
In [1]: %config InlineBackend.figure_format = 'svg'
# change the behavior of closing all figures at the end of each
# execution (cell), or allowing reuse of active figures across
# cells:
In [2]: %config InlineBackend.close_figures = False
Examples
--------
In this case, where the MPL default is TkAgg::
In [2]: %matplotlib
Using matplotlib backend: TkAgg
But you can explicitly request a different backend::
In [3]: %matplotlib qt
"""
args = magic_arguments.parse_argstring(self.matplotlib, line)
gui, backend = self.shell.enable_matplotlib(args.gui)
self._show_matplotlib_backend(args.gui, backend)
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'--no-import-all', action='store_true', default=None,
help="""Prevent IPython from performing ``import *`` into the interactive namespace.
You can govern the default behavior of this flag with the
InteractiveShellApp.pylab_import_all configurable.
"""
)
@magic_gui_arg
def pylab(self, line=''):
"""Load numpy and matplotlib to work interactively.
This function lets you activate pylab (matplotlib, numpy and
interactive support) at any point during an IPython session.
%pylab makes the following imports::
import numpy
import matplotlib
from matplotlib import pylab, mlab, pyplot
np = numpy
plt = pyplot
from IPython.display import display
from IPython.core.pylabtools import figsize, getfigs
from pylab import *
from numpy import *
If you pass `--no-import-all`, the last two `*` imports will be excluded.
See the %matplotlib magic for more details about activating matplotlib
without affecting the interactive namespace.
"""
args = magic_arguments.parse_argstring(self.pylab, line)
if args.no_import_all is None:
# get default from Application
if Application.initialized():
app = Application.instance()
try:
import_all = app.pylab_import_all
except AttributeError:
import_all = True
else:
# nothing specified, no app - default True
import_all = True
else:
# invert no-import flag
import_all = not args.no_import_all
gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
self._show_matplotlib_backend(args.gui, backend)
print ("Populating the interactive namespace from numpy and matplotlib")
if clobbered:
warn("pylab import has clobbered these variables: %s" % clobbered +
"\n`%pylab --no-import-all` prevents importing * from pylab and numpy"
)
def _show_matplotlib_backend(self, gui, backend):
"""show matplotlib message backend message"""
if not gui or gui == 'auto':
print ("Using matplotlib backend: %s" % backend)
| apache-2.0 |
sarahannnicholson/FNC | model.py | 1 | 13862 | import numpy as np
from joblib import Parallel, delayed
from datetime import datetime
from sklearn import svm, preprocessing
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
from feature_generation import FeatureGenerator
from FeatureData import FeatureData
import scorer
class Model(object):
def __init__(self, modelType, features):
self._stance_map = {'unrelated': 0, 'discuss': 1, 'agree': 2, 'disagree': 3}
self._model_type = modelType
self._features_for_X1 = features
self._feature_col_names = []
def get_data(self, body_file, stance_file, features_directory):
feature_data = FeatureData(body_file, stance_file)
X_train, self._feature_col_names = FeatureGenerator.get_features_from_file(use=self._features_for_X1,
features_directory=features_directory)
y_train = np.asarray([self._stance_map[stance['Stance']] for stance in feature_data.stances])
# Scale features to range[0, 1] to prevent larger features from dominating smaller ones
min_max_scaler = preprocessing.MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)
return {'X':X_train, 'y':y_train}
def related_unrelated(self, y):
return [x > 0 for x in y]
def get_trained_classifier(self, X_train, y_train):
"""Trains the model and returns the trained classifier to be used for prediction on test data. Note
that stances in test data will need to be translated to the numbers shown in self._stance_map."""
if self._model_type == 'svm':
classifier = svm.SVC(decision_function_shape='ovr', cache_size=1000)
elif self._model_type == 'nn':
classifier = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(30,), random_state=1)
classifier.fit(X_train, y_train)
return classifier
def test_classifier(self, classifier, X_test):
return classifier.predict(X_test)
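# --- Hedged usage sketch (comment form; illustrative only). The CSV paths are
# --- the ones used elsewhere in this file, but the feature name below is a
# --- placeholder for whatever FeatureGenerator has written to disk.
#
#     model = Model('nn', ['example_feature'])
#     data = model.get_data('data/train_bodies.csv', 'data/train_stances.csv',
#                           'features')
#     clf = model.get_trained_classifier(data['X'], data['y'])
#     predictions = model.test_classifier(clf, data['X'])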
def precision(actual, predicted, stance_map):
pairs = zip(actual, predicted)
print "Precision"
scores = {stance: None for stance in stance_map.iterkeys()}
for stance, index in stance_map.iteritems():
truePositive = np.count_nonzero([x[1] == index for x in pairs if x[0] == index])
falsePositive = np.count_nonzero([x[1] == index for x in pairs if x[0] != index])
try:
precision = 100 * float(truePositive) / (truePositive + falsePositive + 1)
scores[stance] = precision
print stance + ": " + str(precision)
except ZeroDivisionError:
print "Zero"
return scores
def recall(actual, predicted, stance_map):
print "Recall"
pairs = zip(actual, predicted)
scores = {stance: None for stance in stance_map.iterkeys()}
for stance, index in stance_map.iteritems():
truePositive = np.count_nonzero([x[1] == index for x in pairs if x[0] == index])
falseNegative = np.count_nonzero([x[1] != index for x in pairs if x[0] == index])
try:
recall = 100 * float(truePositive) / (truePositive + falseNegative + 1)
scores[stance] = recall
print stance + ": " + str(recall)
except ZeroDivisionError:
print "Zero"
return scores
def accuracy(actual, predicted, stance_map):
print "Accuracy"
pairs = zip(actual, predicted)
scores = {stance: None for stance in stance_map.iterkeys()}
for stance, index in stance_map.iteritems():
accurate = np.count_nonzero([x[1] == index and x[1] == x[0] for x in pairs])
total = np.count_nonzero([x[0] == index for x in pairs])
try:
accuracy = 100 * float(accurate)/total
scores[stance] = accuracy
print stance + ": " + str(accuracy)
except ZeroDivisionError:
print "Zero"
return scores
def stratify(X, y):
""" Returns X and y matrices with an even distribution of each class """
# Find the indices of each class
disagree_indices = np.where(y == 3)[0]
agree_indices = np.where(y == 2)[0]
discuss_indices = np.where(y == 1)[0]
unrelated_indices = np.where(y == 0)[0]
num_disagree = disagree_indices.shape[0]
# Take the first num_disagrees entries for each class
reduced_agree_indices = agree_indices[:len(agree_indices)]
reduced_discuss_indices = discuss_indices[:len(discuss_indices)]
reduced_unrelated_indices = unrelated_indices[:(num_disagree + len(agree_indices) + len(discuss_indices))]
# Recombine into stratified X and y matrices
X_stratified = np.concatenate([X[disagree_indices], X[reduced_agree_indices], X[reduced_discuss_indices],
X[reduced_unrelated_indices]], axis=0)
y_stratified = np.concatenate([y[disagree_indices], y[reduced_agree_indices], y[reduced_discuss_indices],
y[reduced_unrelated_indices]], axis=0)
return {'X': X_stratified, 'y': y_stratified}
def score_average(scores, model1):
""" Used to calculate score averages resulting from kfold validation. """
# Calculate averages for precision, recall, and accuracy
score_sums = {stance: 0 for stance in model1._stance_map.iterkeys()}
invalid_counts = {stance: 0 for stance in
model1._stance_map.iterkeys()} # Count number of zero division errors and exclude from averages
for result in scores:
for stance in model1._stance_map.iterkeys():
if result[stance] != None:
score_sums[stance] += result[stance]
else:
invalid_counts[stance] += 1
# Dictionary containing average scores for each stance
return {stance: score_sums[stance]/(len(scores) - invalid_counts[stance]) for stance in model1._stance_map.iterkeys()}
def convert_stance_to_related(y):
    for i, stance in enumerate(y):
if stance != 0:
y[i] = 1
return y
def plot_coefficients(classifier, feature_names, i, k):
top_features=len(feature_names)/2
coef = classifier.coef_[0]
top_positive_coefficients = np.argsort(coef)[-top_features:]
top_negative_coefficients = np.argsort(coef)[:top_features]
top_coefficients = np.hstack([top_negative_coefficients, top_positive_coefficients])
# create plot
plt.figure(figsize=(30, 20))
colors = ['#cccccc' if c < 0 else 'teal' for c in coef[top_coefficients]]
plt.bar(np.arange(2 * top_features), coef[top_coefficients], color=colors)
feature_names = np.array(feature_names)
plt.xticks(np.arange(0, 1 + 2 * top_features), feature_names[top_coefficients], rotation='70')
plt.savefig("graphs/plot-NN_model" + str(i) + "_kfold" + str(k) + ".png")
def map_stances(y):
stance_map = {0: 'unrelated', 1: 'discuss', 2: 'agree', 3: 'disagree'}
return [stance_map.get(key) for key in y]
def split_data(data1, data2, doStratify):
X1 = data1['X']; X2 = data2['X']
y1 = data1['y']; y2 = data2['y']
if doStratify:
stratified = stratify(X1, y1)
X1 = stratified['X']
y1 = stratified['y']
X2 = stratified['X']
y2 = stratified['y']
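        # NOTE: in this branch X2/y2 are replaced by the stratified X1/y1, so both
        # returned pairs come from the same stratified data set.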
return X1, y1, X2, y2
def kfold_system(X1_features, X2_features, doStratify, numFolds, m1_type, m2_type):
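    # Two-stage pipeline: model1 separates related from unrelated headline/body
    # pairs, then model2 classifies the related pairs as agree/disagree/discuss.
    # Per-fold scores are averaged at the end.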
# init models
model1 = Model(m1_type, X1_features)
model2 = Model(m2_type, X2_features)
# Get training and testing data
data = model1.get_data('data/combined_bodies.csv', 'data/combined_stances.csv', 'combined_features')
data2 = model2.get_data('data/combined_bodies.csv', 'data/combined_stances.csv', 'combined_features')
X1, y1, X2, y2 = split_data(data, data2, doStratify)
# For loop parameters
kfold = StratifiedKFold(n_splits=numFolds)
precision_scores = []; recall_scores = [];
accuracy_scores = []; competition_scores = []
k=0
for train_indices, test_indices in kfold.split(X1, y1):
X1_train = X1[train_indices]
y1_train = [int(s != 0) for s in y1[train_indices]]
X2_train = X2[train_indices]
y2_train = y2[train_indices]
# Save testing data
X1_test = X1[test_indices]
X2_test = X2[test_indices]
y_test = y2[test_indices]
# remove rows of the unrelated class for X2_train and y2_train
X2_train_filtered = X2_train[np.nonzero(y1_train)]
y2_train_filtered = y2_train[np.nonzero(y1_train)]
# phase 1: Neural Net Classifier for unrelated/related classification
# print "#1 Train"
# print np.bincount(y1_train)
# print np.unique(y1_train)
clf1 = model1.get_trained_classifier(X1_train, y1_train)
# phase 2: Neural Net Classifier for agree, disagree, discuss
# print "#2 Train"
# print np.bincount(y2_train_filtered)
# print np.unique(y2_train_filtered)
clf2 = model2.get_trained_classifier(X2_train_filtered, y2_train_filtered)
y_predicted = model1.test_classifier(clf1, X1_test)
# print "#1 Test"
# print np.bincount(y_predicted)
# print np.unique(y_predicted)
y2_predicted = model2.test_classifier(clf2, X2_test)
# print "#2 Test"
# print np.bincount(y2_predicted)
# print np.unique(y2_predicted)
# print "Actual Test"
# print np.bincount(y_test)
# print np.unique(y_test)
# add agree, disagree, discuss results back into y_predicted
for i, stance in enumerate(y_predicted):
if stance != 0:
y_predicted[i] = y2_predicted[i]
# print "Final"
# print np.bincount(y_predicted)
# print np.unique(y_predicted)
precision_scores.append(precision(y_test, y_predicted, model1._stance_map))
recall_scores.append(recall(y_test, y_predicted, model1._stance_map))
accuracy_scores.append(accuracy(y_test, y_predicted, model1._stance_map))
y_test= map_stances(y_test)
y_predicted = map_stances(y_predicted)
competition_score = scorer.report_score(y_test, y_predicted)
competition_scores.append(competition_score)
k+=1
print '\nKfold precision averages: ', score_average(precision_scores, model1)
print 'Kfold recall averages: ', score_average(recall_scores, model1)
print 'Kfold accuracy averages: ', score_average(accuracy_scores, model1)
print 'competition score averages: ', sum(competition_scores) / len(competition_scores)
def competition_system(X1_features, X2_features, doStratify, m1_type, m2_type):
# Init models
model1 = Model(m1_type, X1_features)
model2 = Model(m2_type, X2_features)
    # Get training and testing data
train1 = model1.get_data('data/train_bodies.csv', 'data/train_stances.csv', 'features')
test1 = model1.get_data('data/competition_test_bodies.csv', 'data/competition_test_stances.csv', 'test_features')
train2 = model2.get_data('data/train_bodies.csv', 'data/train_stances.csv', 'features')
test2 = model2.get_data('data/competition_test_bodies.csv', 'data/competition_test_stances.csv', 'test_features')
X1_train, y1_train, X1_test, y1_test = split_data(train1, test1, doStratify)
X2_train, y2_train, X2_test, y_test = split_data(train2, test2, doStratify)
y1_train = [int(s != 0) for s in y1_train]
# remove rows of the unrelated class for X2_train and y2_train
X2_train_filtered = X2_train[np.nonzero(y1_train)]
y2_train_filtered = y2_train[np.nonzero(y1_train)]
# Train Models
clf1 = model1.get_trained_classifier(X1_train, y1_train)
#plot_coefficients(clf1, model1._feature_col_names, 1, 1)
clf2 = model2.get_trained_classifier(X2_train_filtered, y2_train_filtered)
# Get model predictions
y_predicted = model1.test_classifier(clf1, X1_test)
y2_predicted = model2.test_classifier(clf2, X2_test)
tmp_test = map_stances([int(s != 0) for s in y_test])
tmp_predicted = map_stances(y_predicted)
tmp_competition_score = scorer.report_score(tmp_test, tmp_predicted)
# add agree, disagree, discuss results back into y_predicted
for i, stance in enumerate(y_predicted):
if stance != 0:
y_predicted[i] = y2_predicted[i]
precision(y_test, y_predicted, model1._stance_map)
recall(y_test, y_predicted, model1._stance_map)
accuracy(y_test, y_predicted, model1._stance_map)
y_test= map_stances(y_test)
y_predicted = map_stances(y_predicted)
competition_score = scorer.report_score(y_test, y_predicted)
if __name__ == '__main__':
# ===============================
# System config parameters
# ===============================
X1_features = {
#'refuting': [0,2,3,8,12,13],
'ngrams': [0, 1, 2],
#'polarity': [0],
'named': [],
#'vader': [0,1],
'jaccard': [],
'quote_analysis': [],
'lengths': [],
'punctuation_frequency': [],
'word2Vec': []
}
X2_features = {
#'refuting': [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],
'ngrams': [1],
'polarity': [1],
#'named': [],
#'vader': [0,1],
#'jaccard': [],
'quote_analysis': [],
'lengths': [],
'punctuation_frequency': [],
#'word2Vec': []
}
model1_type = 'nn'
model2_type = 'nn'
doStratify = False
doKfold = False
numFolds = 10
if doKfold:
# Train and test using kfold validation
kfold_system(X1_features, X2_features, doStratify, numFolds, model1_type, model2_type)
else:
        # Train and test as designed by the FNC (Fake News Challenge)
competition_system(X1_features, X2_features, doStratify, model1_type, model2_type) | apache-2.0 |
smlng/RIOT | tests/pkg_utensor/generate_digit.py | 19 | 1149 | #!/usr/bin/env python3
"""Generate a binary file from a sample image of the MNIST dataset.
Pixel of the sample are stored as float32, images have size 28x28.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
_, (mnist_test, _) = tf.keras.datasets.mnist.load_data()
data = mnist_test[args.index]
output_path = os.path.join(SCRIPT_DIR, args.output)
np.ndarray.tofile(data.astype('float32'), output_path)
if args.no_plot is False:
plt.gray()
plt.imshow(data.reshape(28, 28))
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--index", type=int, default=0,
help="Image index in MNIST test dataset")
parser.add_argument("-o", "--output", type=str, default='digit',
help="Output filename")
parser.add_argument("--no-plot", default=False, action='store_true',
help="Disable image display in matplotlib")
main(parser.parse_args())
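# Example invocation (assuming TensorFlow and matplotlib are installed):
#   python generate_digit.py --index 3 --output digit --no-plot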
| lgpl-2.1 |
DrFr4nk/Agent_Base_Market_Simulator | main.py | 1 | 4974 | # Agent-Based Economic System #
import random
import datetime
import math
import matplotlib.pyplot as plt
from Titolo import *
from Market import *
from Agente import *
from Event_generator import *
agenti=[]
titolo=Titolo(0,"null")
market=Market("null")
sch=[]
retG=[]
bigG=[]
topG=[]
invG=[]
flwG=[]
evG=Event_generator()
def crea_agenti(num_top, num_big, num_ret):
for a in range(num_top-3):
ag=Agente(('t'+str(a)),'top',int(random.uniform(1000000, 10000000))*10000,random.uniform(1, 10),market,1)
ag.add_ttl(titolo)
agenti.append(ag)
for a in range(num_big-10):
ag=Agente(('b'+str(a)),'big',int(random.uniform(20000, 400000))*10000,random.uniform(1, 10),market,1)
ag.set_event(evG)
ag.add_ttl(titolo)
agenti.append(ag)
for a in range(num_ret):
ag=Agente(('r'+str(a)),'retail',int(random.uniform(1000, 10000))*10000,random.uniform(1, 10),market,1)
ag.add_ttl(titolo)
agenti.append(ag)
def initialization(conf_file):
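    # Parses a fixed-layout conf.txt: specific lines and character offsets are
    # assumed to hold the security id/value, the event instant, the market id,
    # the agent counts, and the schedule (days, intraday iterations), matching
    # the slicing below.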
f=open(conf_file,'r')
c=f.readlines()
titolo.set_id_ttl(c[2][3:-1])
titolo.set_value(int(c[3][6:]))
evG.set_inst(int(c[4][5:]))
print c[4][6:]
market.add_ttl(titolo)
market.set_id_mrk(c[6][3:-1])
num_top=int(c[8][4:])
num_big=int(c[9][4:])
num_ret=int(c[10][7:])
sch.append(int(c[12][5:]))
sch.append(int(c[13][4:]))
evG.iniztialization()
crea_agenti(num_top, num_big, num_ret)
f.close()
def stat_mkr(agenti):
tot_liq=0
mRet=0
countR=0
mTop=0
countT=0
mBig=0
countB=0
inv=0
countI=0
flw=0
countF=0
for a in agenti:
tot_liq+=a.get_lqd()
if a.typ_agn=="retail":
mRet+=a.get_lqd()
countR+=1
if a.typ_agn=="top":
mTop+=a.get_lqd()
countT+=1
if a.typ_agn=="big":
mBig+=a.get_lqd()
countB+=1
if a.agn_str==7 and a.agg_type=="Inv":
inv+=a.get_lqd()
countI+=1
if a.agn_str==7 and a.agg_type=="Flw":
flw+=a.get_lqd()
countF+=1
invG.append(float(inv/countI)/10000)
flwG.append(float(flw/countF)/10000)
topG.append(float(mTop/countT)/10000)
bigG.append(float(mBig/countB)/10000)
retG.append(float(mRet/countR)/10000)
print "Media Top "+str(int(mTop/countT)/10000)
print "Media Big "+str((mBig/countB)/10000)
print "Media Retail "+str((mRet/countR)/10000)
def show_grp(dtaT):
plt.figure(1)
plt.subplot(711)
plt.title("Andamento titolo: MBPS")
plt.plot(dtaT, color='black')
plt.subplot(713)
plt.title("Media patrimonio Retail")
plt.plot(retG, color='green')#, 'b-',bigG,'g-')#,topG,'r-')
plt.subplot(715)
plt.title("Media patrimonio Big")
plt.plot(bigG,color='blue')
plt.subplot(717)
plt.title("Media patrimonio Top")
plt.plot(topG,color='red')
plt.show()
plt.figure(2)
plt.subplot(311)
plt.title("Aggiotaggio: media patrimonio Inv")
plt.plot(invG, color='black')
plt.subplot(313)
plt.title("Aggiotaggio: media patrimonio Flw")
plt.plot(flwG, color='blue')
plt.show()
def exe():
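    # Simulation loop: for each day, run the configured number of intraday
    # iterations; agents update strategies and valuations, the market updates
    # the security price, and per-class wealth statistics are collected for
    # the plots produced at the end.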
ricchezza=[]
tmp=0.0
print "Inizio computazione"
dtaT=[]
for a in agenti:
print a.to_string()
    # Loop over the days to simulate
for i in range(sch[0]):
print "### Day "+str(i+1)+" ###"
        # TODO: generate (daily) events that influence the security at a given iteration
print "APERTURA: "
#for t in market.titoli:
print "titolo: "+titolo.get_id_ttl()+" valore: 0."+str(titolo.get_value())
        # Loop over the intraday iterations to run
for j in range(sch[1]):
for a in agenti:
tmp+=float(a.liquidita)/10000
ricchezza.append(tmp)
tmp=0.0
print "Iterazione: "+str(j+1)
#market.ltl_update()
for a in agenti:
a.update_str()
stat_mkr(agenti)
market.update()
for a in agenti:
a.update_value()
#market.update_evnt(titolo, evG.get_event())
#print ''
#for a in agenti:
# print a.to_string()
#print ''
evG.next_event()
print "####Day "+str(i+1)+" Iterazione "+str(j+1)+ " Terminato####"
print "UPDATE MARKET "+str(titolo.get_value())
dtaT.append(titolo.get_value())
print "CHIUSURA: "
for t in market.titoli:
print "titolo: "+t.get_id_ttl()+" valore: 0."+str(t.get_value())
print "ricchezza"
print ricchezza
print "Andamento Titolo: "
print dtaT
print "Andamento Retail: "
print retG
print "Andamento Big: "
print bigG
print "Andamento Top: "
print topG
show_grp(dtaT)
initialization('conf.txt')
exe()
| mit |
gVallverdu/pymatgen | pymatgen/analysis/interface.py | 4 | 46759 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to store, generate, and manipulate material interfaces.
"""
from pymatgen.core.surface import SlabGenerator
from pymatgen import Lattice, Structure
from pymatgen.core.surface import Slab
from itertools import product
import numpy as np
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from matplotlib import pyplot as plt
from pymatgen.core.operations import SymmOp
from matplotlib.lines import Line2D
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.core.sites import PeriodicSite
from pymatgen.analysis.substrate_analyzer import (SubstrateAnalyzer, reduce_vectors)
import warnings
__author__ = "Eric Sivonxay, Shyam Dwaraknath, and Kyle Bystrom"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Kyle Bystrom"
__email__ = "[email protected]"
__date__ = "5/29/2019"
__status__ = "Prototype"
class Interface(Structure):
"""
This class stores data for defining an interface between two structures.
It is a subclass of pymatgen.core.structure.Structure.
"""
def __init__(self, lattice, species, coords,
sub_plane, film_plane,
sub_init_cell, film_init_cell,
modified_sub_structure, modified_film_structure,
strained_sub_structure, strained_film_structure,
validate_proximity=False,
coords_are_cartesian=False,
init_inplane_shift=None,
charge=None,
site_properties=None,
to_unit_cell=False):
"""
Makes an interface structure, a Structure object with additional
information and methods pertaining to interfaces.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
sub_plane (list): Substrate plane in the form of a list of integers
(based on the sub_init_cell), e.g.: [1, 2, 3].
film_plane (list): Film plane in the form of a list of integers
(based on the film_init_cell), e.g. [1, 2, 3].
sub_init_cell (Structure): initial bulk substrate structure
film_init_cell (Structure): initial bulk film structure
site_properties (dict): Properties associated with the sites as a
dict of sequences. The sequences have to be the same length as
the atomic species and fractional_coords. For an interface, you should
have the 'interface_label' properties to classify the sites as
'substrate' and 'film'.
modified_sub_structure (Slab): substrate supercell slab.
modified_film_structure (Slab): film supercell slab.
strained_sub_structure (Slab): strained substrate supercell slab
strained_film_structure (Slab): strained film supercell slab
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
init_inplane_shift (length-2 list of float, in Cartesian coordinates):
The initial shift of the film relative to the substrate
in the plane of the interface.
            charge (float, optional): overall charge of the structure
"""
super().__init__(
lattice, species, coords, validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties, charge=charge)
self.modified_sub_structure = modified_sub_structure
self.modified_film_structure = modified_film_structure
self.strained_sub_structure = strained_sub_structure
self.strained_film_structure = strained_film_structure
self.sub_plane = sub_plane
self.film_plane = film_plane
self.sub_init_cell = sub_init_cell
self.film_init_cell = film_init_cell
z_shift = np.min(self.film.cart_coords[:, 2]) - np.max(self.substrate.cart_coords[:, 2])
if init_inplane_shift is None:
init_inplane_shift = np.array([0.0, 0.0])
self._offset_vector = np.append(init_inplane_shift, [z_shift])
def shift_film_along_surface_lattice(self, da, db):
"""
Given two floats da and db, adjust the shift vector
by da * (first lattice vector) + db * (second lattice vector).
This shift is in the plane of the interface.
I.e. da and db are fractional coordinates.
Args:
da (float): shift in the first lattice vector
db (float): shift in the second lattice vector
"""
self.shift_film(da * self.lattice.matrix[0] + db * self.lattice.matrix[1])
def change_z_shift(self, dz):
"""
Adjust the spacing between the substrate and film layers by dz Angstroms
Args:
dz (float): shift perpendicular to the plane (in Angstroms)
"""
self.shift_film(np.array([0.0, 0.0, dz]))
def shift_film(self, delta):
"""
Shift the film's position relative to the substrate.
Args:
delta (length-3 list of float or numpy array): Cartesian coordinate
vector by which to shift the film. After this operation
self.offset_vector -> self.offset_vector + delta.
"""
if self.offset_vector[2] + delta[2] < 0 or delta[2] > self.vacuum_thickness:
raise ValueError("The shift {} will collide the film and substrate.".format(delta))
self._offset_vector += np.array(delta)
self.translate_sites(self.get_film_indices(),
delta, frac_coords=False, to_unit_cell=True)
@property
def offset_vector(self):
"""
Displacement of the origin of the film structure relative to that
of the substrate structure in Cartesian coordinates.
"""
return self._offset_vector.copy()
@offset_vector.setter
def offset_vector(self, offset_vector):
delta = offset_vector - self._offset_vector
self.shift_film(delta)
@property
def ab_shift(self):
"""
The 2D component of offset_vector along the interface plane
in fractional coordinates. I.e. if ab_shift = [a, b], the
Cartesian coordinate shift in the interface plane
is a * (first lattice vector) + b * (second lattice vector).
"""
return np.dot(self.offset_vector, np.linalg.inv(self.lattice.matrix))[:2]
@ab_shift.setter
def ab_shift(self, ab_shift):
delta = ab_shift - self.ab_shift
self.shift_film_along_surface_lattice(delta[0], delta[1])
@property
def z_shift(self):
"""
        The component of offset_vector perpendicular to the interface plane,
        in Cartesian coordinates (Angstroms). I.e. if z_shift = z, the distance
        between the substrate and film planes is z.
"""
return self.offset_vector[2]
@z_shift.setter
def z_shift(self, z_shift):
delta = z_shift - self.z_shift
self.change_z_shift(delta)
@property
def vacuum_thickness(self):
"""
Vacuum buffer above the film.
"""
return np.min(self.substrate.cart_coords[:, 2]) + self.lattice.c - np.max(self.film.cart_coords[:, 2])
@property
def substrate_sites(self):
"""
Return the substrate sites of the interface.
"""
sub_sites = []
for i, tag in enumerate(self.site_properties['interface_label']):
if 'substrate' in tag:
sub_sites.append(self.sites[i])
return sub_sites
@property
def substrate(self):
"""
Return the substrate (Structure) of the interface.
"""
return Structure.from_sites(self.substrate_sites)
def get_film_indices(self):
"""
Retrieve the indices of the film sites
"""
film_sites = []
for i, tag in enumerate(self.site_properties['interface_label']):
if 'film' in tag:
film_sites.append(i)
return film_sites
@property
def film_sites(self):
"""
Return the film sites of the interface.
"""
film_sites = []
for i, tag in enumerate(self.site_properties['interface_label']):
if 'film' in tag:
film_sites.append(self.sites[i])
return film_sites
@property
def film(self):
"""
Return the film (Structure) of the interface.
"""
return Structure.from_sites(self.film_sites)
def copy(self, site_properties=None):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Returns:
A copy of the Interface.
"""
props = self.site_properties
if site_properties:
props.update(site_properties)
return Interface(self.lattice, self.species_and_occu, self.frac_coords,
self.sub_plane, self.film_plane,
self.sub_init_cell, self.film_init_cell,
self.modified_sub_structure, self.modified_film_structure,
self.strained_sub_structure, self.strained_film_structure,
validate_proximity=False, coords_are_cartesian=False,
init_inplane_shift=self.offset_vector[:2], charge=self.charge,
site_properties=self.site_properties)
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
struct_copy = self.copy()
struct_copy.sort(key=key, reverse=reverse)
return struct_copy
def as_dict(self):
"""
:return: MSONable dict
"""
d = super().as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["sub_plane"] = self.sub_plane
d["film_plane"] = self.film_plane
d["sub_init_cell"] = self.sub_init_cell
d["film_init_cell"] = self.film_init_cell
d["modified_sub_structure"] = self.modified_sub_structure
d["modified_film_structure"] = self.modified_film_structure
d["strained_sub_structure"] = self.strained_sub_structure
d["strained_film_structure"] = self.strained_film_structure
d['init_inplane_shift'] = self.offset_vector[0:2]
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: Interface
"""
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
s = Structure.from_sites(sites)
return Interface(
lattice=lattice,
species=s.species_and_occu, coords=s.frac_coords,
sub_plane=d["sub_plane"], film_plane=d["film_plane"],
sub_init_cell=d["sub_init_cell"], film_init_cell=d["film_init_cell"],
modified_sub_structure=d["modified_sub_structure"], modified_film_structure=d["modified_film_structure"],
strained_sub_structure=d["strained_sub_structure"], strained_film_structure=d["strained_film_structure"],
site_properties=s.site_properties, init_inplane_shift=d["init_inplane_shift"]
)
class InterfaceBuilder:
"""
This class constructs the epitaxially matched interfaces between two crystalline slabs
"""
def __init__(self, substrate_structure, film_structure):
"""
Args:
substrate_structure (Structure): structure of substrate
film_structure (Structure): structure of film
"""
# Bulk structures
self.original_substrate_structure = substrate_structure
self.original_film_structure = film_structure
self.matches = []
self.match_index = None
# SlabGenerator objects for the substrate and film
self.sub_sg = None
self.substrate_layers = None
self.film_sg = None
self.film_layers = None
# Structures with no vacuum
self.substrate_structures = []
self.film_structures = []
# "slab" structure (with no vacuum) oriented with a direction along x-axis and ab plane normal aligned with
# z-axis
self.oriented_substrate = None
self.oriented_film = None
# Strained structures with no vacuum
self.strained_substrate = None
self.strained_film = None
# Substrate with transformation/matches applied
self.modified_substrate_structures = []
self.modified_film_structures = []
        # Non-stoichiometric slabs with symmetric surfaces, as generated by pymatgen. Note: this step has proven
        # unreliable in tests, so check the resulting slabs carefully.
self.sym_modified_substrate_structures = []
self.sym_modified_film_structures = []
# Interface structures
self.interfaces = []
self.interface_labels = []
def get_summary_dict(self):
"""
Return dictionary with information about the InterfaceBuilder,
with currently generated structures included.
"""
d = {'match': self.matches[0]}
d['substrate_layers'] = self.substrate_layers
d['film_layers'] = self.film_layers
d['bulk_substrate'] = self.original_substrate_structure
d['bulk_film'] = self.original_film_structure
d['strained_substrate'] = self.strained_substrate
d['strained_film'] = self.strained_film
d['slab_substrates'] = self.modified_substrate_structures
d['slab_films'] = self.modified_film_structures
d['interfaces'] = self.interfaces
d['interface_labels'] = self.interface_labels
return d
def write_all_structures(self):
"""
Write all of the structures relevant for
the interface calculation to VASP POSCAR files.
"""
_poscar = Poscar(self.original_substrate_structure)
_poscar.write_file('bulk_substrate_POSCAR')
_poscar = Poscar(self.original_film_structure)
_poscar.write_file('bulk_film_POSCAR')
_poscar = Poscar(self.strained_substrate)
_poscar.write_file('strained_substrate_POSCAR')
_poscar = Poscar(self.strained_film)
_poscar.write_file('strained_film_POSCAR')
for i, interface in enumerate(self.modified_substrate_structures):
_poscar = Poscar(interface)
_poscar.write_file('slab_substrate_%d_POSCAR' % i)
for i, interface in enumerate(self.modified_film_structures):
_poscar = Poscar(interface)
_poscar.write_file('slab_film_%d_POSCAR' % i)
for i, interface in enumerate(self.film_structures):
_poscar = Poscar(interface)
_poscar.write_file('slab_unit_film_%d_POSCAR' % i)
for label, interface in zip(self.interface_labels, self.interfaces):
_poscar = Poscar(interface)
_poscar.write_file('interface_%s_POSCAR' % label.replace("/", "-"))
return
def generate_interfaces(self, film_millers=None, substrate_millers=None, film_layers=3, substrate_layers=3,
**kwargs):
"""
Generate a list of Interface (Structure) objects and store them to self.interfaces.
Args:
film_millers (list of [int]): list of film surfaces
substrate_millers (list of [int]): list of substrate surfaces
film_layers (int): number of layers of film to include in Interface structures.
substrate_layers (int): number of layers of substrate to include in Interface structures.
"""
self.get_oriented_slabs(lowest=True, film_millers=film_millers,
substrate_millers=substrate_millers, film_layers=film_layers,
substrate_layers=substrate_layers)
self.combine_slabs(**kwargs)
return
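    # Minimal usage sketch (names are illustrative, not part of this module):
    #   builder = InterfaceBuilder(substrate_bulk_structure, film_bulk_structure)
    #   builder.generate_interfaces(film_layers=3, substrate_layers=3)
    #   for label, iface in zip(builder.interface_labels, builder.interfaces):
    #       print(label, iface.num_sites)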
def get_oriented_slabs(self, film_layers=3, substrate_layers=3, match_index=0, **kwargs):
"""
Get a list of oriented slabs for constructing interfaces and put them
in self.film_structures, self.substrate_structures, self.modified_film_structures,
and self.modified_substrate_structures.
Currently only uses first match (lowest SA) in the list of matches
Args:
film_layers (int): number of layers of film to include in Interface structures.
substrate_layers (int): number of layers of substrate to include in Interface structures.
match_index (int): ZSL match from which to construct slabs.
"""
self.match_index = match_index
self.substrate_layers = substrate_layers
self.film_layers = film_layers
if 'zslgen' in kwargs.keys():
sa = SubstrateAnalyzer(zslgen=kwargs.get('zslgen'))
del kwargs['zslgen']
else:
sa = SubstrateAnalyzer()
# Generate all possible interface matches
self.matches = list(sa.calculate(self.original_film_structure, self.original_substrate_structure, **kwargs))
match = self.matches[match_index]
# Generate substrate slab and align x axis to (100) and slab normal to (001)
# Get no-vacuum structure for strained bulk calculation
self.sub_sg = SlabGenerator(self.original_substrate_structure, match['sub_miller'], substrate_layers, 0,
in_unit_planes=True,
reorient_lattice=False,
primitive=False)
no_vac_sub_slab = self.sub_sg.get_slab()
no_vac_sub_slab = get_shear_reduced_slab(no_vac_sub_slab)
self.oriented_substrate = align_x(no_vac_sub_slab)
self.oriented_substrate.sort()
# Get slab with vacuum
self.sub_sg = SlabGenerator(self.original_substrate_structure, match['sub_miller'], substrate_layers, 1,
in_unit_planes=True,
reorient_lattice=False,
primitive=False)
sub_slabs = self.sub_sg.get_slabs()
for i, sub_slab in enumerate(sub_slabs):
sub_slab = get_shear_reduced_slab(sub_slab)
sub_slab = align_x(sub_slab)
sub_slab.sort()
sub_slabs[i] = sub_slab
self.substrate_structures = sub_slabs
# Generate film slab and align x axis to (100) and slab normal to (001)
# Get no-vacuum structure for strained bulk calculation
self.film_sg = SlabGenerator(self.original_film_structure, match['film_miller'], film_layers, 0,
in_unit_planes=True,
reorient_lattice=False,
primitive=False)
no_vac_film_slab = self.film_sg.get_slab()
no_vac_film_slab = get_shear_reduced_slab(no_vac_film_slab)
self.oriented_film = align_x(no_vac_film_slab)
self.oriented_film.sort()
# Get slab with vacuum
self.film_sg = SlabGenerator(self.original_film_structure, match['film_miller'], film_layers, 1,
in_unit_planes=True,
reorient_lattice=False,
primitive=False)
film_slabs = self.film_sg.get_slabs()
for i, film_slab in enumerate(film_slabs):
film_slab = get_shear_reduced_slab(film_slab)
film_slab = align_x(film_slab)
film_slab.sort()
film_slabs[i] = film_slab
self.film_structures = film_slabs
# Apply transformation to produce matched area and a & b vectors
self.apply_transformations(match)
        # Get non-stoichiometric substrate slabs
sym_sub_slabs = []
for sub_slab in self.modified_substrate_structures:
sym_sub_slab = self.sub_sg.nonstoichiometric_symmetrized_slab(sub_slab)
for slab in sym_sub_slab:
if not slab == sub_slab:
sym_sub_slabs.append(slab)
self.sym_modified_substrate_structures = sym_sub_slabs
        # Get non-stoichiometric film slabs
sym_film_slabs = []
for film_slab in self.modified_film_structures:
sym_film_slab = self.film_sg.nonstoichiometric_symmetrized_slab(film_slab)
for slab in sym_film_slab:
if not slab == film_slab:
sym_film_slabs.append(slab)
self.sym_modified_film_structures = sym_film_slabs
# Strained film structures (No Vacuum)
self.strained_substrate, self.strained_film = strain_slabs(self.oriented_substrate, self.oriented_film)
return
def apply_transformation(self, structure, matrix):
"""
Make a supercell of structure using matrix
Args:
structure (Slab): Slab to make supercell of
matrix (3x3 np.ndarray): supercell matrix
Returns:
(Slab) The supercell of structure
"""
modified_substrate_structure = structure.copy()
# Apply scaling
modified_substrate_structure.make_supercell(matrix)
# Reduce vectors
new_lattice = modified_substrate_structure.lattice.matrix.copy()
new_lattice[:2, :] = reduce_vectors(*modified_substrate_structure.lattice.matrix[:2, :])
modified_substrate_structure = Slab(lattice=Lattice(new_lattice), species=modified_substrate_structure.species,
coords=modified_substrate_structure.cart_coords,
miller_index=modified_substrate_structure.miller_index,
oriented_unit_cell=modified_substrate_structure.oriented_unit_cell,
shift=modified_substrate_structure.shift,
scale_factor=modified_substrate_structure.scale_factor,
coords_are_cartesian=True, energy=modified_substrate_structure.energy,
reorient_lattice=modified_substrate_structure.reorient_lattice,
to_unit_cell=True)
return modified_substrate_structure
def apply_transformations(self, match):
"""
Using ZSL match, transform all of the film_structures by the ZSL
supercell transformation.
Args:
match (dict): ZSL match returned by ZSLGenerator.__call__
"""
film_transformation = match["film_transformation"]
sub_transformation = match["substrate_transformation"]
modified_substrate_structures = [struct.copy() for struct in self.substrate_structures]
modified_film_structures = [struct.copy() for struct in self.film_structures]
# Match angles in lattices with 𝛾=θ° and 𝛾=(180-θ)°
if np.isclose(180 - modified_film_structures[0].lattice.gamma, modified_substrate_structures[0].lattice.gamma,
atol=3):
reflection = SymmOp.from_rotation_and_translation(((-1, 0, 0), (0, 1, 0), (0, 0, 1)), (0, 0, 1))
for modified_film_structure in modified_film_structures:
modified_film_structure.apply_operation(reflection, fractional=True)
self.oriented_film.apply_operation(reflection, fractional=True)
sub_scaling = np.diag(np.diag(sub_transformation))
# Turn into 3x3 Arrays
sub_scaling = np.diag(np.append(np.diag(sub_scaling), 1))
temp_matrix = np.diag([1, 1, 1])
temp_matrix[:2, :2] = sub_transformation
for modified_substrate_structure in modified_substrate_structures:
modified_substrate_structure = self.apply_transformation(modified_substrate_structure, temp_matrix)
self.modified_substrate_structures.append(modified_substrate_structure)
self.oriented_substrate = self.apply_transformation(self.oriented_substrate, temp_matrix)
film_scaling = np.diag(np.diag(film_transformation))
# Turn into 3x3 Arrays
film_scaling = np.diag(np.append(np.diag(film_scaling), 1))
temp_matrix = np.diag([1, 1, 1])
temp_matrix[:2, :2] = film_transformation
for modified_film_structure in modified_film_structures:
modified_film_structure = self.apply_transformation(modified_film_structure, temp_matrix)
self.modified_film_structures.append(modified_film_structure)
self.oriented_film = self.apply_transformation(self.oriented_film, temp_matrix)
return
def combine_slabs(self):
"""
Combine the slabs generated by get_oriented_slabs into interfaces
"""
all_substrate_variants = []
sub_labels = []
for i, slab in enumerate(self.modified_substrate_structures):
all_substrate_variants.append(slab)
sub_labels.append(str(i))
sg = SpacegroupAnalyzer(slab, symprec=1e-3)
if not sg.is_laue():
mirrored_slab = slab.copy()
reflection_z = SymmOp.from_rotation_and_translation(((1, 0, 0), (0, 1, 0), (0, 0, -1)), (0, 0, 0))
mirrored_slab.apply_operation(reflection_z, fractional=True)
translation = [0, 0, -min(mirrored_slab.frac_coords[:, 2])]
mirrored_slab.translate_sites(range(mirrored_slab.num_sites), translation)
all_substrate_variants.append(mirrored_slab)
sub_labels.append('%dm' % i)
all_film_variants = []
film_labels = []
for i, slab in enumerate(self.modified_film_structures):
all_film_variants.append(slab)
film_labels.append(str(i))
sg = SpacegroupAnalyzer(slab, symprec=1e-3)
if not sg.is_laue():
mirrored_slab = slab.copy()
reflection_z = SymmOp.from_rotation_and_translation(((1, 0, 0), (0, 1, 0), (0, 0, -1)), (0, 0, 0))
mirrored_slab.apply_operation(reflection_z, fractional=True)
translation = [0, 0, -min(mirrored_slab.frac_coords[:, 2])]
mirrored_slab.translate_sites(range(mirrored_slab.num_sites), translation)
all_film_variants.append(mirrored_slab)
film_labels.append('%dm' % i)
# substrate first index, film second index
self.interfaces = []
self.interface_labels = []
# self.interfaces = [[None for j in range(len(all_film_variants))] for i in range(len(all_substrate_variants))]
for i, substrate in enumerate(all_substrate_variants):
for j, film in enumerate(all_film_variants):
self.interfaces.append(self.make_interface(substrate, film))
self.interface_labels.append('%s/%s' % (film_labels[j], sub_labels[i]))
def make_interface(self, slab_substrate, slab_film, offset=None):
"""
Strain a film to fit a substrate and generate an interface.
Args:
slab_substrate (Slab): substrate structure supercell
slab_film (Slab): film structure supercell
offset ([int]): separation vector of film and substrate
"""
# Check if lattices are equal. If not, strain them to match
# NOTE: CHANGED THIS TO MAKE COPY OF SUBSTRATE/FILM, self.modified_film_structures NO LONGER STRAINED
unstrained_slab_substrate = slab_substrate.copy()
slab_substrate = slab_substrate.copy()
unstrained_slab_film = slab_film.copy()
slab_film = slab_film.copy()
latt_1 = slab_substrate.lattice.matrix.copy()
latt_1[2, :] = [0, 0, 1]
latt_2 = slab_film.lattice.matrix.copy()
latt_2[2, :] = [0, 0, 1]
if not Lattice(latt_1) == Lattice(latt_2):
# Calculate lattice strained to match:
matched_slab_substrate, matched_slab_film = strain_slabs(slab_substrate, slab_film)
else:
matched_slab_substrate = slab_substrate
matched_slab_film = slab_film
# Ensure substrate has positive c-direction:
if matched_slab_substrate.lattice.matrix[2, 2] < 0:
latt = matched_slab_substrate.lattice.matrix.copy()
latt[2, 2] *= -1
new_struct = matched_slab_substrate.copy()
new_struct.lattice = Lattice(latt)
matched_slab_substrate = new_struct
# Ensure film has positive c-direction:
if matched_slab_film.lattice.matrix[2, 2] < 0:
latt = matched_slab_film.lattice.matrix.copy()
latt[2, 2] *= -1
new_struct = matched_slab_film.copy()
new_struct.lattice = Lattice(latt)
matched_slab_film = new_struct
if offset is None:
offset = (2.5, 0.0, 0.0)
_structure = merge_slabs(matched_slab_substrate, matched_slab_film, *offset)
orthogonal_structure = _structure.get_orthogonal_c_slab()
orthogonal_structure.sort()
if not orthogonal_structure.is_valid(tol=1):
warnings.warn("Check generated structure, it may contain atoms too closely placed")
# offset_vector = (offset[1], offset[2], offset[0])
interface = Interface(orthogonal_structure.lattice.copy(), orthogonal_structure.species,
orthogonal_structure.frac_coords,
slab_substrate.miller_index, slab_film.miller_index,
self.original_substrate_structure, self.original_film_structure,
unstrained_slab_substrate, unstrained_slab_film,
slab_substrate, slab_film, init_inplane_shift=offset[1:],
site_properties=orthogonal_structure.site_properties)
return interface
def visualize_interface(self, interface_index=0, show_atoms=False, n_uc=2):
"""
Plot the film-substrate superlattice match, the film superlattice,
and the substrate superlattice in three separate plots and show them.
Args:
interface_index (int, 0): Choice of interface to plot
show_atoms (bool, False): Whether to plot atomic sites
n_uc (int, 2): Number of 2D unit cells of the interface in each direction.
                (The unit cell of the interface is the supercell of the substrate
                that matches a supercell of the film.)
"""
film_index = int(self.interface_labels[interface_index][0])
sub_index = int(self.interface_labels[interface_index][2])
visualize_interface(self.interfaces[interface_index], show_atoms, n_uc)
visualize_superlattice(self.film_structures[film_index], self.modified_film_structures[film_index],
film=True, show_atoms=show_atoms, n_uc=n_uc)
visualize_superlattice(self.substrate_structures[sub_index], self.modified_substrate_structures[sub_index],
film=False, show_atoms=show_atoms, n_uc=n_uc)
def visualize_interface(interface, show_atoms=False, n_uc=2):
"""
Plot the match of the substrate and film superlattices.
Args:
interface (Interface): Interface object
show_atoms (bool, False): Whether to plot atomic sites
n_uc (int, 2): Number of 2D unit cells of the interface in each direction.
            (The unit cell of the interface is the supercell of the substrate
            that matches a supercell of the film.)
"""
# sub_struct = interface.sub_init_cell
# film_struct = interface.film_init_cell
modified_sub_struct = interface.modified_sub_structure
modified_film_struct = interface.modified_film_structure
rotated_modified_film_structure = align_x(modified_film_struct.copy(),
get_ortho_axes(modified_sub_struct))
# Show super lattice matches
plt.figure(dpi=150)
legend_elements = []
for i, j in product(range(-n_uc, n_uc), range(-n_uc, n_uc)):
v1 = modified_sub_struct.lattice.matrix[0, :]
v2 = modified_sub_struct.lattice.matrix[1, :]
current_start = v1 * i + v2 * j
plt.plot([current_start[0], current_start[0] + v1[0]], [current_start[1], current_start[1] + v1[1]], '-k',
linewidth=0.3)
plt.plot([current_start[0], current_start[0] + v2[0]], [current_start[1], current_start[1] + v2[1]], '-k',
linewidth=0.3)
if show_atoms:
plt.plot(
np.add(modified_sub_struct.cart_coords[:, 0], current_start[0]),
np.add(modified_sub_struct.cart_coords[:, 1], current_start[1]),
'or', markersize=0.1)
legend_elements.append(Line2D([0], [0], color='k', lw=1, label='Substrate Superlattice'))
if show_atoms:
legend_elements.append(Line2D([0], [0], marker='o', color='w', lw=1, label='Substrate atoms',
markerfacecolor='r', markersize=3))
for i, j in product(range(-n_uc, n_uc), range(-n_uc, n_uc)):
v1 = rotated_modified_film_structure.lattice.matrix[0, :]
v2 = rotated_modified_film_structure.lattice.matrix[1, :]
current_start = v1 * i + v2 * j
plt.plot([current_start[0], current_start[0] + v1[0]], [current_start[1], current_start[1] + v1[1]], '-b',
linewidth=0.3)
plt.plot([current_start[0], current_start[0] + v2[0]], [current_start[1], current_start[1] + v2[1]], '-b',
linewidth=0.3)
if show_atoms:
plt.plot(np.add(rotated_modified_film_structure.cart_coords[:, 0], current_start[0]),
np.add(rotated_modified_film_structure.cart_coords[:, 1], current_start[1]),
'og', markersize=0.1)
legend_elements.append(Line2D([0], [0], color='b', lw=1, label='Film Superlattice'))
if show_atoms:
legend_elements.append(Line2D([0], [0], marker='o', color='w', lw=1, label='Film atoms',
markerfacecolor='g', markersize=3))
plt.axis('scaled')
plt.title('Superlattice Match')
plt.legend(handles=legend_elements)
plt.show()
def visualize_superlattice(struct, modified_struct, film=True, show_atoms=False, n_uc=2):
"""
Visualize the unit cell-supercell match for either the film or substrate
(specified by film boolean tag).
Args:
struct (Slab): unit cell slab
modified_struct (Slab): supercell slab
film (bool, True): True=label plot as film, False=label plot as substrate
show_atoms (bool, False): Whether to plot atomic sites
n_uc (int, 2): Number of 2D unit cells of the interface in each direction.
            (The unit cell of the interface is the supercell of the substrate
            that matches a supercell of the film.)
"""
label = 'Film' if film else 'Substrate'
plt.figure(dpi=150)
legend_elements = []
for i, j in product(range(-n_uc, n_uc), range(-n_uc, n_uc)):
v1 = modified_struct.lattice.matrix[0, :]
v2 = modified_struct.lattice.matrix[1, :]
current_start = v1 * i + v2 * j
plt.plot([current_start[0], current_start[0] + v1[0]], [current_start[1], current_start[1] + v1[1]], '-k',
linewidth=0.3)
plt.plot([current_start[0], current_start[0] + v2[0]], [current_start[1], current_start[1] + v2[1]], '-k',
linewidth=0.3)
if show_atoms:
plt.plot(
np.add(modified_struct.cart_coords[:, 0], current_start[0]),
np.add(modified_struct.cart_coords[:, 1], current_start[1]),
'or', markersize=0.1)
legend_elements.append(Line2D([0], [0], color='k', lw=1, label='%s Superlattice' % label))
if show_atoms:
legend_elements.append(Line2D([0], [0], marker='o', color='w', lw=1, label='%s Superlattice atoms' % label,
markerfacecolor='r', markersize=3))
uc_v1 = struct.lattice.matrix[0, :]
uc_v2 = struct.lattice.matrix[1, :]
sl_v1 = modified_struct.lattice.matrix[0, :]
sl_v2 = modified_struct.lattice.matrix[1, :]
sl_v = (sl_v1 + sl_v2) * n_uc
uc_v = (uc_v1 + uc_v2) * n_uc
rx = np.abs(int(n_uc * sl_v[0] / uc_v[0]))
ry = np.abs(int(n_uc * sl_v[1] / uc_v[1]))
for i, j in product(range(-rx, rx), range(-ry, ry)):
v1 = struct.lattice.matrix[0, :]
v2 = struct.lattice.matrix[1, :]
current_start = v1 * i + v2 * j
plt.plot([current_start[0], current_start[0] + v1[0]], [current_start[1], current_start[1] + v1[1]], '-b',
linewidth=0.3)
plt.plot([current_start[0], current_start[0] + v2[0]], [current_start[1], current_start[1] + v2[1]], '-b',
linewidth=0.3)
if show_atoms:
plt.plot(np.add(struct.cart_coords[:, 0], current_start[0]),
np.add(struct.cart_coords[:, 1], current_start[1]),
'og', markersize=0.1)
legend_elements.append(Line2D([0], [0], color='b', lw=1, label='%s Lattice' % label))
if show_atoms:
legend_elements.append(Line2D([0], [0], marker='o', color='w', lw=1, label='%s atoms' % label,
markerfacecolor='g', markersize=3))
plt.axis('scaled')
plt.legend(handles=legend_elements)
plt.title('%s unit cell and superlattice' % label)
plt.show()
def merge_slabs(substrate, film, slab_offset, x_offset, y_offset, vacuum=20, **kwargs):
"""
Given substrate and film supercells (oriented to match as closely as possible),
strain the film to match the substrate lattice and combine the slabs.
Args:
slab_offset: spacing between the substrate and film
        x_offset, y_offset: in-plane displacement of the film in Cartesian coordinates
vacuum: vacuum buffer above the film
Returns:
combined_structure (Slab): A structure with the strained film and substrate
combined into one structure
"""
# strain film to match substrate
new_latt = film.lattice.matrix.copy()
new_latt[:2, :2] = substrate.lattice.matrix[:2, :2]
film.lattice = Lattice(new_latt)
combined_species = [*substrate.species, *film.species]
if kwargs.get('cell_height'):
height = kwargs.get('cell_height')
else:
added_height = vacuum + slab_offset + film.lattice.c
height = added_height + substrate.lattice.matrix[2, 2]
combined_lattice = substrate.lattice.matrix.copy()
combined_lattice[2, :] *= height / substrate.lattice.matrix[2, 2]
max_substrate = np.max(substrate.cart_coords[:, 2])
min_substrate = np.min(film.cart_coords[:, 2])
offset = max_substrate - min_substrate + slab_offset
offset_film_coords = [np.add(coord, [x_offset, y_offset, offset]) for coord in film.cart_coords]
combined_coords = [*substrate.cart_coords, *offset_film_coords]
combined_site_properties = {}
for key, item in substrate.site_properties.items():
combined_site_properties[key] = [*substrate.site_properties[key], *film.site_properties[key]]
labels = ['substrate'] * len(substrate) + ['film'] * len(film)
combined_site_properties['interface_label'] = labels
combined_structure = Slab(lattice=Lattice(combined_lattice), species=combined_species,
coords=combined_coords,
miller_index=substrate.miller_index,
oriented_unit_cell=substrate,
shift=substrate.shift,
scale_factor=substrate.scale_factor,
coords_are_cartesian=True, energy=substrate.energy,
reorient_lattice=False, to_unit_cell=True,
site_properties=combined_site_properties)
return combined_structure
def strain_slabs(sub_slab, film_slab):
"""
Strain the film_slab to match the sub_slab,
orient the structures to match each other,
and return the new matching structures.
Args:
sub_slab (Slab): substrate supercell slab
film_slab (Slab): film supercell slab
Returns:
sub_struct (Slab): substrate structure oriented
to match the film supercell
film_struct (Slab): film structure strained to match
the substrate supercell lattice.
"""
sub_struct = sub_slab.copy()
latt_1 = sub_struct.lattice.matrix.copy()
film_struct = align_x(film_slab, get_ortho_axes(sub_struct)).copy()
latt_2 = film_struct.lattice.matrix.copy()
# Rotate film so its diagonal matches with the sub's diagonal
diag_vec = np.add(latt_1[0, :], latt_1[1, :])
sub_norm_diag_vec = diag_vec / np.linalg.norm(diag_vec)
sub_b = np.cross(sub_norm_diag_vec, [0, 0, 1])
sub_matrix = np.vstack([sub_norm_diag_vec, sub_b, [0, 0, 1]])
diag_vec = np.add(latt_2[0, :], latt_2[1, :])
film_norm_diag_vec = diag_vec / np.linalg.norm(diag_vec)
film_b = np.cross(film_norm_diag_vec, [0, 0, 1])
film_matrix = np.vstack([film_norm_diag_vec, film_b, [0, 0, 1]])
rotation = np.dot(np.linalg.inv(film_matrix), sub_matrix)
new_latt = Lattice(np.dot(film_struct.lattice.matrix, rotation))
film_struct.lattice = new_latt
# Average the two lattices (Should get equal strain?)
mean_a = np.mean([film_struct.lattice.matrix[0, :], sub_struct.lattice.matrix[0, :]], axis=0)
mean_b = np.mean([film_struct.lattice.matrix[1, :], sub_struct.lattice.matrix[1, :]], axis=0)
new_latt = np.vstack([mean_a, mean_b, sub_struct.lattice.matrix[2, :]])
sub_struct.lattice = Lattice(new_latt)
new_latt = np.vstack([mean_a, mean_b, film_struct.lattice.matrix[2, :]])
film_struct.lattice = Lattice(new_latt)
return sub_struct, film_struct
def get_ortho_axes(structure):
"""
Get an orthonormal set of axes for the structure with the first axis
pointing along the a lattice vector.
Args:
structure (Structure)
Returns:
3x3 numpy matrix with the axes
"""
sub_a = structure.lattice.matrix[0, :] / np.linalg.norm(structure.lattice.matrix[0, :])
sub_c = third_vect(sub_a, structure.lattice.matrix[1, :])
sub_b = third_vect(sub_c, sub_a)
sub_b = sub_b / np.linalg.norm(sub_b)
return np.vstack((sub_a, sub_b, sub_c))
def align_x(slab, orthogonal_basis=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]):
"""
Align the a lattice vector of slab with the x axis. Optionally specify
an orthogonal_basis to align according to a different set of axes
Args:
slab (Slab): input structure
orthogonal basis (3x3 numpy matrix): If specified, align with
orthogonal_basis[0] rather than [1,0,0]
Returns:
The slab, which has been aligned with the specified axis in place.
"""
sub_ortho_axes = get_ortho_axes(slab)
rotation = transf_mat(sub_ortho_axes, orthogonal_basis)
new_sub_lattice = Lattice(np.dot(slab.lattice.matrix[0:3], rotation))
slab.lattice = new_sub_lattice
return slab
def transf_mat(A, B):
"""
Get the matrix to transform from the set of axes A
to the set of axes B.
Args:
A (3x3 numpy array): original axis basis
B (3x3 numpy array): new axis basis
Returns:
3x3 numpy array transformation between the bases
"""
return np.dot(np.linalg.inv(A), B)
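# Example: with A = np.eye(3), transf_mat(A, B) simply returns B, since the
# original basis is already the standard Cartesian basis.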
def third_vect(a, b):
"""
Get a unit vector proportional to cross(a, b).
Args:
a, b (numpy arrays): 3D vectors.
Returns:
unit vector proportional to cross(a, b).
"""
c = np.cross(a, b)
return c / np.linalg.norm(c)
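# Example: third_vect(np.array([1, 0, 0]), np.array([0, 1, 0])) gives the unit
# vector [0, 0, 1].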
def get_shear_reduced_slab(slab):
"""
Reduce the vectors of the slab plane according to the algorithm in
substrate_analyzer, then make a new Slab with a Lattice with those
reduced vectors.
Args:
slab (Slab): Slab to reduce
Returns:
Slab object of identical structure to the input slab
        but with reduced in-plane lattice vectors
"""
reduced_vectors = reduce_vectors(
slab.lattice.matrix[0],
slab.lattice.matrix[1])
new_lattice = Lattice([reduced_vectors[0], reduced_vectors[1], slab.lattice.matrix[2]])
return Slab(lattice=new_lattice, species=slab.species,
coords=slab.cart_coords,
miller_index=slab.miller_index,
oriented_unit_cell=slab.oriented_unit_cell,
shift=slab.shift,
scale_factor=slab.scale_factor,
coords_are_cartesian=True, energy=slab.energy,
reorient_lattice=slab.reorient_lattice,
to_unit_cell=True)
| mit |
herilalaina/scikit-learn | examples/svm/plot_rbf_parameters.py | 30 | 8051 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them appart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_splits`` at the
expense of compute time. Increasing the number of steps in ``C_range`` and
``gamma_range`` will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
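# For example, MidpointNormalize(vmin=0.2, vmax=1.0, midpoint=0.92) maps a score
# of 0.92 to the middle of the colormap, stretching color resolution around the
# best cross-validation scores.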
# #############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
# #############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
# #############################################################################
# Visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r,
edgecolors='k')
plt.xticks(())
plt.yticks(())
plt.axis('tight')
scores = grid.cv_results_['mean_test_score'].reshape(len(C_range),
len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
llhe/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 130 | 9577 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]  # fall back to the documented default
with self.test_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
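  # Worked example of the formula above (illustrative): for desired_auc = 0.75
  # we get x = 2 * 0.75 - 1 = 0.5, so True scores come from U[0.5, 1] and False
  # scores from U[0, 1]; then
  #   P[T > F] = P[F < 0.5] * 1 + P[F > 0.5] * 0.5 = 0.5 + 0.25 = 0.75,
  # which is exactly the requested AUC.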
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
test.main()
| apache-2.0 |
mpharrigan/mixtape | msmbuilder/tests/test_preprocessing.py | 3 | 8031 | import numpy as np
from numpy.testing.decorators import skipif
try:
from sklearn.preprocessing import (FunctionTransformer as
FunctionTransformerR)
from msmbuilder.preprocessing import FunctionTransformer
HAVE_FT = True
except ImportError:
HAVE_FT = False
try:
from sklearn.preprocessing import MinMaxScaler as MinMaxScalerR
from msmbuilder.preprocessing import MinMaxScaler
HAVE_MMS = True
except ImportError:
HAVE_MMS = False
try:
from sklearn.preprocessing import MaxAbsScaler as MaxAbsScalerR
from msmbuilder.preprocessing import MaxAbsScaler
HAVE_MAS = True
except ImportError:
HAVE_MAS = False
try:
from sklearn.preprocessing import RobustScaler as RobustScalerR
from msmbuilder.preprocessing import RobustScaler
HAVE_RS = True
except ImportError:
HAVE_RS = False
try:
from sklearn.preprocessing import StandardScaler as StandardScalerR
from msmbuilder.preprocessing import StandardScaler
HAVE_SS = True
except ImportError:
HAVE_SS = False
from sklearn.preprocessing import (Binarizer as BinarizerR,
Imputer as ImputerR,
KernelCenterer as KernelCentererR,
LabelBinarizer as LabelBinarizerR,
MultiLabelBinarizer as MultiLabelBinarizerR,
Normalizer as NormalizerR,
PolynomialFeatures as PolynomialFeaturesR)
from ..preprocessing import (Binarizer, Imputer, KernelCenterer,
LabelBinarizer, MultiLabelBinarizer,
Normalizer, PolynomialFeatures, Butterworth,
EWMA, DoubleEWMA)
random = np.random.RandomState(42)
trajs = [random.randn(100, 3) for _ in range(5)]
labels = [random.randint(low=0, high=5, size=100).reshape(-1, 1)
for _ in range(5)]
def test_butterworth():
butterworth = Butterworth()
y1 = butterworth.transform(trajs)
assert len(y1) == len(trajs)
assert any(np.abs(y1[0] - trajs[0]).ravel() > 1E-5)
def test_ewma():
ewma = EWMA(span=5)
y1 = ewma.transform(trajs)
assert len(y1) == len(trajs)
assert any(np.abs(y1[0] - trajs[0]).ravel() > 1E-5)
def test_doubleewma():
dewma = DoubleEWMA(span=5)
y1 = dewma.transform(trajs)
assert len(y1) == len(trajs)
assert any(np.abs(y1[0] - trajs[0]).ravel() > 1E-5)
def test_binarizer_vs_sklearn():
# Compare msmbuilder.preprocessing.Binarizer
# with sklearn.preprocessing.Binarizer
binarizerr = BinarizerR()
binarizerr.fit(np.concatenate(trajs))
binarizer = Binarizer()
binarizer.fit(trajs)
y_ref1 = binarizerr.transform(trajs[0])
y1 = binarizer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
@skipif(not HAVE_FT, 'this test requires sklearn >0.17.0')
def test_functiontransformer_vs_sklearn():
# Compare msmbuilder.preprocessing.FunctionTransformer
# with sklearn.preprocessing.FunctionTransformer
functiontransformerr = FunctionTransformerR()
functiontransformerr.fit(np.concatenate(trajs))
functiontransformer = FunctionTransformer()
functiontransformer.fit(trajs)
y_ref1 = functiontransformerr.transform(trajs[0])
y1 = functiontransformer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_imputer_vs_sklearn():
# Compare msmbuilder.preprocessing.Imputer
# with sklearn.preprocessing.Imputer
imputerr = ImputerR()
imputerr.fit(np.concatenate(trajs))
imputer = Imputer()
imputer.fit(trajs)
y_ref1 = imputerr.transform(trajs[0])
y1 = imputer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_kernelcenterer_vs_sklearn():
# Compare msmbuilder.preprocessing.KernelCenterer
# with sklearn.preprocessing.KernelCenterer
kernelcentererr = KernelCentererR()
kernelcentererr.fit(np.concatenate(trajs))
kernelcenterer = KernelCenterer()
kernelcenterer.fit(trajs)
y_ref1 = kernelcentererr.transform(trajs[0])
y1 = kernelcenterer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_labelbinarizer_vs_sklearn():
# Compare msmbuilder.preprocessing.LabelBinarizer
# with sklearn.preprocessing.LabelBinarizer
labelbinarizerr = LabelBinarizerR()
labelbinarizerr.fit(np.concatenate(labels))
labelbinarizer = LabelBinarizer()
labelbinarizer.fit(labels)
y_ref1 = labelbinarizerr.transform(labels[0])
y1 = labelbinarizer.transform(labels)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_multilabelbinarizer_vs_sklearn():
# Compare msmbuilder.preprocessing.MultiLabelBinarizer
# with sklearn.preprocessing.MultiLabelBinarizer
multilabelbinarizerr = MultiLabelBinarizerR()
multilabelbinarizerr.fit(np.concatenate(trajs))
multilabelbinarizer = MultiLabelBinarizer()
multilabelbinarizer.fit(trajs)
y_ref1 = multilabelbinarizerr.transform(trajs[0])
y1 = multilabelbinarizer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
@skipif(not HAVE_MMS, 'this test requires sklearn >0.17.0')
def test_minmaxscaler_vs_sklearn():
# Compare msmbuilder.preprocessing.MinMaxScaler
# with sklearn.preprocessing.MinMaxScaler
minmaxscalerr = MinMaxScalerR()
minmaxscalerr.fit(np.concatenate(trajs))
minmaxscaler = MinMaxScaler()
minmaxscaler.fit(trajs)
y_ref1 = minmaxscalerr.transform(trajs[0])
y1 = minmaxscaler.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
@skipif(not HAVE_MAS, 'this test requires sklearn >0.17.0')
def test_maxabsscaler_vs_sklearn():
# Compare msmbuilder.preprocessing.MaxAbsScaler
# with sklearn.preprocessing.MaxAbsScaler
maxabsscalerr = MaxAbsScalerR()
maxabsscalerr.fit(np.concatenate(trajs))
maxabsscaler = MaxAbsScaler()
maxabsscaler.fit(trajs)
y_ref1 = maxabsscalerr.transform(trajs[0])
y1 = maxabsscaler.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_normalizer_vs_sklearn():
# Compare msmbuilder.preprocessing.Normalizer
# with sklearn.preprocessing.Normalizer
normalizerr = NormalizerR()
normalizerr.fit(np.concatenate(trajs))
normalizer = Normalizer()
normalizer.fit(trajs)
y_ref1 = normalizerr.transform(trajs[0])
y1 = normalizer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
@skipif(not HAVE_RS, 'this test requires sklearn >0.17.0')
def test_robustscaler_vs_sklearn():
# Compare msmbuilder.preprocessing.RobustScaler
# with sklearn.preprocessing.RobustScaler
robustscalerr = RobustScalerR()
robustscalerr.fit(np.concatenate(trajs))
robustscaler = RobustScaler()
robustscaler.fit(trajs)
y_ref1 = robustscalerr.transform(trajs[0])
y1 = robustscaler.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
@skipif(not HAVE_SS, 'this test requires sklearn >0.17.0')
def test_standardscaler_vs_sklearn():
# Compare msmbuilder.preprocessing.StandardScaler
# with sklearn.preprocessing.StandardScaler
standardscalerr = StandardScalerR()
standardscalerr.fit(np.concatenate(trajs))
standardscaler = StandardScaler()
standardscaler.fit(trajs)
y_ref1 = standardscalerr.transform(trajs[0])
y1 = standardscaler.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_polynomialfeatures_vs_sklearn():
# Compare msmbuilder.preprocessing.PolynomialFeatures
# with sklearn.preprocessing.PolynomialFeatures
polynomialfeaturesr = PolynomialFeaturesR()
polynomialfeaturesr.fit(np.concatenate(trajs))
polynomialfeatures = PolynomialFeatures()
polynomialfeatures.fit(trajs)
y_ref1 = polynomialfeaturesr.transform(trajs[0])
y1 = polynomialfeatures.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
| lgpl-2.1 |
go-bears/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/rcsetup.py | 69 | 23344 | """
The rcsetup module contains the default values and the validation code for
customization using matplotlib's rc settings.
Each rc setting is assigned a default value and a function used to validate any
attempted changes to that setting. The default values and validation functions
are defined in the rcsetup module, and are used to construct the rcParams global
object which stores the settings and is referenced throughout matplotlib.
These default values should be consistent with the default matplotlibrc file
that actually reflects the values given here. Any additions or deletions to the
parameter set listed here should also be reflected in the
:file:`matplotlibrc.template` in matplotlib's root source directory.
"""
import os
import warnings
from matplotlib.fontconfig_pattern import parse_fontconfig_pattern
from matplotlib.colors import is_color_like
#interactive_bk = ['gtk', 'gtkagg', 'gtkcairo', 'fltkagg', 'qtagg', 'qt4agg',
# 'tkagg', 'wx', 'wxagg', 'cocoaagg']
# The capitalized forms are needed for ipython at present; this may
# change for later versions.
interactive_bk = ['GTK', 'GTKAgg', 'GTKCairo', 'FltkAgg', 'MacOSX',
'QtAgg', 'Qt4Agg', 'TkAgg', 'WX', 'WXAgg', 'CocoaAgg']
non_interactive_bk = ['agg', 'cairo', 'emf', 'gdk',
'pdf', 'ps', 'svg', 'template']
all_backends = interactive_bk + non_interactive_bk
class ValidateInStrings:
def __init__(self, key, valid, ignorecase=False):
'valid is a list of legal strings'
self.key = key
self.ignorecase = ignorecase
def func(s):
if ignorecase: return s.lower()
else: return s
self.valid = dict([(func(k),k) for k in valid])
def __call__(self, s):
if self.ignorecase: s = s.lower()
if s in self.valid: return self.valid[s]
raise ValueError('Unrecognized %s string "%s": valid strings are %s'
% (self.key, s, self.valid.values()))
def validate_path_exists(s):
'If s is a path, return s, else False'
if os.path.exists(s): return s
else:
raise RuntimeError('"%s" should be a path but it does not exist'%s)
def validate_bool(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_bool_maybe_none(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b=='none': return None
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_float(s):
'convert s to float or raise'
try: return float(s)
except ValueError:
raise ValueError('Could not convert "%s" to float' % s)
def validate_int(s):
'convert s to int or raise'
try: return int(s)
except ValueError:
raise ValueError('Could not convert "%s" to int' % s)
def validate_fonttype(s):
    'confirm that this is a PostScript or PDF font type that we know how to convert to'
fonttypes = { 'type3': 3,
'truetype': 42 }
try:
fonttype = validate_int(s)
except ValueError:
if s.lower() in fonttypes.keys():
return fonttypes[s.lower()]
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.keys())
else:
if fonttype not in fonttypes.values():
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.values())
return fonttype
#validate_backend = ValidateInStrings('backend', all_backends, ignorecase=True)
_validate_standard_backends = ValidateInStrings('backend', all_backends, ignorecase=True)
def validate_backend(s):
if s.startswith('module://'): return s
else: return _validate_standard_backends(s)
validate_numerix = ValidateInStrings('numerix',[
'Numeric','numarray','numpy',
], ignorecase=True)
validate_toolbar = ValidateInStrings('toolbar',[
'None','classic','toolbar2',
], ignorecase=True)
def validate_autolayout(v):
if v:
warnings.warn("figure.autolayout is not currently supported")
class validate_nseq_float:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n floats or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [float(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to floats')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [float(val) for val in s]
class validate_nseq_int:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n ints or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [int(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to ints')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [int(val) for val in s]
def validate_color(s):
'return a valid color arg'
if s.lower() == 'none':
return 'None'
if is_color_like(s):
return s
stmp = '#' + s
if is_color_like(stmp):
return stmp
# If it is still valid, it must be a tuple.
colorarg = s
msg = ''
if s.find(',')>=0:
# get rid of grouping symbols
stmp = ''.join([ c for c in s if c.isdigit() or c=='.' or c==','])
vals = stmp.split(',')
if len(vals)!=3:
msg = '\nColor tuples must be length 3'
else:
try:
colorarg = [float(val) for val in vals]
except ValueError:
msg = '\nCould not convert all entries to floats'
if not msg and is_color_like(colorarg):
return colorarg
raise ValueError('%s does not look like a color arg%s'%(s, msg))
def validate_stringlist(s):
'return a list'
if type(s) is str:
return [ v.strip() for v in s.split(',') ]
else:
assert type(s) in [list,tuple]
return [ str(v) for v in s ]
validate_orientation = ValidateInStrings('orientation',[
'landscape', 'portrait',
])
def validate_aspect(s):
if s in ('auto', 'equal'):
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid aspect specification')
def validate_fontsize(s):
if type(s) is str:
s = s.lower()
if s in ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large',
'xx-large', 'smaller', 'larger']:
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid font size')
def validate_font_properties(s):
parse_fontconfig_pattern(s)
return s
validate_fontset = ValidateInStrings('fontset', ['cm', 'stix', 'stixsans', 'custom'])
validate_verbose = ValidateInStrings('verbose',[
'silent', 'helpful', 'debug', 'debug-annoying',
])
validate_cairo_format = ValidateInStrings('cairo_format',
['png', 'ps', 'pdf', 'svg'],
ignorecase=True)
validate_ps_papersize = ValidateInStrings('ps_papersize',[
'auto', 'letter', 'legal', 'ledger',
'a0', 'a1', 'a2','a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10',
'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10',
], ignorecase=True)
def validate_ps_distiller(s):
if type(s) is str:
s = s.lower()
if s in ('none',None):
return None
elif s in ('false', False):
return False
elif s in ('ghostscript', 'xpdf'):
return s
else:
raise ValueError('matplotlibrc ps.usedistiller must either be none, ghostscript or xpdf')
validate_joinstyle = ValidateInStrings('joinstyle',['miter', 'round', 'bevel'], ignorecase=True)
validate_capstyle = ValidateInStrings('capstyle',['butt', 'round', 'projecting'], ignorecase=True)
validate_negative_linestyle = ValidateInStrings('negative_linestyle',['solid', 'dashed'], ignorecase=True)
def validate_negative_linestyle_legacy(s):
try:
res = validate_negative_linestyle(s)
return res
except ValueError:
dashes = validate_nseq_float(2)(s)
warnings.warn("Deprecated negative_linestyle specification; use 'solid' or 'dashed'")
return (0, dashes) # (offset, (solid, blank))
validate_legend_loc = ValidateInStrings('legend_loc',[
'best',
'upper right',
'upper left',
'lower left',
'lower right',
'right',
'center left',
'center right',
'lower center',
'upper center',
'center',
], ignorecase=True)
class ValidateInterval:
"""
Value must be in interval
"""
def __init__(self, vmin, vmax, closedmin=True, closedmax=True):
self.vmin = vmin
self.vmax = vmax
self.cmin = closedmin
self.cmax = closedmax
def __call__(self, s):
try: s = float(s)
except: raise RuntimeError('Value must be a float; found "%s"'%s)
if self.cmin and s<self.vmin:
raise RuntimeError('Value must be >= %f; found "%f"'%(self.vmin, s))
elif not self.cmin and s<=self.vmin:
raise RuntimeError('Value must be > %f; found "%f"'%(self.vmin, s))
if self.cmax and s>self.vmax:
raise RuntimeError('Value must be <= %f; found "%f"'%(self.vmax, s))
elif not self.cmax and s>=self.vmax:
raise RuntimeError('Value must be < %f; found "%f"'%(self.vmax, s))
return s
# a map from key -> value, converter
defaultParams = {
'backend' : ['Agg', validate_backend], # agg is certainly present
'backend_fallback' : [True, validate_bool], # agg is certainly present
'numerix' : ['numpy', validate_numerix],
'maskedarray' : [False, validate_bool],
'toolbar' : ['toolbar2', validate_toolbar],
'datapath' : [None, validate_path_exists], # handled by _get_data_path_cached
'units' : [False, validate_bool],
'interactive' : [False, validate_bool],
'timezone' : ['UTC', str],
# the verbosity setting
'verbose.level' : ['silent', validate_verbose],
'verbose.fileo' : ['sys.stdout', str],
# line props
'lines.linewidth' : [1.0, validate_float], # line width in points
'lines.linestyle' : ['-', str], # solid line
'lines.color' : ['b', validate_color], # blue
    'lines.marker'        : ['None', str],     # no marker by default
'lines.markeredgewidth' : [0.5, validate_float],
'lines.markersize' : [6, validate_float], # markersize, in points
'lines.antialiased' : [True, validate_bool], # antialised (no jaggies)
'lines.dash_joinstyle' : ['miter', validate_joinstyle],
'lines.solid_joinstyle' : ['miter', validate_joinstyle],
'lines.dash_capstyle' : ['butt', validate_capstyle],
'lines.solid_capstyle' : ['projecting', validate_capstyle],
# patch props
'patch.linewidth' : [1.0, validate_float], # line width in points
'patch.edgecolor' : ['k', validate_color], # black
'patch.facecolor' : ['b', validate_color], # blue
'patch.antialiased' : [True, validate_bool], # antialised (no jaggies)
# font props
'font.family' : ['sans-serif', str], # used by text object
'font.style' : ['normal', str], #
'font.variant' : ['normal', str], #
'font.stretch' : ['normal', str], #
'font.weight' : ['normal', str], #
'font.size' : [12.0, validate_float], #
'font.serif' : [['Bitstream Vera Serif', 'DejaVu Serif',
'New Century Schoolbook', 'Century Schoolbook L',
'Utopia', 'ITC Bookman', 'Bookman',
'Nimbus Roman No9 L','Times New Roman',
'Times','Palatino','Charter','serif'],
validate_stringlist],
'font.sans-serif' : [['Bitstream Vera Sans', 'DejaVu Sans',
'Lucida Grande', 'Verdana', 'Geneva', 'Lucid',
'Arial', 'Helvetica', 'Avant Garde', 'sans-serif'],
validate_stringlist],
'font.cursive' : [['Apple Chancery','Textile','Zapf Chancery',
'Sand','cursive'], validate_stringlist],
    'font.fantasy'        : [['Comic Sans MS','Chicago','Charcoal','Impact',
'Western','fantasy'], validate_stringlist],
'font.monospace' : [['Bitstream Vera Sans Mono', 'DejaVu Sans Mono',
'Andale Mono', 'Nimbus Mono L', 'Courier New',
'Courier','Fixed', 'Terminal','monospace'],
validate_stringlist],
# text props
'text.color' : ['k', validate_color], # black
'text.usetex' : [False, validate_bool],
'text.latex.unicode' : [False, validate_bool],
'text.latex.preamble' : [[''], validate_stringlist],
'text.dvipnghack' : [None, validate_bool_maybe_none],
'text.fontstyle' : ['normal', str],
'text.fontangle' : ['normal', str],
'text.fontvariant' : ['normal', str],
'text.fontweight' : ['normal', str],
'text.fontsize' : ['medium', validate_fontsize],
'mathtext.cal' : ['cursive', validate_font_properties],
'mathtext.rm' : ['serif', validate_font_properties],
'mathtext.tt' : ['monospace', validate_font_properties],
'mathtext.it' : ['serif:italic', validate_font_properties],
'mathtext.bf' : ['serif:bold', validate_font_properties],
'mathtext.sf' : ['sans\-serif', validate_font_properties],
'mathtext.fontset' : ['cm', validate_fontset],
'mathtext.fallback_to_cm' : [True, validate_bool],
'image.aspect' : ['equal', validate_aspect], # equal, auto, a number
'image.interpolation' : ['bilinear', str],
'image.cmap' : ['jet', str], # one of gray, jet, etc
'image.lut' : [256, validate_int], # lookup table
    'image.origin'        : ['upper', str],        # placement of the [0, 0] index: 'upper' or 'lower'
'image.resample' : [False, validate_bool],
'contour.negative_linestyle' : ['dashed', validate_negative_linestyle_legacy],
# axes props
'axes.axisbelow' : [False, validate_bool],
'axes.hold' : [True, validate_bool],
'axes.facecolor' : ['w', validate_color], # background color; white
'axes.edgecolor' : ['k', validate_color], # edge color; black
'axes.linewidth' : [1.0, validate_float], # edge linewidth
'axes.titlesize' : ['large', validate_fontsize], # fontsize of the axes title
'axes.grid' : [False, validate_bool], # display grid or not
'axes.labelsize' : ['medium', validate_fontsize], # fontsize of the x any y labels
'axes.labelcolor' : ['k', validate_color], # color of axis label
'axes.formatter.limits' : [[-7, 7], validate_nseq_int(2)],
# use scientific notation if log10
# of the axis range is smaller than the
# first or larger than the second
'axes.unicode_minus' : [True, validate_bool],
'polaraxes.grid' : [True, validate_bool], # display polar grid or not
#legend properties
'legend.fancybox' : [False,validate_bool],
'legend.loc' : ['upper right',validate_legend_loc], # at some point, this should be changed to 'best'
'legend.isaxes' : [True,validate_bool], # this option is internally ignored - it never served any useful purpose
'legend.numpoints' : [2, validate_int], # the number of points in the legend line
'legend.fontsize' : ['large', validate_fontsize],
'legend.pad' : [0, validate_float], # was 0.2, deprecated; the fractional whitespace inside the legend border
'legend.borderpad' : [0.4, validate_float], # units are fontsize
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.02, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
'legend.labelspacing' : [0.5, validate_float], # the vertical space between the legend entries
'legend.handlelength' : [2., validate_float], # the length of the legend lines
'legend.handletextpad' : [.8, validate_float], # the space between the legend line and legend text
'legend.borderaxespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.columnspacing' : [2., validate_float], # the border between the axes and legend edge
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
# tick properties
'xtick.major.size' : [4, validate_float], # major xtick size in points
'xtick.minor.size' : [2, validate_float], # minor xtick size in points
'xtick.major.pad' : [4, validate_float], # distance to label in points
'xtick.minor.pad' : [4, validate_float], # distance to label in points
'xtick.color' : ['k', validate_color], # color of the xtick labels
'xtick.labelsize' : ['medium', validate_fontsize], # fontsize of the xtick labels
'xtick.direction' : ['in', str], # direction of xticks
'ytick.major.size' : [4, validate_float], # major ytick size in points
'ytick.minor.size' : [2, validate_float], # minor ytick size in points
'ytick.major.pad' : [4, validate_float], # distance to label in points
'ytick.minor.pad' : [4, validate_float], # distance to label in points
'ytick.color' : ['k', validate_color], # color of the ytick labels
'ytick.labelsize' : ['medium', validate_fontsize], # fontsize of the ytick labels
'ytick.direction' : ['in', str], # direction of yticks
'grid.color' : ['k', validate_color], # grid color
'grid.linestyle' : [':', str], # dotted
'grid.linewidth' : [0.5, validate_float], # in points
# figure props
# figure size in inches: width by height
'figure.figsize' : [ [8.0,6.0], validate_nseq_float(2)],
'figure.dpi' : [ 80, validate_float], # DPI
'figure.facecolor' : [ '0.75', validate_color], # facecolor; scalar gray
'figure.edgecolor' : [ 'w', validate_color], # edgecolor; white
'figure.autolayout' : [ False, validate_autolayout],
'figure.subplot.left' : [0.125, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.right' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.bottom' : [0.1, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.top' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.wspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'figure.subplot.hspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'savefig.dpi' : [100, validate_float], # DPI
'savefig.facecolor' : ['w', validate_color], # facecolor; white
'savefig.edgecolor' : ['w', validate_color], # edgecolor; white
    'savefig.orientation' : ['portrait', validate_orientation], # orientation of the saved figure
'cairo.format' : ['png', validate_cairo_format],
'tk.window_focus' : [False, validate_bool], # Maintain shell focus for TkAgg
'tk.pythoninspect' : [False, validate_bool], # Set PYTHONINSPECT
'ps.papersize' : ['letter', validate_ps_papersize], # Set the papersize/type
    'ps.useafm'           : [False, validate_bool], # use AFM fonts for PostScript output
'ps.usedistiller' : [False, validate_ps_distiller], # use ghostscript or xpdf to distill ps output
'ps.distiller.res' : [6000, validate_int], # dpi
'ps.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'pdf.compression' : [6, validate_int], # compression level from 0 to 9; 0 to disable
'pdf.inheritcolor' : [False, validate_bool], # ignore any color-setting commands from the frontend
'pdf.use14corefonts' : [False, validate_bool], # use only the 14 PDF core fonts
# embedded in every PDF viewing application
'pdf.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'svg.image_inline' : [True, validate_bool], # write raster image data directly into the svg file
'svg.image_noscale' : [False, validate_bool], # suppress scaling of raster data embedded in SVG
'svg.embed_char_paths' : [True, validate_bool], # True to save all characters as paths in the SVG
'docstring.hardcopy' : [False, validate_bool], # set this when you want to generate hardcopy docstring
    'plugins.directory' : ['.matplotlib_plugins', str], # where plugin directory is located
'path.simplify' : [False, validate_bool],
'agg.path.chunksize' : [0, validate_int] # 0 to disable chunking;
# recommend about 20000 to
# enable. Experimental.
}
if __name__ == '__main__':
rc = defaultParams
rc['datapath'][0] = '/'
for key in rc:
if not rc[key][1](rc[key][0]) == rc[key][0]:
print "%s: %s != %s"%(key, rc[key][1](rc[key][0]), rc[key][0])
| agpl-3.0 |
sgenoud/scikit-learn | sklearn/feature_selection/tests/test_selector_mixin.py | 1 | 1076 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_true
from sklearn.utils.testing import assert_less
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, seed=0)):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_true(np.mean(pred == iris.target) >= 0.7)
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 78 | 6016 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
from sklearn.ensemble.gradient_boosting import QuantileLossFunction
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
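    # (np.logaddexp(0, z) == log(1 + exp(z)), so alt_dev is the familiar
    # mean(log(1 + exp(-2 * (2*y - 1) * pred))) form of the binomial deviance,
    # written this way for numerical stability.)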
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the gradient against the alternative definition as well
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float64)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float64)
y.fill(0.0)
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float64)
y.fill(1.0)
sw = np.ones(102, dtype=np.float64)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_quantile_loss_function():
# Non regression test for the QuantileLossFunction object
# There was a sign problem when evaluating the function
# for negative values of 'ytrue - ypred'
x = np.asarray([-1.0, 0.0, 1.0])
y_found = QuantileLossFunction(1, 0.9)(x, np.zeros_like(x))
y_expected = np.asarray([0.1, 0.0, 0.9]).mean()
np.testing.assert_allclose(y_found, y_expected)
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
mwil/collision | figs/ber_contour_AsAu/style_ber_contour_AsAu.py | 1 | 1412 | # Copyright 2013-2014 Matthias Wilhelm
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib as mpl
import matplotlib.pyplot as plt
class Style(object):
def __init__(self):
pass
class Style1col(Style):
def apply(self, mode, content, wide):
mpl.rc_file('../rc/1fig-contour-rc.txt')
def annotate(self, mode, content, wide):
if wide:
self._annotate_wide()
else:
self._annotate()
def _annotate(self):
plt.annotate(r'$\delta_\mathrm{SIR}$', xy=(-0.25, 1), xytext=(-1.25, 3), color='w', fontsize=8,
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=-0.2', color='w'))
def _annotate_wide(self):
plt.annotate(r'$\delta_\mathrm{SIR}$', xy=(-0.25, 1), xytext=(-2.75, 1.35), color='w', fontsize=8,
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=-0.2', color='w'))
| gpl-3.0 |
KaliLab/optimizer | optimizer/graphic.py | 1 | 113966 | import sys
from traceHandler import sizeError
try:
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
except RuntimeError as re:
print(re)
sys.exit()
#from inspyred.ec import analysis
from inspyred.ec.analysis import generation_plot
import inspyred
import matplotlib.pyplot as plt
import os
from copy import copy
import Core
import numpy
import os.path
from functools import partial
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QFileDialog , QTableWidgetItem , QSizePolicy , QVBoxLayout, QGroupBox
from PyQt5.QtGui import *
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
def popup(message):
"""
    Display a modal warning dialog (PyQt) with the given message.
:param message: the string displayed in the window
"""
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(message)
msg.setInformativeText("")
msg.setWindowTitle("Warning")
msg.exec()
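# Example call (message text is illustrative only):
#     popup("Please select an input file first")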
class Ui_Neuroptimus(object):
def __init__(self,*args):
super().__init__(*args)
def setupUi(self, Neuroptimus):
"""
        Create and lay out the PyQt widgets of the Neuroptimus main window.
"""
Neuroptimus.setObjectName("Neuroptimus")
Neuroptimus.resize(800, 589)
Neuroptimus.setSizePolicy(QtWidgets.QSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding))
self.centralwidget = QtWidgets.QWidget(Neuroptimus)
self.centralwidget.setObjectName("centralwidget")
Neuroptimus.setCentralWidget(self.centralwidget)
self.laybox = QtWidgets.QVBoxLayout(self.centralwidget)
self.tabwidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabwidget.setGeometry(QtCore.QRect(0, 0, 771, 551))
self.tabwidget.setObjectName("tabwidget")
self.laybox.addWidget(self.tabwidget)
self.tabwidget.setSizePolicy(QtWidgets.QSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding))
#filetab 1
self.filetab = QtWidgets.QWidget()
self.filetab.setObjectName("filetab")
self.intvalidator = QIntValidator()
self.size_ctrl = QtWidgets.QLineEdit(self.filetab)
self.size_ctrl.setGeometry(QtCore.QRect(10, 230, 221, 22))
self.size_ctrl.setObjectName("size_ctrl")
self.size_ctrl.setValidator(self.intvalidator)
self.doublevalidator = QDoubleValidator()
self.length_ctrl = QtWidgets.QLineEdit(self.filetab)
self.length_ctrl.setGeometry(QtCore.QRect(10, 280, 221, 22))
self.length_ctrl.setObjectName("length_ctrl")
self.length_ctrl.setValidator(self.doublevalidator)
self.freq_ctrl = QtWidgets.QLineEdit(self.filetab)
self.freq_ctrl.setGeometry(QtCore.QRect(10, 330, 221, 22))
self.freq_ctrl.setObjectName("freq_ctrl")
self.freq_ctrl.setValidator(self.doublevalidator)
self.label_3 = QtWidgets.QLabel(self.filetab)
self.label_3.setGeometry(QtCore.QRect(10, 130, 200, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.filetab)
self.label_4.setGeometry(QtCore.QRect(10, 260, 250, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.filetab)
self.label_5.setGeometry(QtCore.QRect(10, 210, 250, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_7 = QtWidgets.QLabel(self.filetab)
self.label_7.setGeometry(QtCore.QRect(250, 210, 120, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.pushButton_3 = QtWidgets.QPushButton(self.filetab)
self.pushButton_3.setGeometry(QtCore.QRect(10, 400, 80, 22))
self.pushButton_3.setObjectName("pushButton_3")
self.label_2 = QtWidgets.QLabel(self.filetab)
self.label_2.setGeometry(QtCore.QRect(10, 80, 180, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.base_dir_controll = QtWidgets.QPushButton(self.filetab)
self.base_dir_controll.setGeometry(QtCore.QRect(240, 150, 80, 22))
self.base_dir_controll.setObjectName("base_dir_controll")
self.label_6 = QtWidgets.QLabel(self.filetab)
self.label_6.setGeometry(QtCore.QRect(10, 310, 320, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.lineEdit_folder = QtWidgets.QLineEdit(self.filetab)
self.lineEdit_folder.setGeometry(QtCore.QRect(10, 150, 221, 22))
self.lineEdit_folder.setObjectName("lineEdit_2")
self.type_selector = QtWidgets.QComboBox(self.filetab)
self.type_selector.setGeometry(QtCore.QRect(500, 100, 120, 22))
self.type_selector.setObjectName("type_selector")
self.type_selector.addItem("")
self.type_selector.addItem("")
self.type_selector.addItem("")
self.type_selector.addItem("")
self.input_file_controll = QtWidgets.QPushButton(self.filetab)
self.input_file_controll.setGeometry(QtCore.QRect(240, 100, 80, 22))
self.input_file_controll.setObjectName("pushButton")
self.time_checker = QtWidgets.QCheckBox(self.filetab)
self.time_checker.setGeometry(QtCore.QRect(340, 100, 121, 20))
self.time_checker.setObjectName("time_checker")
self.dropdown = QtWidgets.QComboBox(self.filetab)
self.dropdown.setGeometry(QtCore.QRect(240, 230, 61, 22))
self.dropdown.setObjectName("dropdown")
self.dropdown.addItem("uV")
self.dropdown.addItem("mV")
self.dropdown.addItem("V")
self.lineEdit_file = QtWidgets.QLineEdit(self.filetab)
self.lineEdit_file.setGeometry(QtCore.QRect(10, 100, 221, 22))
self.lineEdit_file.setObjectName("lineEdit")
self.model = QStandardItemModel(0, 1)
self.widget = QtWidgets.QWidget(self.filetab)
self.widget.setGeometry(QtCore.QRect(290, 270, 331, 200))
self.widget.setObjectName("widget")
self.input_tree = QtWidgets.QScrollArea(self.filetab)
self.input_tree.setGeometry(QtCore.QRect(370, 130, 250, 100))
self.input_label = QtWidgets.QLabel(self.filetab)
self.input_label.setGeometry(QtCore.QRect(370, 130, 250, 100))
#model tab
self.tabwidget.addTab(self.filetab, "")
self.modeltab = QtWidgets.QWidget()
self.modeltab.setObjectName("modeltab")
self.load_mods_checkbox = QtWidgets.QCheckBox(self.modeltab)
self.load_mods_checkbox.setGeometry(QtCore.QRect(10, 130, 20, 20))
self.label_23 = QtWidgets.QLabel(self.modeltab)
self.label_23.setGeometry(QtCore.QRect(30, 130, 240, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_23.setFont(font)
self.label_23.setObjectName("label_23")
self.label_24 = QtWidgets.QLabel(self.modeltab)
self.label_24.setGeometry(QtCore.QRect(10, 80, 180, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_24.setFont(font)
self.label_24.setObjectName("label_24")
self.pushButton_12 = QtWidgets.QPushButton(self.modeltab)
self.pushButton_12.setGeometry(QtCore.QRect(150, 50, 140, 22))
self.pushButton_12.setObjectName("pushButton_12")
self.pushButton_13 = QtWidgets.QPushButton(self.modeltab)
self.pushButton_13.setGeometry(QtCore.QRect(330, 100, 80, 22))
self.pushButton_13.setObjectName("pushButton_13")
self.lineEdit_file2 = QtWidgets.QLineEdit(self.modeltab)
self.lineEdit_file2.setGeometry(QtCore.QRect(10, 100, 221, 22))
self.lineEdit_file2.setObjectName("lineEdit_file2")
self.modellist = QtWidgets.QTableWidget(self.modeltab)
self.modellist.setGeometry(QtCore.QRect(10, 200, 441, 261))
self.modellist.setObjectName("modellist")
self.pushButton_14 = QtWidgets.QPushButton(self.modeltab)
self.pushButton_14.setGeometry(QtCore.QRect(240, 150, 80, 22))
self.pushButton_14.setObjectName("pushButton_14")
self.pushButton_15 = QtWidgets.QPushButton(self.modeltab)
self.pushButton_15.setGeometry(QtCore.QRect(240, 100, 80, 22))
self.pushButton_15.setObjectName("pushButton_15")
self.pushButton_16 = QtWidgets.QPushButton(self.modeltab)
self.pushButton_16.setGeometry(QtCore.QRect(460, 200, 111, 22))
self.pushButton_16.setObjectName("pushButton_16")
font = QtGui.QFont()
font.setFamily("Ubuntu")
self.label_26 = QtWidgets.QLabel(self.modeltab)
self.label_26.setGeometry(QtCore.QRect(10, 80, 300, 16))
font.setWeight(50)
self.label_26.setFont(font)
self.label_26.setObjectName("label_26")
self.label_27 = QtWidgets.QLabel(self.modeltab)
self.label_27.setGeometry(QtCore.QRect(10, 130, 300, 16))
font.setWeight(50)
self.label_27.setFont(font)
self.label_27.setObjectName("label_26")
font.setWeight(75)
self.dd_type = QtWidgets.QComboBox(self.modeltab)
self.dd_type.setGeometry(QtCore.QRect(10, 50, 121, 23))
self.dd_type.setObjectName("dd_type")
self.dd_type.addItem("Neuron")
self.dd_type.addItem("External (Python)")
self.dd_type.addItem("External")
self.dd_type.currentIndexChanged.connect(self.sim_plat)
self.lineEdit_folder2 = QtWidgets.QLineEdit(self.modeltab)
self.lineEdit_folder2.setGeometry(QtCore.QRect(10, 150, 221, 22))
self.lineEdit_folder2.setObjectName("lineEdit_folder2")
self.sim_path = QtWidgets.QLineEdit(self.modeltab)
self.sim_path.setGeometry(QtCore.QRect(10, 100, 301, 22))
self.sim_path.setObjectName("sim_path")
self.sim_path.hide()
self.sim_param = QtWidgets.QLineEdit(self.modeltab)
self.sim_param.setGeometry(QtCore.QRect(10, 150, 50, 22))
self.sim_param.setObjectName("sim_param")
self.sim_param.hide()
self.setter = QtWidgets.QPushButton(self.modeltab)
self.setter.setGeometry(QtCore.QRect(460, 250, 80, 22))
self.setter.setObjectName("setter")
self.remover = QtWidgets.QPushButton(self.modeltab)
self.remover.setGeometry(QtCore.QRect(460, 280, 80, 22))
self.remover.setObjectName("remover")
self.remover.setEnabled(False)
#sim tab 3
self.tabwidget.addTab(self.modeltab, "")
self.simtab = QtWidgets.QWidget()
self.simtab.setObjectName("simtab")
self.param_to_record = QtWidgets.QComboBox((self.simtab))
self.param_to_record.setGeometry(QtCore.QRect(220, 100, 121, 23))
self.param_to_record.setObjectName("parameter to record")
self.label_44 = QtWidgets.QLabel(self.simtab)
self.label_44.setGeometry(QtCore.QRect(10, 220, 111, 16))
font.setWeight(50)
self.label_44.setFont(font)
self.label_44.setObjectName("label_44")
self.label_66 = QtWidgets.QLabel(self.simtab)
self.label_66.setGeometry(QtCore.QRect(420, 80, 200, 16))
self.label_66.setFont(font)
self.label_66.setObjectName("label_66")
self.label_67 = QtWidgets.QLabel(self.simtab)
self.label_67.setGeometry(QtCore.QRect(420, 130, 200, 16))
self.label_67.setFont(font)
self.label_67.setObjectName("label_67")
self.label_45 = QtWidgets.QLabel(self.simtab)
self.label_45.setGeometry(QtCore.QRect(10, 320, 200, 16))
self.label_45.setFont(font)
self.label_45.setObjectName("label_45")
self.lineEdit_pos = QtWidgets.QLineEdit(self.simtab)
self.lineEdit_pos.setGeometry(QtCore.QRect(220, 200, 113, 22))
self.lineEdit_pos.setObjectName("position")
self.lineEdit_pos.setValidator(self.doublevalidator)
self.section_stim = QtWidgets.QComboBox(self.simtab)
self.section_stim.setGeometry(QtCore.QRect(10, 340, 121, 23))
self.section_stim.setObjectName("section stim")
self.label_46 = QtWidgets.QLabel(self.simtab)
self.label_46.setGeometry(QtCore.QRect(10, 270, 200, 16))
self.label_46.setFont(font)
self.label_46.setObjectName("label_46")
self.stimprot = QtWidgets.QComboBox(self.simtab)
self.stimprot.setGeometry(QtCore.QRect(10, 100, 121, 23))
self.stimprot.setObjectName("stimprot")
self.stimulus_type = QtWidgets.QComboBox(self.simtab)
self.stimulus_type.setGeometry(QtCore.QRect(10, 150, 121, 23))
self.stimulus_type.setObjectName("stimulus type")
self.label_71 = QtWidgets.QLabel(self.simtab)
self.label_71.setGeometry(QtCore.QRect(10, 370, 300, 16))
self.label_71.setFont(font)
self.label_71.setObjectName("label_71")
self.lineEdit_posins = QtWidgets.QLineEdit(self.simtab)
self.lineEdit_posins.setGeometry(QtCore.QRect(10, 390, 113, 22))
self.lineEdit_posins.setObjectName("posinside")
self.lineEdit_posins.setValidator(self.doublevalidator)
self.lineEdit_duration = QtWidgets.QLineEdit(self.simtab)
self.lineEdit_duration.setGeometry(QtCore.QRect(10, 290, 113, 22))
self.lineEdit_duration.setObjectName("duration")
self.lineEdit_duration.setValidator(self.doublevalidator)
font.setWeight(75)
self.base_dir_controll9 = QtWidgets.QPushButton(self.simtab)
self.base_dir_controll9.setGeometry(QtCore.QRect(10, 180, 115, 22))
self.base_dir_controll9.setObjectName("base_dir_controll9")
self.label_48 = QtWidgets.QLabel(self.simtab)
self.label_48.setGeometry(QtCore.QRect(220, 130, 200, 16))
font.setWeight(50)
self.label_48.setFont(font)
self.label_48.setObjectName("label_48")
self.lineEdit_tstop = QtWidgets.QLineEdit(self.simtab)
self.lineEdit_tstop.setGeometry(QtCore.QRect(420, 150, 113, 22))
self.lineEdit_tstop.setObjectName("tstop")
self.lineEdit_tstop.setValidator(self.doublevalidator)
self.label_49 = QtWidgets.QLabel(self.simtab)
self.label_49.setGeometry(QtCore.QRect(10, 130, 200, 16))
font.setWeight(50)
self.label_49.setFont(font)
self.label_49.setObjectName("label_49")
self.label_68 = QtWidgets.QLabel(self.simtab)
self.label_68.setGeometry(QtCore.QRect(420, 180, 200, 16))
font.setWeight(50)
self.label_68.setFont(font)
self.label_68.setObjectName("label_68")
font.setWeight(75)
self.lineEdit_delay = QtWidgets.QLineEdit(self.simtab)
self.lineEdit_delay.setGeometry(QtCore.QRect(10, 240, 113, 22))
self.lineEdit_delay.setObjectName("Delay")
self.lineEdit_delay.setValidator(self.doublevalidator)
self.label_51 = QtWidgets.QLabel(self.simtab)
self.label_51.setGeometry(QtCore.QRect(220, 180, 200, 16))
font.setWeight(50)
self.label_51.setFont(font)
self.label_51.setObjectName("label_51")
self.label_52 = QtWidgets.QLabel(self.simtab)
self.label_52.setGeometry(QtCore.QRect(220, 80, 200, 16))
font.setWeight(50)
self.label_52.setFont(font)
self.label_52.setObjectName("label_52")
self.lineEdit_dt = QtWidgets.QLineEdit(self.simtab)
self.lineEdit_dt.setGeometry(QtCore.QRect(420, 200, 113, 22))
self.lineEdit_dt.setObjectName("lineEdit_dt")
self.lineEdit_dt.setValidator(self.doublevalidator)
self.section_rec = QtWidgets.QComboBox(self.simtab)
self.section_rec.setGeometry(QtCore.QRect(220, 150, 121, 23))
self.section_rec.setObjectName("section")
self.lineEdit_initv = QtWidgets.QLineEdit(self.simtab)
self.lineEdit_initv.setGeometry(QtCore.QRect(420, 100, 113, 22))
self.lineEdit_initv.setObjectName("initv")
self.lineEdit_initv.setValidator(self.doublevalidator)
self.label_55 = QtWidgets.QLabel(self.simtab)
self.label_55.setGeometry(QtCore.QRect(10, 80, 200, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_55.setFont(font)
self.label_55.setObjectName("label_55")
#fit tab 4
self.tabwidget.addTab(self.simtab, "")
self.fittab = QtWidgets.QWidget()
self.fittab.setObjectName("fittab")
self.label_56 = QtWidgets.QLabel(self.fittab)
self.label_56.setGeometry(QtCore.QRect(10, 50, 270, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(11)
font.setWeight(50)
self.label_56.setFont(font)
self.label_56.setObjectName("label_56")
self.fitlist = QtWidgets.QTableWidget(self.fittab)
self.fitlist.setGeometry(QtCore.QRect(10, 80, 301, 401))
self.fitlist.setObjectName("fitlist")
self.spike_tresh = QtWidgets.QLineEdit(self.fittab)
self.spike_tresh.setGeometry(QtCore.QRect(370,110, 113, 22))
self.spike_tresh.setObjectName("spike_tresh")
self.spike_window = QtWidgets.QLineEdit(self.fittab)
self.spike_window.setGeometry(QtCore.QRect(370, 210, 113, 22))
self.spike_window.setObjectName("spike_window")
self.label_69 = QtWidgets.QLabel(self.fittab)
self.label_69.setGeometry(QtCore.QRect(330, 90, 300, 16))
self.spike_tresh.setText("0.0")
self.spike_window.setText("1.0")
self.label_69.setFont(font)
self.label_69.setObjectName("label_69")
self.label_70 = QtWidgets.QLabel(self.fittab)
self.label_70.setGeometry(QtCore.QRect(330, 190, 300, 16))
self.label_70.setFont(font)
self.label_70.setObjectName("label_70")
self.pushButton_normalize = QtWidgets.QPushButton(self.fittab)
self.pushButton_normalize.setGeometry(QtCore.QRect(220, 50, 80, 22))
self.pushButton_normalize.setObjectName("pushButton_normalize")
self.pushButton_normalize.setText("Normalize")
#run tab 5
self.tabwidget.addTab(self.fittab, "")
self.runtab = QtWidgets.QWidget()
self.runtab.setObjectName("runtab")
self.pushButton_30 = QtWidgets.QPushButton(self.runtab)
self.pushButton_30.setGeometry(QtCore.QRect(630, 460, 80, 22))
self.pushButton_30.setObjectName("pushButton_30")
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.pushButton_31 = QtWidgets.QPushButton(self.runtab)
self.pushButton_31.setGeometry(QtCore.QRect(110, 460, 111, 22))
self.pushButton_31.setObjectName("pushButton_31")
self.pushButton_32 = QtWidgets.QPushButton(self.runtab)
self.pushButton_32.setGeometry(QtCore.QRect(10, 460, 80, 22))
self.pushButton_32.setObjectName("pushButton_32")
self.label_59 = QtWidgets.QLabel(self.runtab)
self.label_59.setGeometry(QtCore.QRect(10, 70, 200, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_59.setFont(font)
self.label_59.setObjectName("label_59")
self.pushButton_Inspyred = QtWidgets.QPushButton(self.runtab)
self.pushButton_Inspyred.setGeometry(QtCore.QRect(105, 90, 88, 32))
self.pushButton_Inspyred.setObjectName("Inspyred")
self.pushButton_Pygmo = QtWidgets.QPushButton(self.runtab)
self.pushButton_Pygmo.setGeometry(QtCore.QRect(193, 90, 88, 32))
self.pushButton_Pygmo.setObjectName("Pygmo")
self.pushButton_Bluepyopt = QtWidgets.QPushButton(self.runtab)
self.pushButton_Bluepyopt.setGeometry(QtCore.QRect(281, 90, 88, 32))
self.pushButton_Bluepyopt.setObjectName("Bluepyopt")
self.pushButton_Scipy = QtWidgets.QPushButton(self.runtab)
self.pushButton_Scipy.setGeometry(QtCore.QRect(369, 90, 82, 32))
self.pushButton_Scipy.setObjectName("Scipy")
self.pushButton_Recom = QtWidgets.QPushButton(self.runtab)
self.pushButton_Recom.setGeometry(QtCore.QRect(10, 90, 95, 32))
self.pushButton_Recom.setObjectName("Recommended")
self.algolist = QtWidgets.QTableWidget(self.runtab)
self.algolist.setGeometry(QtCore.QRect(10, 120, 441, 321))
self.algolist.setObjectName("algolist")
self.aspectlist = QtWidgets.QTableWidget(self.runtab)
self.aspectlist.setGeometry(QtCore.QRect(470, 90, 241, 351))
self.aspectlist.setObjectName("aspectlist")
self.label_60 = QtWidgets.QLabel(self.runtab)
self.label_60.setGeometry(QtCore.QRect(470, 70, 200, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_60.setFont(font)
self.label_60.setObjectName("label_60")
self.tabwidget.addTab(self.runtab, "")
self.eval_tab = QtWidgets.QWidget()
self.eval_tab.setObjectName("eval_tab")
#eval tab 6
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(11)
font.setBold(False)
font.setWeight(75)
self.label_72 = QtWidgets.QLabel(self.eval_tab)
self.label_72.setGeometry(QtCore.QRect(10, 50, 200, 16))
self.label_72.setFont(font)
self.label_72.setObjectName("label_72")
self.tabwidget.addTab(self.eval_tab, "")
self.plot_tab = QtWidgets.QWidget()
self.plot_tab.setObjectName("plot_tab")
self.widget2 = QtWidgets.QWidget(self.eval_tab)
self.widget2.setGeometry(QtCore.QRect(180, 80, 630, 400))
self.widget2.setObjectName("widget2")
self.pushButton_34 = QtWidgets.QPushButton(self.eval_tab)
self.pushButton_34.setGeometry(QtCore.QRect(50, 360, 111, 22))
self.pushButton_34.setObjectName("pushButton_34")
#plot tab 7
self.tabwidget.addTab(self.plot_tab, "")
self.pushButton_35 = QtWidgets.QPushButton(self.plot_tab)
self.pushButton_35.setGeometry(QtCore.QRect(30, 400, 111, 22))
self.pushButton_35.setObjectName("pushButton_35")
#self.pushButton_36 = QtWidgets.QPushButton(self.plot_tab) grid_plot
#self.pushButton_36.setGeometry(QtCore.QRect(150, 400, 111, 22))
#self.pushButton_36.setObjectName("pushButton_34")
self.pushButton_37 = QtWidgets.QPushButton(self.plot_tab)
self.pushButton_37.setGeometry(QtCore.QRect(300, 400, 111, 22))
self.pushButton_37.setObjectName("pushButton_37")
self.label_74 = QtWidgets.QLabel(self.plot_tab)
self.label_74.setGeometry(QtCore.QRect(10, 50, 200, 16))
self.label_74.setFont(font)
self.label_74.setObjectName("label_74")
self.errorlist = QtWidgets.QTableWidget(self.plot_tab)
self.errorlist.setGeometry(QtCore.QRect(300, 200, 350, 180))
self.errorlist.setObjectName("errorlist")
self.fitstat = QtWidgets.QLabel(self.plot_tab)
self.fitstat.setGeometry(QtCore.QRect(300, 50,200, 24))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.fitstat.setFont(font)
self.fitstat.setObjectName("label")
self.fitstat.setText(QtCore.QCoreApplication.translate("Neuroptimus", 'Fitness statistics'))
Neuroptimus.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(Neuroptimus)
self.menubar.setGeometry(QtCore.QRect(0, 0, 771, 19))
self.menubar.setObjectName("menubar")
self.menuMenu = QtWidgets.QMenu(self.menubar)
self.menuMenu.setObjectName("menuMenu")
Neuroptimus.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(Neuroptimus)
self.statusbar.setObjectName("statusbar")
Neuroptimus.setStatusBar(self.statusbar)
self.actionunlock = QtWidgets.QAction(Neuroptimus)
self.actionunlock.setObjectName("actionunlock")
self.actionexit = QtWidgets.QAction(Neuroptimus)
self.actionexit.setObjectName("actionexit")
self.menuMenu.addAction(self.actionunlock)
self.menuMenu.addAction(self.actionexit)
self.menubar.addAction(self.menuMenu.menuAction())
self.retranslateUi(Neuroptimus)
QtCore.QMetaObject.connectSlotsByName(Neuroptimus)
self.tabwidget.setCurrentIndex(0)
def retranslateUi(self, Neuroptimus):
"""
Sets the texts of the PyQt widgets and connects their signals to the handler functions.
"""
_translate = QtCore.QCoreApplication.translate
Neuroptimus.setWindowTitle(_translate("Neuroptimus", "Neuroptimus"))
#self.tabwidget.currentChanged.connect(self.onChange)
#modeltab 2 disappearing
self.actionunlock.triggered.connect(self.unlocktabs)
self.actionexit.triggered.connect(QApplication.quit)
self.tabwidget.setTabText(self.tabwidget.indexOf(self.filetab), _translate("Neuroptimus", "Target data"))
self.label_23.setText(_translate("Neuroptimus", "Load mod files from:"))
self.label_24.setText(_translate("Neuroptimus", "Model file"))
self.lineEdit_folder2.setEnabled(False)
self.pushButton_14.setEnabled(False)
self.load_mods_checkbox.clicked.connect(self.disable_mod_path)
self.pushButton_13.setText(_translate("Neuroptimus", "Load"))
self.pushButton_13.clicked.connect(self.Load2)
self.pushButton_12.setText(_translate("Neuroptimus", "Load python file"))
self.pushButton_12.clicked.connect(self.Loadpython)
self.pushButton_12.hide()
self.pushButton_14.setText(_translate("Neuroptimus", "Browse..."))
self.pushButton_14.clicked.connect(self.openFolderNameDialog2)
self.pushButton_15.setText(_translate("Neuroptimus", "Browse..."))
self.pushButton_15.clicked.connect(self.openFileNameDialog2)
self.pushButton_16.setText(_translate("Neuroptimus", "Define function"))
self.pushButton_16.clicked.connect(self.UF)
self.label_26.setText(_translate("Neuroptimus", "Command"))
self.label_26.hide()
self.label_27.setText(_translate("Neuroptimus", "Number of parameters"))
self.label_27.hide()
self.setter.setText(_translate("Neuroptimus", "Set"))
self.setter.clicked.connect(self.Set)
self.remover.setText(_translate("Neuroptimus", "Remove"))
self.remover.clicked.connect(self.Remove)
self.modellist.setColumnCount(4)
self.modellist.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.modellist.setHorizontalHeaderLabels(("Section;Segment;Mechanism;Parameter").split(";"))
#self.modellist.resizeColumnsToContents()
self.modellist.horizontalHeader().setStretchLastSection(True)
self.modellist.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.modellist.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.input_tree.setWidgetResizable(True)
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.input_label.setFont(font)
self.input_label.setObjectName("label")
self.input_tree.setWidget(self.input_label)
#filetab 1
self.datfileName = ""
self.label_3.setText(_translate("Neuroptimus", "Base directory"))
self.label_4.setText(_translate("Neuroptimus", "Length of traces (ms)"))
self.label_5.setText(_translate("Neuroptimus", "Number of traces"))
self.label_7.setText(_translate("Neuroptimus", "Units"))
self.pushButton_3.setText(_translate("Neuroptimus", "Load trace"))
self.pushButton_3.setEnabled(False)
self.pushButton_3.clicked.connect(self.Load)
self.label_2.setText(_translate("Neuroptimus", "Data file"))
self.base_dir_controll.setText(_translate("Neuroptimus", "Browse..."))
self.base_dir_controll.clicked.connect(self.openFolderNameDialog)
self.label_6.setText(_translate("Neuroptimus", "Sampling frequency (Hz)"))
self.type_selector.setItemText(0, _translate("Neuroptimus", "Voltage trace"))
self.type_selector.setItemText(1, _translate("Neuroptimus", "Current trace"))
self.type_selector.setItemText(2, _translate("Neuroptimus", "Features"))
self.type_selector.setItemText(3, _translate("Neuroptimus", "Other"))
self.type_selector.currentTextChanged.connect(self.unitchange)
self.input_file_controll.setText(_translate("Neuroptimus", "Browse..."))
self.input_file_controll.clicked.connect(self.openFileNameDialog)
self.time_checker.setText(_translate("Neuroptimus", "Contains time"))
self.time_checker.toggled.connect(self.time_calc)
self.dropdown.setItemText(0, _translate("Neuroptimus", "uV"))
self.dropdown.setItemText(1, _translate("Neuroptimus", "mV"))
self.dropdown.setItemText(2, _translate("Neuroptimus", "V"))
self.dropdown.setCurrentIndex(1)
self.tvoltage=None
self.tcurrent=None
self.tspike_t=None
self.tother=None
self.tfeatures=None
#self.vbox.setItemText(_translate("Neuroptimus", "Vbox"))
self.figure = plt.figure(figsize=(4,2.5), dpi=80)
self.canvas = FigureCanvas(self.figure)
self.canvas.setParent(self.widget)
#enable this later
self.loaded_input_types=[self.tvoltage ,
self.tcurrent ,
# self.tspike_t ,
# self.tother,
self.tfeatures]
self.core=Core.coreModul()
#optiontab 3
self.tabwidget.setTabText(self.tabwidget.indexOf(self.modeltab), _translate("Neuroptimus", "Model"))
self.label_44.setText(_translate("Neuroptimus", "Delay (ms)"))
self.label_66.setText(_translate("Neuroptimus", "Initial voltage (mV)"))
self.label_67.setText(_translate("Neuroptimus", "tstop (ms)"))
self.label_45.setText(_translate("Neuroptimus", "Section"))
self.label_46.setText(_translate("Neuroptimus", "Duration (ms)"))
self.base_dir_controll9.setText(_translate("Neuroptimus", "Amplitude(s)"))
self.base_dir_controll9.clicked.connect(self.amplitudes_fun)
self.label_48.setText(_translate("Neuroptimus", "Section"))
self.label_49.setText(_translate("Neuroptimus", "Stimulus Type"))
self.label_68.setText(_translate("Neuroptimus", "Time step"))
self.label_51.setText(_translate("Neuroptimus", "Position inside section"))
self.label_52.setText(_translate("Neuroptimus", "Parameter to record"))
self.label_55.setText(_translate("Neuroptimus", "Stimulation protocol"))
self.label_71.setText(_translate("Neuroptimus", "Position inside section"))
self.lineEdit_pos.setText("0.5")
self.lineEdit_posins.setText("0.5")
self.lineEdit_initv.setText("-65")
self.lineEdit_dt.setText("0.05")
self.stimprot.addItems(["IClamp","VClamp"])
self.stimulus_type.addItems(["Step Protocol","Custom Waveform"])
self.stimulus_type.currentIndexChanged.connect(self.typeChange)
self.param_to_record.addItems(["v","i"])
#self.stimprot.setItemText(0, _translate("Neuroptimus", "IClamp"))
#self.stimprot.setItemText(1, _translate("Neuroptimus", "VClamp"))
self.container = []
self.temp=[]
#fittab 4
self.tabwidget.setTabText(self.tabwidget.indexOf(self.simtab), _translate("Neuroptimus", "Settings"))
self.fitlist.setColumnCount(2)
#self.fitlist.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
#self.flist.setHorizontalHeaderLabels(("Section;Segment;Mechanism;Parameter").split(";"))
self.fitlist.resizeColumnsToContents()
#self.fitlist.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.fitlist.setHorizontalHeaderLabels(["Fitness functions","Weights"])
#self.fitlist.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.fitlist.setColumnWidth(0,200)
self.fitlist.setColumnWidth(1,80)
self.fitlist.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
#self.fitlist.itemSelectionChanged.connect(self.fitselect)
#self.fitlist.cellClicked.connect(self.fitselect)
self.fitlist.horizontalHeader().setStretchLastSection(True)
self.label_69.setText(_translate("Neuroptimus", "Spike detection thresh. (mV)"))
self.label_70.setText(_translate("Neuroptimus", "Spike window (ms)"))
self.pushButton_normalize.clicked.connect(self.Normalize)
#runtab 5
self.tabwidget.setTabText(self.tabwidget.indexOf(self.fittab), _translate("Neuroptimus", "Fitness"))
self.pushButton_30.setText(_translate("Neuroptimus", "Run"))
self.pushButton_30.clicked.connect(self.runsim)
self.pushButton_31.setText(_translate("Neuroptimus", "Starting points"))
self.pushButton_31.clicked.connect(self.startingpoints)
self.pushButton_32.setText(_translate("Neuroptimus", "Boundaries"))
self.pushButton_32.clicked.connect(self.boundarywindow)
self.label_59.setText(_translate("Neuroptimus", "Algorithms"))
self.label_60.setText(_translate("Neuroptimus", "Parameters"))
self.tabwidget.setTabText(self.tabwidget.indexOf(self.runtab), _translate("Neuroptimus", "Run"))
self.pushButton_Recom.setText(_translate("Neuroptimus", "Recommended"))
self.pushButton_Recom.clicked.connect(partial(self.packageselect,"Recommended"))
self.pushButton_Inspyred.setText(_translate("Neuroptimus", "Inspyred"))
self.pushButton_Inspyred.clicked.connect(partial(self.packageselect,"Inspyred"))
self.pushButton_Pygmo.setText(_translate("Neuroptimus", "Pygmo"))
self.pushButton_Pygmo.clicked.connect(partial(self.packageselect,"Pygmo"))
self.pushButton_Bluepyopt.setText(_translate("Neuroptimus", "Bluepyopt"))
self.pushButton_Bluepyopt.clicked.connect(partial(self.packageselect,"Bluepyopt"))
self.pushButton_Scipy.setText(_translate("Neuroptimus", "Scipy"))
self.pushButton_Scipy.clicked.connect(partial(self.packageselect,"Scipy"))
self.algolist.setColumnCount(1)
self.algolist.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.algolist.clicked.connect(self.algoselect)
self.algolist.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.algolist.setColumnWidth(0,440)
self.algolist.setHorizontalHeaderLabels(['Algorithms'])
self.aspectlist.setColumnCount(2)
self.aspectlist.horizontalHeader().setStretchLastSection(True)
self.aspectlist.setHorizontalHeaderLabels(["Option","Value"])
self.aspectlist.cellChanged.connect(self.aspect_changed)
self.seed = []
self.resolution=0
self.Recom=["Classical Evolution Strategy (CES) - Inspyred","Covariance Matrix Adaptation ES (CMAES) - Pygmo",
"Particle Swarm (PSO) - Inspyred","Particle Swarm Gen (PSOG) - Pygmo","Indicator Based (IBEA) - Bluepyopt","L-BFGS-B - Scipy","Random Search"]
self.Inspyred=["Classical Evolution Strategy (CES) - Inspyred","Particle Swarm (PSO) - Inspyred",
"Differential Evolution (DE) - Inspyred",
"Nondominated Sorted GA (NSGA2) - Inspyred","Pareto Archived ES (PAES) - Inspyred",
"Simulated Annealing (SA) - Inspyred"]
self.Scipy=["Basinhopping (BH) - Scipy","Nelder-Mead (NM) - Scipy","L-BFGS-B - Scipy"]
self.Bluepyopt=["Nondominated Sorted GA (NSGA2) - Bluepyopt","Indicator Based (IBEA) - Bluepyopt"]
self.Pygmo=["Particle Swarm Gen (PSOG) - Pygmo","Nondominated Sorted Particle Swarm (NSPSO) - Pygmo",
"Nondominated Sorted GA (NSGA2) - Pygmo","Differential Evolution (DE) - Pygmo",
"Extended Ant Colony (GACO) - Pygmo","Multi-Objective Ant Colony (MACO) - Pygmo","Self-Adaptive DE (SADE) - Pygmo",
"Particle Swarm (PSO) - Pygmo","Exponential Natural ES (XNES) - Pygmo",
"Simple Genetic Algorithm (SGA) - Pygmo","Covariance Matrix Adaptation ES (CMAES) - Pygmo",
"Single Differential Evolution (SDE) - Pygmo","Differential Evolution (DE1220) - Pygmo",
"Bee Colony (ABC) - Pygmo","FullGrid - Pygmo","Praxis - Pygmo","Nelder-Mead (NM) - Pygmo"]
self.algos={
'Recommended':self.Recom,
'Inspyred': self.Inspyred,
'Scipy': self.Scipy,
'Bluepyopt': self.Bluepyopt,
'Pygmo': self.Pygmo}
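# Maps each package button to the list of algorithm names it offers; packageselect() uses this to repopulate the algorithm table.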
self.algolist.setRowCount(len(self.Recom))
for index,item in enumerate(self.Recom):
self.algolist.setItem(index, 0, QTableWidgetItem(item))
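# The descrNN dictionaries below are single-entry option templates ('label': default value); .copy() is used when building algo_dict so edits made for one algorithm do not leak into another.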
descr19 = {'Size of Population:':100}
descr20 = {'Number of Generations:':100}
descr21 = {'Mutation Rate:':0.25}
descr22 = {'Cooling Rate:':0.5}
descr23 = {'Mean of Gaussian:':0}
descr24 = {'Std. Deviation of Gaussian:':1}
descr26 = {'Initial Temperature:':1.2}
descr28 = {'Accuracy:':1e-06}
descr25 = {'Update Frequency:':50}
descr27 = {'Temperature:':0.1}
descr29 = {'Step Size:':0.1}
descr32 = {'Number of Iterations:':100}
descr33 = {'Number of Repetition:':100}
descr30 = {'Error Tolerance for x:':0.0001}
descr31 = {'Error Tolerance for f:':0.0001}
descr34 = {'Inertia:': 0.5}
descr35 = {'Cognitive Rate:': 2.1}
descr36 = {'Social Rate:':2.1}
descr37 = {'Neighborhood Size:': 5}
descr38 = {'Topology:':0}
descr39 = {'Crossover Rate:':1}
descr40 = {'Number of CPU:':1}
descr41 = {'Number of Islands:':1}
descr42 = {'Force bounds:' : False} #extend options
self.algo_dict={
"Classical Evolution Strategy (CES) - Inspyred": [descr19.copy(),descr20.copy(),descr21.copy(),descr40],
"Simulated Annealing (SA) - Inspyred": [descr20.copy(),descr21.copy(),descr22.copy(),descr23.copy(),descr24.copy(),descr26.copy(),descr40],
"Particle Swarm (PSO) - Inspyred" : [descr19.copy(),descr20.copy(),descr34.copy(),descr35.copy(),descr36.copy(),descr40],
"Basinhopping (BH) - Scipy": [descr32.copy(),descr33.copy(),descr25.copy(),descr27.copy(),descr29],
"Nelder-Mead (NM) - Scipy": [descr20.copy(),descr30.copy(),descr31],
"L-BFGS-B - Scipy": [descr20.copy(),descr28],
"Differential Evolution (DE) - Inspyred": [descr19.copy(),descr20.copy(),descr21.copy(),descr39.copy(),descr40],
"Random Search": [descr19.copy(),descr40],
"Nondominated Sorted GA (NSGA2) - Inspyred": [descr19.copy(),descr20.copy(),descr21.copy(),descr40],
"Pareto Archived ES (PAES) - Inspyred": [descr19.copy(),descr20.copy(),descr40],
"Nondominated Sorted GA (NSGA2) - Bluepyopt": [descr19.copy(),descr20.copy(),descr21.copy(),descr40],
"Indicator Based (IBEA) - Bluepyopt": [descr19.copy(),descr20.copy(),descr21.copy(),descr40],
"Differential Evolution (DE) - Pygmo":[descr19.copy(),descr20.copy(),descr41],
"Self-Adaptive DE (SADE) - Pygmo":[descr19.copy(),descr20.copy(),descr41],
"Exponential Natural ES (XNES) - Pygmo":[descr19.copy(),descr20.copy(),descr42,descr41],
"Simple Genetic Algorithm (SGA) - Pygmo":[descr19.copy(),descr20.copy(),descr41],
"Particle Swarm (PSO) - Pygmo":[descr19.copy(),descr20.copy(),descr35.copy(),descr36.copy(),descr41],
"Particle Swarm Gen (PSOG) - Pygmo":[descr19.copy(),descr20.copy(),descr35.copy(),descr36.copy(),descr40,descr41],
"Nondominated Sorted Particle Swarm (NSPSO) - Pygmo":[descr19.copy(),descr20.copy(),descr35.copy(),descr36.copy(),descr40,descr41],
"Nondominated Sorted GA (NSGA2) - Pygmo":[descr19.copy(),descr20.copy(),descr21.copy(),descr40,descr41],
"Extended Ant Colony (GACO) - Pygmo":[descr19.copy(),descr20.copy(),descr40,descr41],
"Multi-Objective Ant Colony (MACO) - Pygmo":[descr19.copy(),descr20.copy(),descr40,descr41],
"Covariance Matrix Adaptation ES (CMAES) - Pygmo":[descr19.copy(),descr20.copy(),descr42,descr41],
"Single Differential Evolution (SDE) - Pygmo":[descr19.copy(),descr20.copy(),descr41],
"Differential Evolution (DE1220) - Pygmo":[descr19.copy(),descr20.copy(),descr41],
"Bee Colony (ABC) - Pygmo":[descr19.copy(),descr20.copy(),descr41],
"FullGrid - Pygmo":[descr19.copy(),descr20.copy(),descr41],
"Praxis - Pygmo":[descr19.copy(),descr20.copy(),descr41],
"Nelder-Mead (NM) - Pygmo":[descr19.copy(),descr20.copy(),descr41]
#NM,prax
}
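# Each algo_dict entry above maps an algorithm's display name to the option templates that algoselect() shows in the parameter table.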
self.tabwidget.setTabText(self.tabwidget.indexOf(self.eval_tab), _translate("Neuroptimus", "Results"))
self.label_72.setText(_translate("Neuroptimus", "Final Result"))
#plt.tight_layout()
self.figure2 = plt.figure(figsize=(4,2), dpi=130)
# self.figure2.gcf().subplots_adjust()
self.canvas2 = FigureCanvas(self.figure2)
self.canvas2.setParent(self.widget2)
self.pushButton_34.setText(_translate("Neuroptimus", "Save Parameters"))
self.pushButton_34.clicked.connect(self.SaveParam)
self.tabwidget.setTabText(self.tabwidget.indexOf(self.plot_tab), _translate("Neuroptimus", "Statistics"))
self.label_74.setText(_translate("Neuroptimus", "Analysis"))
self.pushButton_35.setText(_translate("Neuroptimus", "Generation Plot"))
self.pushButton_35.clicked.connect(self.PlotGen)
#self.pushButton_36.setText(_translate("Neuroptimus", "Grid Plot"))
#self.pushButton_36.clicked.connect(self.PlotGrid)
self.pushButton_37.setText(_translate("Neuroptimus", "Error Details"))
self.pushButton_37.clicked.connect(self.ShowErrorDialog)
self.errorlist.setColumnCount(4)
self.errorlist.setHorizontalHeaderLabels(["Error Functions","Value","Weight","Weighted Value"])
self.errorlist.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.menuMenu.setTitle(_translate("Neuroptimus", "Menu"))
self.actionunlock.setText(_translate("Neuroptimus", "Unlock Tabs"))
self.actionexit.setText(_translate("Neuroptimus", "Exit"))
self.tabwidget.setTabEnabled(1,False)
self.tabwidget.setTabEnabled(2,False)
self.tabwidget.setTabEnabled(3,False)
self.tabwidget.setTabEnabled(4,False)
self.tabwidget.setTabEnabled(5,False)
self.tabwidget.setTabEnabled(6,False)
def unlocktabs(self):
self.tabwidget.setTabEnabled(1,True)
self.tabwidget.setTabEnabled(2,True)
self.tabwidget.setTabEnabled(3,True)
self.tabwidget.setTabEnabled(4,True)
self.tabwidget.setTabEnabled(5,True)
self.tabwidget.setTabEnabled(6,True)
def openFileNameDialog(self):
"""
File dialog for the file tab to open file.
"""
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
self.datfileName, _ = QFileDialog.getOpenFileName(None,"QFileDialog.getOpenFileName()", "","Data files (*.dat *.json);;All Files (*);;", options=options)
if self.datfileName:
self.lineEdit_file.setText(self.datfileName)
self.lineEdit_folder.setText(os.path.dirname(os.path.realpath(self.datfileName)))
self.pushButton_3.setEnabled(True)
if self.time_checker.isChecked():
self.time_calc()
def time_calc(self):
try:
with open(str(self.lineEdit_file.text())) as data:
all_line=data.read().splitlines()
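# The data file is expected to contain whitespace-separated columns with time stamps (ms) in the first column; trace length, sampling frequency and trace count are derived from it.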
time_vec=[float(x.split()[0]) for x in all_line]
self.length_ctrl.setText(str(round(max(time_vec))))
self.freq_ctrl.setText(str(round((len(time_vec)-1)*1000/(round(max(time_vec))-round(min(time_vec)))))) #sampling frequency (Hz)
self.size_ctrl.setText(str(len(all_line[0].split())-1)) #trace number
except Exception:
print('No data file selected')
def openFolderNameDialog2(self):
"""
File dialog for the model tab to open folder.
"""
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
folderName= QFileDialog.getExistingDirectory(None, options=options)
if folderName:
self.lineEdit_folder2.setText(folderName)
def openFileNameDialog2(self):
"""
File dialog for the model tab to open file.
"""
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(None,"QFileDialog.getOpenFileName()", "","Hoc Files (*.hoc);;All Files (*);;", options=options)
if fileName:
self.lineEdit_file2.setText(fileName)
self.lineEdit_folder2.setText(os.path.dirname(os.path.realpath(fileName)))
self.pushButton_3.setEnabled(True)
def openFolderNameDialog(self):
"""
File dialog for the file tab to open folder.
"""
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
folderName= QFileDialog.getExistingDirectory(None, options=options)
if folderName:
self.lineEdit_folder.setText(folderName)
def disable_mod_path(self):
"""
Enables the mod file path widgets when the checkbox is checked, disables them otherwise.
"""
if self.load_mods_checkbox.isChecked():
self.lineEdit_folder2.setEnabled(True)
self.pushButton_14.setEnabled(True)
else:
self.lineEdit_folder2.setEnabled(False)
self.pushButton_14.setEnabled(False)
def unitchange(self):
"""
Sets the unit choices of the drop-down widget according to the selected trace type.
"""
self.dropdown.clear()
if self.type_selector.currentIndex()==0:
self.dropdown.addItems(["uV","mV","V"])
elif self.type_selector.currentIndex()==1:
self.dropdown.addItems(["pA","nA","uA"])
elif self.type_selector.currentIndex()==2:
self.dropdown.addItems(["uV","mV","V","pA","nA","uA"])
else:
self.dropdown.addItems(["none"])
self.dropdown.setCurrentIndex(1)
def add_data_dict(self,data_dict):
"""
Flattens the given (possibly nested) dictionary into a printable string for the input summary label.
(The full input tree view is not implemented yet.)
:param data_dict: dictionary of loaded input data or features
"""
stack = data_dict
string=""
while stack:
key, value = stack.popitem()
if isinstance(value, dict):
string+=str("{0} : ".format(key))+"\n"
stack.update(value)
else:
string+=str(" {0} : {1}".format(key, value))+"\n"
return string
def Load(self):
"""
Loads the target data after 'Load trace' is clicked.
First builds a dictionary with the paths and options and calls the first step of the core with it.
Plots the loaded trace with matplotlib on the file tab.
"""
if (self.type_selector.currentText() == 'Features'):
try:
kwargs = {"file" : str(self.lineEdit_folder.text()),
"input" : [str(self.lineEdit_file.text()),
None,
str(self.dropdown.currentText()),
None,
None,
None,
self.type_selector.currentText().split()[0].lower()]}
except ValueError as ve:
print(ve)
else:
try:
kwargs = {"file" : str(self.lineEdit_folder.text()),
"input" : [str(self.lineEdit_file.text()),
int(self.size_ctrl.text()),
str(self.dropdown.currentText()),
float(self.length_ctrl.text()),
float(self.freq_ctrl.text()),
self.time_checker.isChecked(),
self.type_selector.currentText().split()[0].lower()]}
except ValueError as ve:
print(ve)
self.core.FirstStep(kwargs)
self.tabwidget.setTabEnabled(1,True)
if self.type_selector.currentIndex()==0 or self.type_selector.currentIndex()==1 or self.type_selector.currentIndex()==3:
f = self.core.option_handler.input_freq
t = self.core.option_handler.input_length
no_traces=self.core.option_handler.input_size
#self.graphicsView.set_xticks([n for n in range(0, int((t*no_traces)/(1000.0/f)), int((t*no_traces)/(1000.0/f)/5.0)) ])
#self.graphicsView.set_xticklabels([str(n) for n in range(0, t*no_traces, (t*no_traces)/5)])
#self.graphicsView.set_xlabel("time [ms]")
_type="voltage" if self.type_selector.currentIndex==0 else "current" if self.type_selector.currentIndex==1 else "unkown"
#unit="V" if self.type_selector.GetSelection()==0 else "A" if self.type_selector.GetSelection()==1 else ""
#self.graphicsView.set_ylabel(_type+" [" + self.core.option_handler.input_scale + "]")
exp_data = []
freq=float(self.freq_ctrl.text())
for k in range(self.core.data_handler.number_of_traces()):
exp_data.extend(self.core.data_handler.data.GetTrace(k))
self.figure.clf()
ax = self.figure.add_subplot(111)
ax.cla()
if self.time_checker.isChecked():
ax.plot([x/freq*1000 for x in range(len(exp_data))],exp_data)
else:
ax.plot(exp_data)
self.canvas.draw()
plt.tight_layout()
#self.graphicsView.set_title('PyQt Matplotlib Example')
for k in range(self.core.data_handler.number_of_traces()):
exp_data.extend(self.core.data_handler.data.GetTrace(k))
#axes.plot(list(range(0, len(exp_data))), exp_data)
self.model.insertRow(0)
if self.type_selector.currentIndex()==0:
for n in [x for x in enumerate(self.loaded_input_types) if x[1]!=None and x[0]!=2]:
self.loaded_input_types[n[0]]=None
input_string="Voltage trace \n"
self.loaded_input_types[0]=self.tvoltage
#self.model.setData(self.model.index(0), self.tvoltage,self.input_file_controll.GetValue().split("/")[-1])
input_string+=str(str(self.lineEdit_file.text()).split("/")[-1])+"\n"
elif self.type_selector.currentIndex()==1:
for n in [x for x in enumerate(self.loaded_input_types) if x[1]!=None and x[0]!=2]:
#self.input_tree.Delete(n[1])
self.loaded_input_types[n[0]]=None
input_string="Current trace"
self.loaded_input_types[1]=self.tcurrent
#self.model.setData(self.model.index(0),self.tcurrent,self.input_file_controll.GetValue().split("/")[-1])
input_string+=str(str(self.lineEdit_file.text()).split("/")[-1])+"\n"
'''
elif self.type_selector.GetSelection()==3:
try:
self.input_tree.Delete(self.tspike_t)
except ValueError:
pass
self.tspike_t=self.input_tree.AppendItem(self.troot,"Spike times")
self.input_tree.AppendItem(self.tspike_t,self.input_file_controll.GetValue().split("/")[-1])
'''
elif self.type_selector.currentIndex()==2:
for n in [x for x in enumerate(self.loaded_input_types) if x[1]!=None and x[0]!=2]:
#self.input_tree.Delete(n[1])
self.loaded_input_types[n[0]]=None
input_string="Features"
#self.loaded_input_types[2]=self.tfeatures
input_string+=str(str(self.lineEdit_file.text()).split("/")[-1])+"\n"
input_string+=self.add_data_dict(self.core.data_handler.features_dict)
else:
pass
self.input_label.setText(QtCore.QCoreApplication.translate("Neuroptimus", input_string))
if self.core.option_handler.type[-1]!="features":
self.my_list = copy(self.core.ffun_calc_list)
else:
self.my_list=list(self.core.data_handler.features_data.keys())[3:]
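# my_list holds the selectable fitness functions: the built-in list for trace data, or the feature names (the first three, non-feature keys are skipped) when a feature file was loaded.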
self.param_list = [[]] * len(self.my_list)
if self.core.option_handler.type[-1]!="features":
self.param_list[2] = [("Spike detection thres. (mV)",0.0)]
self.param_list[1] = [("Spike detection thres. (mV)",0.0), ("Spike Window (ms)",1.0)]
else:
self.param_list[0] = [("Spike detection thres. (mV)",0.0)]
if self.core.option_handler.type[-1]=="features":
for l in range(len(self.core.data_handler.features_data["stim_amp"])):
self.container.append(float(self.core.data_handler.features_data["stim_amp"][l]))
self.fitlist.setRowCount(len(self.my_list))
for index,elems in enumerate(self.my_list):
item = QTableWidgetItem(elems)
item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.fitlist.setItem(index, 0, item)
if self.core.option_handler.type[-1]=="features":
itemv = QTableWidgetItem(str(self.core.data_handler.features_data[self.my_list[index]]["weight"]))
else:
itemv = QTableWidgetItem("0")
self.fitlist.setItem(index, 1, itemv)
if self.core.option_handler.type[-1]!="features":
self.kwargs={"runparam" : [self.core.data_handler.data.t_length,
self.core.data_handler.data.step,
"record",
"soma",
"pos",
"vrest"]
}
else:
self.kwargs={"runparam" : [self.core.data_handler.features_data["stim_delay"] + self.core.data_handler.features_data["stim_duration"]+100,
0.05,
"record",
"soma",
"pos",
"vrest"]}
if self.core.option_handler.output_level=="1":
self.core.Print()
self.fit_container=[]
if self.core.option_handler.type[-1]!="features":
self.lineEdit_tstop.setText(str(self.core.data_handler.data.t_length))
else:
self.lineEdit_tstop.setText(str(self.core.data_handler.features_data["stim_delay"] + self.core.data_handler.features_data["stim_duration"]+100))
self.lineEdit_delay.setText(str(self.core.data_handler.features_data["stim_delay"]))
self.lineEdit_duration.setText(str(self.core.data_handler.features_data["stim_duration"]))
#self.fitlist.cellChanged.connect(self.fitchanged)
def Set(self, e):
"""
Sets the selected parameters to be optimized on the model.
Loops through every selected row.
"""
items = self.modellist.selectionModel().selectedRows()
print(items)
self.remover.setEnabled(True)
for item_selected in items:
#try to use the table for selection
selected_row=item_selected.row()
section = str(self.modellist.item(selected_row, 0).text())
#
segment = str(self.modellist.item(selected_row, 1).text())
chan = str(self.modellist.item(selected_row, 2).text())
morph=""
par = str(self.modellist.item(selected_row, 3).text())
if chan == "morphology":
chan = "None"
par= "None"
morph = str(self.modellist.item(selected_row, 3).text())
kwargs = {"section" : section,
"segment" : segment,
"channel" : chan,
"morph" : morph,
"params" : par,
"values" : 0}
for j in range(4):
self.modellist.item(selected_row,j).setBackground(QtGui.QColor(255,0,0))
self.core.SetModel2(kwargs)
"""else:
for idx in range(self.modellist.rowCount()):
item = self.modellist.item(idx, 3)
item1 = self.modellist.item(idx, 1)
item2 = self.modellist.item(idx, 2)
item0 = self.modellist.item(idx, 0)
if (item0 == searchValue[0] and item1 == searchValue[1])and(item == searchValue[2] or item2 == searchValue[3]):
for j in range(4):
self.modellist.item(idx,j).setBackground(QtGui.QColor(0,255,0))
self.core.SetModel(kwargs)"""
def Remove(self, e):
"""
Removes the selected parameters from the set of parameters to optimize.
Loops through every selected row.
"""
items = self.modellist.selectionModel().selectedRows()
for item_selected in items:
#try to use the table for selection
selected_row=item_selected.row()
section = str(self.modellist.item(selected_row, 0).text())
#
segment = str(self.modellist.item(selected_row, 1).text())
chan = str(self.modellist.item(selected_row, 2).text())
morph=""
par = str(self.modellist.item(selected_row, 3).text())
if chan == "morphology":
chan = "None"
par= "None"
morph = str(self.modellist.item(selected_row, 3).text())
kwargs = {"section" : section,
"segment" : segment,
"channel" : chan,
"morph" : morph,
"params" : par}
if kwargs["channel"] == "None":
temp = kwargs["section"] + " " + kwargs["morph"]
else:
temp = kwargs["section"] + " " + kwargs["segment"] + " " + kwargs["channel"] + " " + kwargs["params"]
self.core.option_handler.param_vals.pop(self.core.option_handler.GetObjTOOpt().index(temp))
self.core.option_handler.adjusted_params.remove(temp)
if len(self.core.option_handler.GetObjTOOpt()) == 0:
self.remover.setEnabled(False )
for j in range(4):
self.modellist.item(selected_row,j).setBackground(QtGui.QColor(255,255,255))
def sim_plat(self):
"""
Called when the simulation platform is changed; hides the widgets that are not needed and swaps the label of the Load button.
"""
if self.dd_type.currentIndex()==1:
self.sim_path.show()#setEnabled(True)
self.sim_param.show()
self.pushButton_13.setText(QtCore.QCoreApplication.translate("Neuroptimus", "Set"))
self.pushButton_12.show()
self.pushButton_14.hide()#setEnabled(False)
self.pushButton_15.hide()#setEnabled(False)
self.pushButton_16.hide()#setEnabled(False)
self.setter.hide()#setEnabled(False)
self.remover.hide()#setEnabled(False)
self.modellist.hide()#setEnabled(False)
self.lineEdit_file2.hide()#setEnabled(False)
self.lineEdit_folder2.hide()#setEnabled(False)
self.label_23.hide()
self.label_24.hide()
self.label_26.show()
self.label_27.show()
self.load_mods_checkbox.hide()
elif self.dd_type.currentIndex()==2:
self.sim_path.show()#setEnabled(True)
self.sim_param.show()
self.pushButton_13.setText(QtCore.QCoreApplication.translate("Neuroptimus", "Set"))
self.pushButton_12.hide()
self.pushButton_14.hide()#setEnabled(False)
self.pushButton_15.hide()#setEnabled(False)
self.pushButton_16.hide()#setEnabled(False)
self.setter.hide()#setEnabled(False)
self.remover.hide()#setEnabled(False)
self.modellist.hide()#setEnabled(False)
self.lineEdit_file2.hide()#setEnabled(False)
self.lineEdit_folder2.hide()#setEnabled(False)
self.label_23.hide()
self.label_24.hide()
self.label_26.show()
self.label_27.show()
self.load_mods_checkbox.hide()
else:
self.pushButton_13.setText(QtCore.QCoreApplication.translate("Neuroptimus", "Load"))
self.sim_path.hide()#setEnabled(False)
self.sim_param.hide()
self.pushButton_12.hide()
self.pushButton_14.show()#setEnabled(True)
self.pushButton_15.show()#setEnabled(True)
self.pushButton_16.show()#setEnabled(True)
self.setter.show()#setEnabled(True)
self.remover.show()#setEnabled(True)
self.modellist.show()#setEnabled(True)
self.lineEdit_file2.show()#setEnabled(True)
self.lineEdit_folder2.show()#setEnabled(True)
self.label_23.show()
self.label_24.show()
self.label_26.hide()
self.label_27.hide()
self.load_mods_checkbox.show()
def Loadpython(self, e):
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(None,"QFileDialog.getOpenFileName()", "","Python files (*.py);;All Files (*);;", options=options)
if fileName:
self.sim_path.setText("python "+str(fileName))
def Load2(self, e):
"""
Load the selected Neuron model and displays the sections in a tablewidget
"""
self.model_file = self.lineEdit_file2.text()
self.tabwidget.setTabEnabled(2,True)
self.tabwidget.setTabEnabled(3,True)
self.tabwidget.setTabEnabled(4,True)
if self.load_mods_checkbox.isChecked():
self.spec_file = self.lineEdit_folder2.text()
else:
self.spec_file = None
try:
self.core.LoadModel({"model" : [self.model_file, self.spec_file],
"simulator" : self.dd_type.currentText(),
"sim_command" : self.sim_path.text() if not self.dd_type else self.sim_path.text()+" "+self.sim_param.text()}) # path + param for external
temp = self.core.model_handler.GetParameters()
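# GetParameters() returns a nested [section, [[segment, mechanism, [parameter, ...]], ...]] structure, which is flattened below into one table row per parameter.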
if temp!=None:
out = open("model.txt", 'w')
for i in temp:
out.write(str(i))
out.write("\n")
index=0
self.modellist.setRowCount(self.recursive_len(temp))
for row in temp:
for k in (row[1]):
if k != []:
for s in (k[2]):
self.modellist.setItem(index, 0, QTableWidgetItem(row[0]))
self.modellist.setItem(index, 1, QTableWidgetItem(str(k[0])))
self.modellist.setItem(index, 2, QTableWidgetItem(k[1]))
self.modellist.setItem(index, 3, QTableWidgetItem(s))
index+=1
self.modellist.setRowCount(index)
else:
pass
except OSError as oe:
print(oe)
if not self.dd_type.currentIndex():
try:
tmp=self.core.ReturnSections()
[tmp.remove("None") for i in range(tmp.count("None"))]
self.section_rec.addItems(tmp)
self.section_stim.addItems(tmp)
except:
popup("Section error")
def typeChange(self):
_translate = QtCore.QCoreApplication.translate
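# The same button serves two purposes: "Amplitude(s)" for the step protocol and "Load Waveform" for a custom waveform; its click handler is rewired whenever the stimulus type changes.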
if self.stimulus_type.currentIndex()==0:#step prot
self.lineEdit_delay.setDisabled(False)
self.lineEdit_duration.setDisabled(False)
self.base_dir_controll9.clicked.disconnect(self.openFileNameDialogWaveform)
self.base_dir_controll9.clicked.connect(self.amplitudes_fun)
self.base_dir_controll9.setText(_translate("Neuroptimus", "Amplitude(s)"))
if self.stimulus_type.currentIndex()==1:#wave prot
self.lineEdit_delay.setDisabled(True)
self.lineEdit_delay.setText("0")
self.lineEdit_duration.setDisabled(True)
self.lineEdit_duration.setText("1e9")
self.base_dir_controll9.setText(_translate("Neuroptimus", "Load Waveform"))
self.base_dir_controll9.clicked.disconnect(self.amplitudes_fun)
self.base_dir_controll9.clicked.connect(self.openFileNameDialogWaveform)
def openFileNameDialogWaveform(self):
"""
File dialog for loading a custom stimulus waveform file.
"""
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(None,"QFileDialog.getOpenFileName()", "","Data files (*.dat *.json);;All Files (*);;", options=options)
if fileName:
self.container=[fileName]
def recursive_len(self,item):
if type(item) == list:
return sum(self.recursive_len(subitem) for subitem in item)
else:
return 1
def UF(self):
"""
Calls the user function window for the Model tab.
"""
self.SW = SecondWindow(self)
self.SW.setObjectName("Neuroptimus")
self.SW.resize(500, 500)
self.SW.show()
def amplitudes_fun(self):
"""
Calls the amplitude window for the Options tab.
"""
self.SiW = StimuliWindow(self)
self.SiW.setObjectName("Neuroptimus")
self.SiW.resize(400, 500)
self.SiW.show()
def fitselect(self):
"""
Called when fitness functions are selected; colours the items and toggles their membership in the fitness set.
"""
items = self.fitlist.selectionModel().selectedIndexes()
for item_selected in items:
if item_selected.column()==0:
current_item=str(self.fitlist.item(item_selected.row(), 0).text())
if current_item in self.fitset:
self.fitlist.item(item_selected.row(),0).setBackground(QtGui.QColor(255,255,255))
self.fitset.remove(current_item)
else:
self.fitlist.item(item_selected.row(),0).setBackground(QtGui.QColor(0,255,0))
self.fitset.add(current_item)
def fitchanged(self):
"""
Called when the weights of the fitness functions are changed. Stores the weights in a list.
"""
self.weights=[]
try:
allRows = self.fitlist.rowCount()
for row in range(0,allRows):
current_fun=str(self.fitlist.item(row, 0).text())
current_weight=float(self.fitlist.item(row, 1).text())
if current_weight:
self.weights.append(current_weight)
except:
self.fitlist.item(row, 1).setText("0")
def Normalize(self, e):
"""
Normalizes the weights of the fitness functions so that they sum to one.
Iterates over every row of the fitness table and rescales each non-zero weight by the total; zero weights are left at "0".
"""
try:
#self.fitselect()
#self.fitchanged()
allRows = self.fitlist.rowCount()
self.weights=[float(self.fitlist.item(row, 1).text()) for row in range(0,allRows)]
sum_o_weights = float(sum(self.weights))
for row in range(0,allRows):
current_fun=str(self.fitlist.item(row, 0).text())
current_weight=float(str(self.fitlist.item(row, 1).text()))
if current_weight:
try:
self.fitlist.item(row, 1).setText(str(round(current_weight / sum_o_weights,4)))
except:
continue
else:
try:
self.fitlist.item(row, 1).setText("0")
except:
continue
except Exception as e:
popup("Wrong values given. "+str(e))
def packageselect(self,pack_name):
"""
Fills the algorithm table with the algorithms offered by the selected package
(Recommended, Inspyred, Pygmo, Bluepyopt or Scipy).
"""
selected_package = self.algos.get(pack_name)
self.algolist.setRowCount(len(selected_package))
for index,elems in enumerate(selected_package):
item = QTableWidgetItem(str(elems))
self.algolist.setItem(index, 0, item)
self.algolist.item(0,0)
def algoselect(self):
"""
Fills the options table for the selected algorithm, where the user can change the option values (generations, population size, etc.).
Iterates through the selected algorithm's option list, writes the option names into the first column as immutable cells and the default values into the second column.
"""
try:
selected_algo = self.algolist.selectionModel().selectedRows()[0].row()
aspects=self.algo_dict.get(str(self.algolist.item(selected_algo, 0).text()))
self.aspectlist.setRowCount(len(aspects)+1)
item = QTableWidgetItem('Seed')
item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.aspectlist.setItem(0, 0, item)
item2 = QTableWidgetItem('1234')
self.aspectlist.setItem(0, 1, item2)
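# Row 0 always holds the random seed; the algorithm-specific options follow from row 1 onwards.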
for index,elems in enumerate(aspects):
key=next(iter(elems))
item = QTableWidgetItem(key)
item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.aspectlist.setItem(index+1, 0, item)
item2 = QTableWidgetItem(str(elems.get(key)))
if str(key)=='Force bounds:':
item2 = QTableWidgetItem()
item2.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)
item2.setCheckState(QtCore.Qt.Unchecked)
self.aspectlist.setItem(index+1, 1, item2)
except:
print('Algorithm selection error')
def aspect_changed(self):
"""
Stores the edited option value separately for each algorithm.
The selection is cleared afterwards; otherwise clicking another algorithm right after the change
would register as another change and store the same value for the newly selected algorithm.
"""
try:
selected_algo = self.algolist.selectionModel().selectedRows()
selected_asp = self.aspectlist.selectionModel().selectedIndexes()
if selected_asp[0].row():
self.algo_dict[str(self.algolist.item(selected_algo[0].row(), 0).text())][selected_asp[0].row()-1][str(self.aspectlist.item(selected_asp[0].row(), 0).text())]=float(self.aspectlist.item(selected_asp[0].row(), 1).text())
self.aspectlist.clearSelection()
except Exception:
pass  # selection empty or value not numeric; ignore
def runsim(self):
"""
Checks all the tabs and sends the collected options to the core.
Checks the fitness weights and whether they are normalized.
Checks the selected algorithm and its options, then launches the optimization.
Calls the last step once the optimization has finished.
If an error occurs, the index of the offending tab and its error message are stored,
the view switches to that tab and the error is shown in a popup.
"""
err=[]
errpop=[]
if not self.dd_type.currentIndex():
try:
self.core.SecondStep({"stim" : [str(self.stimprot.currentText()), float(self.lineEdit_pos.text()), str(self.section_rec.currentText())],
"stimparam" : [self.container, float(self.lineEdit_delay.text()), float(self.lineEdit_duration.text())]})
self.kwargs = {"runparam":[float(self.lineEdit_tstop.text()),
float(self.lineEdit_dt.text()),
str(self.param_to_record.currentText()),
str(self.section_stim.currentText()),
float(self.lineEdit_posins.text()),
float(self.lineEdit_initv.text())]}
except AttributeError:
err.append(2)
errpop.append("No stimulus amplitude was selected!")
except ValueError:
errpop.append('Some of the cells are empty. Please fill out all of them!')
err.append(2)
except Exception as e:
err.append(2)
print(e)
errpop.append("There was an error")
self.fitfun_list=[]
self.weights=[]
try:
allRows = self.fitlist.rowCount()
for row in range(0,allRows):
current_fun=str(self.fitlist.item(row, 0).text())
current_weight=float(self.fitlist.item(row, 1).text())
if current_weight:
self.fitfun_list.append(current_fun)
self.weights.append(current_weight)
if self.core.option_handler.type[-1]!="features":
self.kwargs.update({"feat":
[{"Spike Detection Thres. (mv)": float(self.spike_tresh.text()), "Spike Window (ms)":float(self.spike_window.text())},
self.fitfun_list]
})
self.kwargs.update({"weights" : self.weights})
else:
self.kwargs.update({"feat":
[{"Spike Detection Thres. (mv)": float(self.spike_tresh.text()), "Spike Window (ms)":float(self.spike_window.text())},
self.fitfun_list]
})
self.kwargs.update({"weights" : self.weights})
if not(0.99<sum(self.kwargs["weights"])<=1.01):
ret = QtWidgets.QMessageBox.question(None, "Weights not normalized", "You did not normalize your weights!\nWould you like to continue anyway?", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
if ret == QtWidgets.QMessageBox.No:
err.append(3)
errpop.append("Normalize")
except:
err.append(3)
errpop.append("Fitness Values not right")
try:
selected_algo = self.algolist.selectionModel().selectedRows()
algo_name=str(self.algolist.item(selected_algo[0].row(), 0).text())
algo_str=algo_name[algo_name.find("(")+1:].replace(")","")
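# Keep only the short identifier after the opening parenthesis (e.g. "CES - Inspyred"); this is the string passed to the core as "evo_strat".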
print(algo_str)
tmp = {"seed" : int(self.aspectlist.item(0,1).text()),
"evo_strat" : str(algo_str)
}
#for n in self.algo_param:
#tmp.update({str(n[1]) : float(n[0].GetValue())})
allRows = self.aspectlist.rowCount()
for row in range(1,allRows):
aspect=str(self.aspectlist.item(row,0).text())
if aspect=='Force bounds:':
value=bool(self.aspectlist.item(row,1).checkState())
else:
value=float(self.aspectlist.item(row,1).text())
tmp.update({aspect:value})
tmp.update({
"num_params" : len(self.core.option_handler.GetObjTOOpt()),
"boundaries" : self.core.option_handler.boundaries ,
"starting_points" : self.seed
})
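# tmp now bundles the seed, algorithm name, the user-edited options, the number of parameters, their boundaries and any starting points for the optimizer backend.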
self.kwargs.update({"algo_options":tmp})
except:
err.append(4)
errpop.append("You forget to select an algorithm!")
try:
self.seed = None
self.core.ThirdStep(self.kwargs)
if self.core.option_handler.output_level=="1":
self.core.Print()
except TypeError:
self.core.ThirdStep(self.kwargs)
if self.core.option_handler.output_level=="1":
self.core.Print()
except sizeError as sE:
err.append(4)
errpop.append("There was an error during the optimization: "+sE.m)
except Exception as e:
err.append(4)
print(e)
errpop.append("There was an error")
if err:
if not errpop[0]=="Normalize":
popup(errpop[0])
self.tabwidget.setCurrentIndex(int(min(err)))
else:
#try:
self.core.FourthStep()
self.tabwidget.setTabEnabled(5,True)
self.tabwidget.setTabEnabled(6,True)
self.eval_tab_plot()
self.plot_tab_fun()
self.tabwidget.setCurrentIndex(5)
#except:
# popup("Forth step error")
def eval_tab_plot(self):
"""
Writes the optimized parameter values and the final fitness into a scrollable text area.
Plots the experimental and resulting model traces; a tight layout keeps the sides from being cropped.
"""
text = "Results:"
#for n, k in zip(self.core.option_handler.GetObjTOOpt(), self.core.Neuroptimus.fit_obj.ReNormalize(self.core.Neuroptimus.final_pop[0].candidate[0:len(self.core.option_handler.adjusted_params)])):
for n, k in zip(self.core.option_handler.GetObjTOOpt(), self.core.cands[0]):
if n.split()[0]==n.split()[-1]:
param=[n.split()[0], n.split()[-1]]
text += "\n" + param[0] + "\n" + "\t" + str(k)
else:
param=[n.split()[0], "segment: " + n.split()[1], n.split()[-1]]
#print param
if n.split()[1]!=n.split()[-1]:
text += "\n" + ": \n".join(param) + ":" + "\n" + "\t" + str(k)
else:
text += "\n" + param[0] + ": " + param[-1] + "\n" + "\t" + str(k)
#text += "\n" + "fitness:\n" + "\t" + str(self.core.Neuroptimus.final_pop[0].fitnes)
text += "\n" + "fitness:\n" + "\t" + str(self.core.last_fitness)
for tabs in [self.eval_tab,self.plot_tab]:
label = QtWidgets.QLabel(tabs)
label.setGeometry(QtCore.QRect(10, 70, 170, 206))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
label.setFont(font)
label.setObjectName("label")
label.setText(QtCore.QCoreApplication.translate("Neuroptimus", text))
scroll_area = QtWidgets.QScrollArea(tabs)
scroll_area.setGeometry(QtCore.QRect(10, 100, 170, 256))
scroll_area.setWidget(label)
scroll_area.setWidgetResizable(True)
label = QtWidgets.QLabel(self.plot_tab)
label.setGeometry(QtCore.QRect(300, 80, 250, 146))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
label.setFont(font)
label.setObjectName("label")
label.setText(QtCore.QCoreApplication.translate("Neuroptimus", text))
scroll_area = QtWidgets.QScrollArea(self.plot_tab)
scroll_area.setGeometry(QtCore.QRect(300,80, 350, 100))
scroll_area.setWidget(label)
scroll_area.setWidgetResizable(True)
exp_data = []
model_data = []
axes = self.figure2.add_subplot(1,1,1)
axes.cla()
if self.core.option_handler.type[-1]!="features":
for n in range(self.core.data_handler.number_of_traces()):
exp_data.extend(self.core.data_handler.data.GetTrace(n))
model_data.extend(self.core.final_result[n])
no_traces=self.core.data_handler.number_of_traces()
t = self.core.option_handler.input_length
step = self.core.option_handler.run_controll_dt
axes.set_xlabel("time [ms]")
_type=self.core.data_handler.data.type
unit="mV" if _type=="voltage" else "nA" if _type=="current" else ""
axes.set_ylabel(_type+" [" + unit + "]")
axes.set_xticks([n for n in range(0, int((t * no_traces) / (step)), int((t * no_traces) / (step) / 5.0)) ])
axes.set_xticklabels([str(n) for n in range(0, int(t * no_traces), int((t * no_traces) / 5))])
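# The traces are concatenated and plotted against sample index, so the tick labels are rewritten to show time in ms across all traces.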
axes.plot(list(range(0, len(exp_data))), exp_data)
axes.plot(list(range(0, len(model_data))), model_data, 'r')
axes.legend(["target", "model"])
self.figure2.savefig("result_trace.png", dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1)
self.figure2.savefig("result_trace.eps", dpi=300, facecolor='w', edgecolor='w')
self.figure2.savefig("result_trace.svg", dpi=300, facecolor='w', edgecolor='w')
self.canvas2.draw()
plt.tight_layout()
else:
for n in range(len(self.core.data_handler.features_data["stim_amp"])):
model_data.extend(self.core.final_result[n])
no_traces=len(self.core.data_handler.features_data["stim_amp"])
t = int(self.core.option_handler.run_controll_tstop) # instead of input_length
step = self.core.option_handler.run_controll_dt
axes.set_xlabel("time [ms]")
_type=str(self.kwargs["runparam"][2]) #parameter to record
_type_ = "Voltage" if _type =="v" else "Current" if _type=="c" else ""
unit="mV" if _type=="v" else "nA" if _type=="c" else ""
axes.set_ylabel(_type_+" [" + unit + "]")
axes.set_xticks([n for n in range(0, int((t * no_traces) / (step)), int((t * no_traces) / (step) / 5.0)) ])
axes.set_xticklabels([str(n) for n in range(0, int(t * no_traces), int((t * no_traces) / 5))])
axes.plot(list(range(0, len(model_data))),model_data, 'r')
axes.legend(["model"])
self.figure2.savefig("result_trace.png", dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1)
self.figure2.savefig("result_trace.eps", dpi=300, facecolor='w', edgecolor='w')
self.figure2.savefig("result_trace.svg", dpi=300, facecolor='w', edgecolor='w')
self.canvas2.draw()
plt.tight_layout()
plt.close()
def SaveParam(self, e):
"""
Saves the found values in a file.
"""
try:
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
save_file_name, _ = QFileDialog.getSaveFileName(None,"QFileDialog.getSaveFileName()", "","Data files (*txt);;All Files (*);;", options=options)
if save_file_name:
f=open(str(save_file_name)+".txt","w")
#params=self.core.Neuroptimus.final_pop[0].candidate[0:len(self.core.option_handler.adjusted_params)]
f.write("\n".join(map(str,self.core.renormed_params)))
except Exception as e:
popup("Couldn't save the parameters." + str(e))
def plot_tab_fun(self):
"""
Writes the fitness statistics (best, worst, mean, median, std) into a scrollable text area and fills the table of error components.
"""
try:
fits=self.core.fits
stats={'best' : str(min(fits)),'worst' : str(max(fits)),'mean' : str(numpy.mean(fits)),'median' : str(numpy.median(fits)), 'std' : str(numpy.std(fits))}
except AttributeError:
stats={'best' : "unkown",'worst' : "unkown",'mean' : "unkown",'median' : "unkown", 'std' : "unkown"}
string = "Best: " + str(stats['best']) + "\nWorst: " + str(stats['worst']) + "\nMean: " + str(stats['mean']) + "\nMedian: " + str(stats['median']) + "\nStd:" + str(stats['std'])
label = QtWidgets.QLabel(self.plot_tab)
label.setGeometry(QtCore.QRect(300, 80, 250, 146))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
label.setFont(font)
label.setObjectName("label")
label.setText(QtCore.QCoreApplication.translate("Neuroptimus", string))
scroll_area = QtWidgets.QScrollArea(self.plot_tab)
scroll_area.setGeometry(QtCore.QRect(300,80, 350, 100))
scroll_area.setWidget(label)
scroll_area.setWidgetResizable(True)
self.errorlist.setRowCount(self.recursive_len(self.core.error_comps))
idx=0
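# Each row of the error table aggregates one fitness component over all traces: its summed value, weight and weighted contribution.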
for c_idx,c in enumerate(zip(*self.core.error_comps)):
tmp=[0]*4
for t_idx in range(len(c)):
tmp[1]+=c[t_idx][2]
tmp[2]=c[t_idx][0]
tmp[3]+=c[t_idx][2]*c[t_idx][0]
if self.core.option_handler.type[-1]!='features':
tmp[0]=self.core.ffun_mapper[c[t_idx][1].__name__]
else:
tmp[0]=(c[t_idx][1])
idx+=1
tmp=list(map(str,tmp))
self.errorlist.setItem(c_idx, 0, QTableWidgetItem(tmp[0]))
self.errorlist.setItem(c_idx, 1, QTableWidgetItem("{:.4f}".format(float(tmp[1]))))
self.errorlist.setItem(c_idx, 2, QTableWidgetItem("{:.4f}".format(float(tmp[2]))))
self.errorlist.setItem(c_idx, 3, QTableWidgetItem("{:.4f}".format(float(tmp[3]))))
self.errorlist.setRowCount(idx)
def PlotGen(self, e):
"""
Generates the generation plot; the Inspyred and Deap packages are handled differently.
Deap plots are generated by applying a regular expression to the statistics file,
while Inspyred plots are generated by its own function.
"""
plt.close('all')
try:
generation_plot("stat_file.txt")
except ValueError:
stat_file=open("stat_file.txt","rt")
generation_plot(stat_file)
except Exception as e:
popup("Generation Plot generation error." + e)
def PlotGrid(self, e):
self.prev_bounds=copy(self.core.option_handler.boundaries)
self.PG=gridwindow(self)
self.PG.setObjectName("Neuroptimus")
self.PG.resize(400, 500)
self.PG.show()
def ShowErrorDialog(self,e):
self.extra_error_dialog=ErrorDialog(self)
self.extra_error_dialog.setObjectName("Neuroptimus")
self.extra_error_dialog.resize(400, 500)
self.extra_error_dialog.show()
def boundarywindow(self):
self.BW = BoundaryWindow(self)
self.BW.setObjectName("Neuroptimus")
self.BW.resize(400, 500)
self.BW.show()
def startingpoints(self):
num_o_params=len(self.core.option_handler.GetObjTOOpt())
self.SPW = Startingpoints(self,num_o_params)
self.SPW.setObjectName("Neuroptimus")
self.SPW.resize(400, 500)
self.SPW.show()
class SecondWindow(QtWidgets.QMainWindow):
def __init__(self,parent):
super(SecondWindow, self).__init__()
_translate = QtCore.QCoreApplication.translate
self.core=Core.coreModul()
self.plaintext = QtWidgets.QPlainTextEdit(self)
        self.plaintext.insertPlainText("#Please define your function below in the template!\n"+
            "#You may choose an arbitrary name for your function,\n"+
            "#but the input parameters must be self and a vector! In the first line of the function specify the length of the vector in a comment!\n"+
            "#In the following lines you may specify the names of the parameters, one per comment line.\n")
self.plaintext.move(10,10)
self.plaintext.resize(350,400)
self.pushButton_45 = QtWidgets.QPushButton(self)
self.pushButton_45.setGeometry(QtCore.QRect(370, 30, 80, 22))
self.pushButton_45.setObjectName("pushButton_45")
self.pushButton_45.setText(_translate("Ufun", "Load"))
self.pushButton_45.clicked.connect(self.loaduserfun)
self.pushButton_46 = QtWidgets.QPushButton(self)
self.pushButton_46.setGeometry(QtCore.QRect(20, 440, 80, 22))
self.pushButton_46.setObjectName("pushButton_46")
self.pushButton_46.setText(_translate("Ufun", "Ok"))
self.pushButton_46.clicked.connect(self.OnOk)
self.pushButton_47 = QtWidgets.QPushButton(self)
self.pushButton_47.setGeometry(QtCore.QRect(120, 440, 80, 22))
self.pushButton_47.setObjectName("pushButton_47")
self.pushButton_47.setText(_translate("Ufun", "Cancel"))
self.pushButton_47.clicked.connect(self.close)
self.option_handler=parent.core.option_handler
self.modellist=parent.modellist
def loaduserfun(self):
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(None,"QFileDialog.getOpenFileName()", "","Text Files (*.txt);;All Files (*);;", options=options)
if fileName:
f = open(fileName, "r")
            fun = ("#Please define your function below in the template!\n"+
                "#You may choose an arbitrary name for your function,\n"+
                "#but the input parameters must be self and a vector! In the first line of the function specify the length of the vector in a comment!\n"+
                "#In the following lines you may specify the names of the parameters, one per comment line.\n")
            for l in f:
                fun = fun + l
            f.close()
self.plaintext.setPlainText(str(fun))
def OnOk(self, e):
try:
self.option_handler.u_fun_string = str(self.plaintext.toPlainText())
self.option_handler.adjusted_params=[]
self.modellist.setRowCount(0)
text = ""
text = list(map(str.strip, str(self.plaintext.toPlainText()).split("\n")))[4:-1]
variables = []
variables = list(map(str.strip, str(text[0][text[0].index("(") + 1:text[0].index(")")]).split(",")))
var_len = int(text[1].lstrip("#"))
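            # Expected template, reconstructed from the parsing below (names are illustrative):
            #   def usr_fun(self, v):   <- text[0]; parameter names are read from inside ( )
            #   #2                      <- text[1]; number of optimized parameters
            #   #cm                     <- optional: one parameter name per '#' line
            #   #g_pas
            #   ...function body...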
i=0
var_names=[]
while text[i+2][0]=="#" and i<var_len:
var_names.append(text[i+2].lstrip("#"))
i+=1
if len(var_names)!=var_len and len(var_names)!=0:
                raise SyntaxError("Number of parameter names must equal the number of parameters")
if var_names==[]:
var_names=None
for i in range(var_len):
self.option_handler.SetOptParam(0.1)
                if var_names is not None:
self.option_handler.SetObjTOOpt(var_names[i])
else:
self.option_handler.SetObjTOOpt("Vector" + "[" + str(i) + "]")
if variables[0] == '':
raise ValueError
compile(self.plaintext.toPlainText(), '<string>', 'exec')
self.close()
except ValueError as val_err:
popup("Your function doesn't have any input parameters!")
except SyntaxError as syn_err:
popup(str(syn_err) +"Syntax Error")
class StimuliWindow(QtWidgets.QMainWindow):
def __init__(self,parent):
super(StimuliWindow, self).__init__()
_translate = QtCore.QCoreApplication.translate
self.parent=parent
self.amplit_edit = QtWidgets.QLineEdit(self)
self.amplit_edit.setGeometry(QtCore.QRect(140, 10, 61, 22))
self.amplit_edit.setObjectName("amplit_edit")
self.amplit_edit.setValidator(self.parent.intvalidator)
self.label_amplit = QtWidgets.QLabel(self)
self.label_amplit.setGeometry(QtCore.QRect(10, 10, 141, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_amplit.setFont(font)
self.label_amplit.setObjectName("label_amplit")
self.label_amplit.setText(_translate("Neuroptimus", "Number of stimuli:"))
self.pushButton_create = QtWidgets.QPushButton(self)
self.pushButton_create.setGeometry(QtCore.QRect(250, 10, 61, 21))
self.pushButton_create.setObjectName("pushButton_create")
self.pushButton_create.setText(_translate("Neuroptimus", "Create"))
self.pushButton_create.clicked.connect(self.Set)
self.pushButton_accept = QtWidgets.QPushButton(self)
self.pushButton_accept.setGeometry(QtCore.QRect(200, 450, 61, 21))
self.pushButton_accept.setObjectName("pushButton_accept")
self.pushButton_accept.setText(_translate("Neuroptimus", "Accept"))
self.pushButton_accept.clicked.connect(self.Accept)
self.pushButton_accept.setEnabled(False)
self.option_handler=self.parent.core.option_handler
self.data_handler=self.parent.core.data_handler
self.stim_table= QtWidgets.QTableWidget(self)
self.stim_table.setGeometry(QtCore.QRect(80, 50, 150, 361))
self.stim_table.setObjectName("stim_table")
self.stim_table.setColumnCount(1)
self.stim_table.setHorizontalHeaderLabels(["Amplitude (nA)"])
self.stim_table.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.stim_table.horizontalHeader().setStretchLastSection(True)
if self.parent.container:
self.amplit_edit.setText(str(len(self.parent.container)))
self.stim_table.setRowCount(len(self.parent.container))
for idx,n in enumerate(self.parent.container):
self.stim_table.setItem(idx, 0, QTableWidgetItem(str(n)))
try:
if self.option_handler.type[-1]=="features":
self.amplit_edit.setText(str(len(self.data_handler.features_data["stim_amp"])))
self.Set(self)
except:
print("No input file found")
def Set(self, e):
try:
self.stim_table.setRowCount(int(self.amplit_edit.text()))
self.pushButton_accept.setEnabled(True)
except:
self.close()
def Accept(self, e):
self.parent.container=[]
try:
for n in range(self.stim_table.rowCount()):
self.parent.container.append(float(self.stim_table.item(n, 0).text()))
except:
print("Stimuli values are missing or incorrect")
self.close()
class BoundaryWindow(QtWidgets.QMainWindow):
def __init__(self,parent):
super(BoundaryWindow, self).__init__()
_translate = QtCore.QCoreApplication.translate
hstep = 130
vstep = 35
hoffset = 10
voffset = 15
self.option_handler=parent.core.option_handler
self.boundary_table = QtWidgets.QTableWidget(self)
self.boundary_table.setGeometry(QtCore.QRect(10, 10, 302, 361))
self.boundary_table.setObjectName("boundary_table")
self.boundary_table.setColumnCount(3)
self.boundary_table.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.boundary_table.setHorizontalHeaderLabels(("Parameters;Minimum;Maximum").split(";"))
self.boundary_table.setRowCount(len(self.option_handler.GetObjTOOpt()))
for l in range(len(self.option_handler.GetObjTOOpt())):
param=self.option_handler.GetObjTOOpt()[l].split()
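            # Build a compact display label from the whitespace-split parameter
            # description (token layout inferred from the branches below).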
if len(param)==4:
label=param[0] + " " + param[1] + " " + param[3]
else:
if param[0]!=param[-1]:
label=param[0] + " " + param[-1]
else:
label=param[-1]
item = QTableWidgetItem(label)
item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.boundary_table.setItem(l, 0, item)
if len(self.option_handler.boundaries[1]) == len(self.option_handler.GetObjTOOpt()):
minitem = QTableWidgetItem(str(self.option_handler.boundaries[0][l]))
maxitem = QTableWidgetItem(str(self.option_handler.boundaries[1][l]))
self.boundary_table.setItem(l, 1, minitem)
self.boundary_table.setItem(l, 2, maxitem)
Setbutton = QtWidgets.QPushButton(self)
Setbutton.setGeometry(QtCore.QRect(10, 400, 80, 22))
Setbutton.setObjectName("Setbutton")
Setbutton.setText(_translate("Neuroptimus", "Set"))
Setbutton.clicked.connect(self.Set)
Savebutton = QtWidgets.QPushButton(self)
Savebutton.setGeometry(QtCore.QRect(100, 400, 80, 22))
Savebutton.setObjectName("Savebutton")
Savebutton.setText(_translate("Neuroptimus", "Save"))
Savebutton.clicked.connect(self.Save)
Loadbutton = QtWidgets.QPushButton(self)
Loadbutton.setGeometry(QtCore.QRect(190, 400, 80, 22))
        Loadbutton.setObjectName("Loadbutton")
Loadbutton.setText(_translate("Neuroptimus", "Load"))
Loadbutton.clicked.connect(self.Load)
self.save_file_name="boundaries.txt"
def Set(self, e):
try:
min_l=[]
max_l=[]
for idx in range(self.boundary_table.rowCount()):
min_l.append(float(self.boundary_table.item(idx,1).text()))
max_l.append(float(self.boundary_table.item(idx,2).text()))
self.option_handler.boundaries[0] = min_l
self.option_handler.boundaries[1] = max_l
for i in range(len(self.option_handler.boundaries[0])):
if self.option_handler.boundaries[0][i] >= self.option_handler.boundaries[1][i]:
                    popup("Invalid Values: Min boundary must be lower than max")
raise Exception
except ValueError:
popup("Invalid Value")
except Exception:
            print('Error occurred')
self.close()
def Save(self,e):
try:
            save_bound, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File')
            if save_bound:
                f = open(str(save_bound) + ".txt", 'w')
for idx in range(self.boundary_table.rowCount()):
f.write(str(self.boundary_table.item(idx,1).text()))
f.write("\t")
f.write(str(self.boundary_table.item(idx,2).text()))
f.write("\n")
f.close()
except IOError:
popup("Error writing the file!")
def Load(self,e):
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(None,"QFileDialog.getOpenFileName()", "","Text Files (*.txt);;All Files (*);;", options=options)
if fileName:
try:
                with open(fileName, "r") as f:
                    for idx,l in enumerate(f):
                        bounds=l.split()
                        self.boundary_table.setItem(idx, 1, QTableWidgetItem(str(bounds[0])))
                        self.boundary_table.setItem(idx, 2, QTableWidgetItem(str(bounds[1])))
except IOError:
popup("Error reading the file!")
except Exception as e:
print("Error:"+ str(e))
class Startingpoints(QtWidgets.QMainWindow):
def __init__(self,parent,*args,**kwargs):
super(Startingpoints,self).__init__()
_translate = QtCore.QCoreApplication.translate
n_o_params=args[0]
self.parent=parent
self.container=[]
hstep = 130
vstep = 35
hoffset = 10
voffset = 15
for n in range(n_o_params):
param=parent.core.option_handler.GetObjTOOpt()[n].split()
if len(param)==4:
p_name=param[0] + " " + param[1] + " " + param[3]
else:
if param[0]!=param[-1]:
p_name=param[0] + " " + param[-1]
else:
p_name=param[-1]
#p_name=self.parent.core.option_handler.GetObjTOOpt()[n].split()[-1]
lbl = QtWidgets.QLabel(self)
lbl.setGeometry(QtCore.QRect(hoffset, voffset + n * vstep, 121, 16))
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
lbl.setFont(font)
lbl.setObjectName("ctrl")
lbl.setText(QtCore.QCoreApplication.translate("Neuroptimus", p_name))
ctrl = QtWidgets.QLineEdit(self)
ctrl.setGeometry(QtCore.QRect(hstep, voffset + n * vstep, 61, 22))
ctrl.setObjectName("ctrl")
if self.parent.seed:
ctrl.setText(str(self.parent.seed[n]))
lbl.show()
ctrl.show()
self.container.append(ctrl)
Okbutton = QtWidgets.QPushButton(self)
Okbutton.setGeometry(QtCore.QRect(10, 400, 80, 22))
Okbutton.setObjectName("Okbutton")
Okbutton.setText(_translate("Neuroptimus", "Ok"))
Okbutton.clicked.connect(self.OnOk)
Closebutton = QtWidgets.QPushButton(self)
Closebutton.setGeometry(QtCore.QRect(100, 400, 80, 22))
Closebutton.setObjectName("Closebutton")
Closebutton.setText(_translate("Neuroptimus", "Cancel"))
Closebutton.clicked.connect(self.close)
Loadpopbutton = QtWidgets.QPushButton(self)
Loadpopbutton.setGeometry(QtCore.QRect(280, 400, 80, 22))
Loadpopbutton.setObjectName("Loadpopbutton")
Loadpopbutton.setText(_translate("Neuroptimus", "Load Population"))
Loadpopbutton.clicked.connect(self.OnLoadPop)
Loadbutton = QtWidgets.QPushButton(self)
Loadbutton.setGeometry(QtCore.QRect(190, 400, 80, 22))
Loadbutton.setObjectName("Loadbutton")
Loadbutton.setText(_translate("Neuroptimus", "Load Point"))
Loadbutton.clicked.connect(self.OnLoad)
def OnOk(self,e):
try:
self.parent.seed=[]
for n in self.container:
self.parent.seed.append(float(n.text()))
self.close()
except ValueError:
            popup("Error: you must give every parameter an initial value!")
def OnLoad(self):
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(None,"QFileDialog.getOpenFileName()", "","Text Files (*.txt);;All Files (*);;", options=options)
if fileName:
try:
f = open(fileName, "r")
for idx, l in enumerate(f):
self.container[idx].setText(str(l))
except Exception as e:
                popup("Error: " + str(e))
def OnLoadPop(self,e):
self.size_of_pop = 0
file_path = ""
popup("This function is only supported by the algorithms from inspyred!")
        text, ok = QInputDialog.getText(self, 'Load Population', 'Enter size of population:')
if ok:
self.size_of_pop = int(text)
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
file_path, _ = QFileDialog.getOpenFileName(None,"QFileDialog.getOpenFileName()", "","Text Files (*.txt);;All Files (*);;", options=options)
def lastlines(hugefile, n, bsize=2048):
import errno
            hfile = open(hugefile, 'r')
            if not hfile.readline():
                return
            # the file is re-read in binary mode below, so keep the separator as bytes
            sep = hfile.newlines or "\n"
            if isinstance(sep, tuple):
                sep = sep[0]
            sep = sep.encode()
            hfile.close()
hfile = open(hugefile, 'rb')
hfile.seek(0, os.SEEK_END)
linecount = 0
pos = 0
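            # Scan backwards from the end of the file in bsize-byte blocks,
            # counting newline separators until at least n lines have been seen.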
while linecount <= n:
try:
hfile.seek(-bsize, os.SEEK_CUR)
linecount += hfile.read(bsize).count(sep)
hfile.seek(-bsize, os.SEEK_CUR)
except IOError as e:
if e.errno == errno.EINVAL:
# Attempted to seek past the start, can't go further
bsize = hfile.tell()
hfile.seek(0, os.SEEK_SET)
linecount += hfile.read(bsize).count(sep)
pos = hfile.tell()
hfile.close()
hfile = open(hugefile, 'r')
hfile.seek(pos, os.SEEK_SET) # our file position from above
for line in hfile:
# We've located n lines *or more*, so skip if needed
if linecount > n:
linecount -= 1
continue
# The rest we yield
yield line
for l in lastlines(file_path, self.size_of_pop, 1):
s=l.strip()
params = [float(x.lstrip("[").rstrip("]")) for x in s.split(", ")][3:-1]
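            # The loaded line layout is assumed to follow the inspyred observer output:
            # the leading fields are dropped and only the candidate parameter values
            # (the first half of the remaining numbers) are kept below.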
            params = params[0:len(params) // 2 + 1]
self.parent.seed.append(params)
self.close()
class gridwindow(QtWidgets.QMainWindow):
def __init__(self,parent,*args):
super(gridwindow,self).__init__()
_translate = QtCore.QCoreApplication.translate
hstep = 200
vstep = 35
hoffset = 10
voffset = 15
self.min = []
self.max = []
font = QtGui.QFont()
font.setFamily("Ubuntu")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.option_handler=parent.core.option_handler
for l in range(len(self.option_handler.GetObjTOOpt())):
lbl = QtWidgets.QLabel(self)
lbl.setGeometry(QtCore.QRect(hoffset, voffset + l * vstep, 121, 16))
lbl.setFont(font)
lbl.setObjectName("ctrl")
lbl.setText(QtCore.QCoreApplication.translate("Neuroptimus", self.option_handler.GetObjTOOpt()[l].split()[-1]))
tmp_min = QtWidgets.QLineEdit(self)
tmp_min.setGeometry(QtCore.QRect(hstep, voffset + l * vstep, 75, 30))
tmp_min.setObjectName("tmp_min")
tmp_max = QtWidgets.QLineEdit(self)
tmp_max.setGeometry(QtCore.QRect(hstep + hstep/2, voffset + l * vstep, 75, 30))
            tmp_max.setObjectName("tmp_max")
lbl.show()
            tmp_min.show()
            tmp_max.show()
            self.min.append(tmp_min)
            self.max.append(tmp_max)
if len(self.option_handler.boundaries[1]) == len(self.option_handler.GetObjTOOpt()):
tmp_min.setText(str(self.option_handler.boundaries[0][l]))
tmp_max.setText(str(self.option_handler.boundaries[1][l]))
self.resolution_ctrl = QtWidgets.QLineEdit(self)
self.resolution_ctrl.setGeometry(QtCore.QRect(hstep,600, 75, 30))
self.resolution_ctrl.setObjectName("ctrl")
self.resolution_ctrl.setText(str(parent.resolution))
Setbutton = QtWidgets.QPushButton(self)
Setbutton.setGeometry(QtCore.QRect(hstep, 650, 80, 22))
        Setbutton.setObjectName("Setbutton")
Setbutton.setText(_translate("Neuroptimus", "Ok"))
Setbutton.clicked.connect(self.Set)
def Set(self, e):
try:
            self.option_handler.boundaries[0] = [float(n.text()) for n in self.min]
            self.option_handler.boundaries[1] = [float(n.text()) for n in self.max]
self.resolution=int(self.resolution_ctrl.text())
self.close()
except ValueError as ve:
popup("Invalid Value")
class ErrorDialog(QtWidgets.QMainWindow):
def __init__(self,parent):
super(ErrorDialog,self).__init__()
self.error_comp_table = QtWidgets.QTableWidget(self)
self.error_comp_table.setGeometry(QtCore.QRect(10, 200, 441, 261))
self.error_comp_table.setObjectName("error_comp_table")
self.error_comp_table.setColumnCount(4)
self.error_comp_table.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.error_comp_table.setHorizontalHeaderLabels(("Error Function;Value;Weight;Weighted Value").split(";"))
self.error_comp_table.setRowCount(parent.recursive_len(parent.core.error_comps))
tmp_w_sum=0
c_idx=0
for t in parent.core.error_comps:
for c in t:
#tmp_str.append( "*".join([str(c[0]),c[1].__name__]))
if parent.core.option_handler.type[-1]!="features":
self.error_comp_table.setItem(c_idx,0,QTableWidgetItem(parent.core.ffun_mapper[c[1].__name__]))
else:
self.error_comp_table.setItem(c_idx,0,QTableWidgetItem(c[1]))
self.error_comp_table.setItem(c_idx,1,QTableWidgetItem(str("{:.4f}".format(c[2]))))
self.error_comp_table.setItem(c_idx,2,QTableWidgetItem(str("{:.4f}".format(c[0]))))
self.error_comp_table.setItem(c_idx,3,QTableWidgetItem(str("{:.4f}".format(c[0]*c[2]))))
c_idx+=1
tmp_w_sum +=c[0]*c[2]
c_idx+=1
self.error_comp_table.setItem(c_idx,0,QTableWidgetItem("Weighted Sum"))
self.error_comp_table.setItem(c_idx,1,QTableWidgetItem("-"))
self.error_comp_table.setItem(c_idx,2,QTableWidgetItem("-"))
self.error_comp_table.setItem(c_idx,3,QTableWidgetItem(str(tmp_w_sum)))
tmp_w_sum=0
self.error_comp_table.setRowCount(c_idx)
def main(param=None):
    if param is not None:
core=Core.coreModul()
core.option_handler.output_level=param.lstrip("-v_level=")
app = QtWidgets.QApplication(sys.argv)
Neuroptimus = QtWidgets.QMainWindow()
ui = Ui_Neuroptimus()
ui.setupUi(Neuroptimus)
Neuroptimus.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| lgpl-2.1 |
ligovirgo/seismon | RfPrediction/old/earthquake_hist.py | 2 | 2161 | import numpy as np
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
plt.figure(1)
mu, sigma = 1.5e-06, 1.0e-02
mu2, sigma2 = 1.0e-07,1.5e-02
hist_array = np.random.normal(mu, sigma, 1000)
hist2_array = np.random.normal(mu2,sigma2,1000)
prob = np.divide(hist_array,hist2_array)
count, bins,ignored = plt.hist(hist_array,30, normed=True)
plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu) ** 2 / (2 * sigma**2) ),linewidth=2,color='r',label='lockloss')
count3, bins3,ignored3 = plt.hist(hist2_array,30, normed=True)
plt.plot(bins3, 1/(sigma2 * np.sqrt(2 * np.pi)) * np.exp( - (bins3 - mu2) ** 2 / (2 * sigma2**2) ),linewidth=2,color='k',label='locked')
plt.title('Velocity Histogram(generated data)')
plt.legend(loc='best')
plt.savefig('/home/eric.coughlin/public_html/hist_test.png')
plt.figure(2)
acceleration_array = np.diff(hist_array)
acceleration2_array = np.diff(hist2_array)
prob_acceleration = np.divide(acceleration_array, acceleration2_array)
count, bins,ignored = plt.hist(acceleration_array,30, normed=True)
plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu) ** 2 / (2 * sigma**2) ),linewidth=2,color='r',label='lockloss')
count, bins,ignored = plt.hist(acceleration2_array,30, normed=True)
plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu) ** 2 / (2 * sigma**2) ),linewidth=2,color='k',label='locked')
plt.title('Acceleration Histogram(generated data)')
plt.legend(loc='best')
plt.savefig('/home/eric.coughlin/public_html/hist_acc_test.png')
plt.figure(3)
count, bins,ignored = plt.hist(hist_array,30, normed=True)
count2, bins2,ignored2 = plt.hist(acceleration_array,30, normed=True)
plt.plot((bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu) ** 2 / (2 * sigma**2) )), (bins2, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins2 - mu) ** 2 / (2 * sigma**2) )),linewidth=2,color='k',label='acc/vel')
plt.title('acceleration versus velocity(generated data)')
plt.xlabel('Velocity')
plt.ylabel('Acceleration')
plt.legend(loc='best')
plt.savefig('/home/eric.coughlin/public_html/hist_acc_vel_test.png')
plt.figure(4)
| gpl-3.0 |
NelisVerhoef/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
h2educ/scikit-learn | sklearn/cross_validation.py | 47 | 67782 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used a validation set once while the k - 1 remaining
fold form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3, shuffle=False, random_state=None):
super(LabelKFold, self).__init__(len(labels), n_folds, shuffle,
random_state)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
        # Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
for i in range(self.n_folds):
yield (self.idxs == i)
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds), the last one has the
complementary.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
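        # n_i / t_i below are the per-class train / test counts, proportional to the
        # class frequencies p_i and capped so train + test never exceeds a class size.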
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
                # We complete by randomly assigning the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
    test_fold : array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
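    Examples
    --------
    A minimal usage sketch; the label values here are illustrative only.
    >>> from sklearn.cross_validation import LabelShuffleSplit
    >>> labels = [1, 1, 2, 2, 3, 3]
    >>> lss = LabelShuffleSplit(labels, n_iter=4, test_size=0.5,
    ...                         random_state=0)
    >>> for train_index, test_index in lss:  # doctest: +SKIP
    ...     print("TRAIN:", train_index, "TEST:", test_index)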
'''
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used. In
        all other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
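# Minimal usage sketch for cross_val_predict (illustrative only; the estimator
# and dataset below are assumptions for the example, not part of this module):
#
#   from sklearn.datasets import load_iris
#   from sklearn.linear_model import LogisticRegression
#   iris = load_iris()
#   y_pred = cross_val_predict(LogisticRegression(), iris.data, iris.target, cv=5)
#   # y_pred holds one out-of-fold prediction per sample, in the original order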
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
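# For example (hypothetical values): _check_is_partition(np.array([2, 0, 1]), 3)
# is True because every index in range(3) is hit exactly once, whereas
# _check_is_partition(np.array([0, 0, 2]), 3) is False because index 1 is never hit.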
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used. In
        all other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
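# Hedged usage sketch for cross_val_score (the estimator, dataset and scoring
# string are illustrative assumptions, not taken from this module):
#
#   from sklearn.datasets import load_iris
#   from sklearn.svm import SVC
#   iris = load_iris()
#   scores = cross_val_score(SVC(kernel="linear"), iris.data, iris.target,
#                            cv=5, scoring="accuracy")
#   print("%.3f +/- %.3f" % (scores.mean(), scores.std()))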
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used. In
        all other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
    classifier : boolean, optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
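# Sketch of how check_cv resolves the different ``cv`` inputs (values are
# illustrative):
#
#   y = np.array([0, 1, 0, 1, 0, 1])
#   check_cv(None, X=np.zeros((6, 2)), y=y, classifier=True)   # -> StratifiedKFold(y, 3)
#   check_cv(4, X=np.zeros((6, 2)), y=y, classifier=False)     # -> KFold(6, 4)
#   check_cv(KFold(6, 2))           # -> returned unchanged (already a CV generator)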
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used. In
        all other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
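# Hedged usage sketch for permutation_test_score (estimator and dataset are
# placeholder assumptions, not part of this module):
#
#   from sklearn.datasets import load_iris
#   from sklearn.svm import SVC
#   iris = load_iris()
#   score, perm_scores, pvalue = permutation_test_score(
#       SVC(kernel="linear"), iris.data, iris.target,
#       cv=5, n_permutations=100, scoring="accuracy", random_state=0)
#   # a small pvalue suggests the score is unlikely to arise from permuted labels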
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
akrherz/dep | scripts/convergence/plot_avgs.py | 2 | 3951 | """Go."""
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import pandas as pd
from pyiem.util import get_dbconn
YEAR = int(sys.argv[1])
pgconn = get_dbconn("idep")
cursor = pgconn.cursor()
# Load up HUC12s
HUC12s = []
cursor.execute(
"""
SELECT distinct huc_12 from results where scenario = 5
ORDER by huc_12"""
)
for row in cursor:
HUC12s.append(row[0])
results = []
for huc12 in ["070600060701"]: # HUC12s:
cursor.execute(
"""
SELECT hs_id, extract(year from valid) as yr,
sum(runoff) as sum_runoff,
sum(loss) as sum_loss, sum(delivery) as sum_delivery from results
WHERE scenario = 5 and valid between '%s-01-01' and '%s-01-01'
and huc_12 = '%s'
GROUP by hs_id, yr
"""
% (YEAR, YEAR + 1, huc12)
)
data = {}
print("%s %s" % (huc12, cursor.rowcount))
for row in cursor:
fpath = row[0]
if fpath < 100:
catchment = 0
else:
catchment = int(str(fpath)[:-2])
sample = int(str(fpath)[-2:])
year = row[1]
runoff = row[2]
loss = row[3]
delivery = row[4]
res = data.setdefault(
catchment, {"runoff": [], "loss": [], "delivery": []}
)
res["runoff"].append(runoff)
res["loss"].append(loss * 10.0)
res["delivery"].append(delivery * 10.0)
(fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
averages = []
for i in range(10):
averages.append([])
for i in range(10):
for catchment in data:
if len(data[catchment]["loss"]) <= i:
continue
for val in data[catchment]["loss"][: i + 1]:
averages[i].append(val)
ax.grid(axis="y")
ax.text(
0.02,
0.95,
"Average",
color="b",
transform=ax.transAxes,
ha="left",
va="bottom",
bbox=dict(color="white"),
)
ax.text(
0.02,
0.9,
"Median",
color="r",
transform=ax.transAxes,
ha="left",
va="bottom",
bbox=dict(color="white"),
)
d = ax.boxplot(averages, widths=0.7)
for i, a in enumerate(averages):
ax.text(
i + 1,
np.average(a),
"%.2f" % (np.average(a),),
ha="center",
va="bottom",
color="b",
fontsize=10,
)
ax.text(
i + 1,
np.median(a) + 0.05,
"%.2f" % (np.median(a),),
ha="center",
va="bottom",
color="r",
fontsize=10,
)
ax.set_title(
"Convergence Study: %s %s Detachment Estimates" % (huc12, YEAR)
)
ax.set_ylabel("Soil Detachment (Mg/ha)")
ax.set_xlabel("Sub-catchment Sample Size, T-test based on 10 sample avg")
box = ax.get_position()
ax.set_position(
[box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9]
)
labels = ["#\nT\nP"]
for i in range(10):
x = stats.ttest_1samp(averages[i], np.average(averages[-1]))
labels.append("%s\n%.3f\n%.3f" % (i + 1, x[0], x[1]))
results.append(
dict(
huc12=huc12,
one=np.average(averages[0]),
ten=np.average(averages[-1]),
)
)
ax.set_xticks(range(11))
ax.set_xticklabels(labels)
ax.set_ylim(bottom=-3)
fig.savefig("%s_%s.pdf" % (huc12, YEAR), dpi=600)
plt.close()
# df = pd.DataFrame(results)
# df.to_csv('results.csv')
df = pd.read_csv("results.csv")
(fig, ax) = plt.subplots(1, 1)
ax.scatter(df["one"].values * 10.0, df["ten"].values * 10.0)
ax.plot([0, 120], [0, 120], lw=2, color="k")
ax.set_xlabel("Soil Detachment with 1 Sample (Mg/ha)")
ax.set_ylabel("Soil Detachment with 10 Samples (Mg/ha)")
ax.grid(True)
ax.set_title("DEP 30 HUC12 Convergence Test for 2014")
fig.savefig("Figure6.pdf", dpi=600)
| mit |
scipy/scipy | scipy/special/_precompute/wright_bessel.py | 12 | 12928 | """Precompute coefficients of several series expansions
of Wright's generalized Bessel function Phi(a, b, x).
See https://dlmf.nist.gov/10.46.E1 with rho=a, beta=b, z=x.
"""
from argparse import ArgumentParser, RawTextHelpFormatter
import numpy as np
from scipy.integrate import quad
from scipy.optimize import minimize_scalar, curve_fit
from time import time
try:
import sympy # type: ignore[import]
from sympy import EulerGamma, Rational, S, Sum, \
factorial, gamma, gammasimp, pi, polygamma, symbols, zeta
from sympy.polys.polyfuncs import horner # type: ignore[import]
except ImportError:
pass
def series_small_a():
"""Tylor series expansion of Phi(a, b, x) in a=0 up to order 5.
"""
order = 5
a, b, x, k = symbols("a b x k")
A = [] # terms with a
X = [] # terms with x
B = [] # terms with b (polygammas)
# Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i])
expression = Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
expression = gamma(b)/sympy.exp(x) * expression
# nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
for n in range(0, order+1):
term = expression.diff(a, n).subs(a, 0).simplify().doit()
# set the whole bracket involving polygammas to 1
x_part = (term.subs(polygamma(0, b), 1)
.replace(polygamma, lambda *args: 0))
        # sign convention: x part always positive
x_part *= (-1)**n
A.append(a**n/factorial(n))
X.append(horner(x_part))
B.append(horner((term/x_part).simplify()))
s = "Tylor series expansion of Phi(a, b, x) in a=0 up to order 5.\n"
s += "Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i], i=0..5)\n"
for name, c in zip(['A', 'X', 'B'], [A, X, B]):
for i in range(len(c)):
s += f"\n{name}[{i}] = " + str(c[i])
return s
# expansion of digamma
def dg_series(z, n):
"""Symbolic expansion of digamma(z) in z=0 to order n.
    See https://dlmf.nist.gov/5.7.E4 together with https://dlmf.nist.gov/5.5.E2
"""
k = symbols("k")
return -1/z - EulerGamma + \
sympy.summation((-1)**k * zeta(k) * z**(k-1), (k, 2, n+1))
def pg_series(k, z, n):
"""Symbolic expansion of polygamma(k, z) in z=0 to order n."""
return sympy.diff(dg_series(z, n+k), z, k)
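# Illustrative check (not part of the original file): for n = 3 the helper
# above expands digamma around z = 0 as
#   dg_series(z, 3) == -1/z - EulerGamma + zeta(2)*z - zeta(3)*z**2 + zeta(4)*z**3
# and pg_series(k, z, n) is just that truncated series differentiated k times,
# i.e. a truncated expansion of polygamma(k, z).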
def series_small_a_small_b():
"""Tylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5.
Be aware of cancellation of poles in b=0 of digamma(b)/Gamma(b) and
polygamma functions.
digamma(b)/Gamma(b) = -1 - 2*M_EG*b + O(b^2)
digamma(b)^2/Gamma(b) = 1/b + 3*M_EG + b*(-5/12*PI^2+7/2*M_EG^2) + O(b^2)
polygamma(1, b)/Gamma(b) = 1/b + M_EG + b*(1/12*PI^2 + 1/2*M_EG^2) + O(b^2)
and so on.
"""
order = 5
a, b, x, k = symbols("a b x k")
M_PI, M_EG, M_Z3 = symbols("M_PI M_EG M_Z3")
c_subs = {pi: M_PI, EulerGamma: M_EG, zeta(3): M_Z3}
A = [] # terms with a
X = [] # terms with x
B = [] # terms with b (polygammas expanded)
C = [] # terms that generate B
# Phi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i])
# B[0] = 1
# B[k] = sum(C[k] * b**k/k!, k=0..)
# Note: C[k] can be obtained from a series expansion of 1/gamma(b).
expression = gamma(b)/sympy.exp(x) * \
Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
# nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
for n in range(0, order+1):
term = expression.diff(a, n).subs(a, 0).simplify().doit()
# set the whole bracket involving polygammas to 1
x_part = (term.subs(polygamma(0, b), 1)
.replace(polygamma, lambda *args: 0))
        # sign convention: x part always positive
x_part *= (-1)**n
# expansion of polygamma part with 1/gamma(b)
pg_part = term/x_part/gamma(b)
if n >= 1:
# Note: highest term is digamma^n
pg_part = pg_part.replace(polygamma,
lambda k, x: pg_series(k, x, order+1+n))
pg_part = (pg_part.series(b, 0, n=order+1-n)
.removeO()
.subs(polygamma(2, 1), -2*zeta(3))
.simplify()
)
A.append(a**n/factorial(n))
X.append(horner(x_part))
B.append(pg_part)
# Calculate C and put in the k!
C = sympy.Poly(B[1].subs(c_subs), b).coeffs()
C.reverse()
for i in range(len(C)):
C[i] = (C[i] * factorial(i)).simplify()
s = "Tylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5."
s += "\nPhi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i], i=0..5)\n"
s += "B[0] = 1\n"
s += "B[i] = sum(C[k+i-1] * b**k/k!, k=0..)\n"
s += "\nM_PI = pi"
s += "\nM_EG = EulerGamma"
s += "\nM_Z3 = zeta(3)"
for name, c in zip(['A', 'X'], [A, X]):
for i in range(len(c)):
s += f"\n{name}[{i}] = "
s += str(c[i])
# For C, do also compute the values numerically
for i in range(len(C)):
s += f"\n# C[{i}] = "
s += str(C[i])
s += f"\nC[{i}] = "
s += str(C[i].subs({M_EG: EulerGamma, M_PI: pi, M_Z3: zeta(3)})
.evalf(17))
# Does B have the assumed structure?
s += "\n\nTest if B[i] does have the assumed structure."
s += "\nC[i] are derived from B[1] allone."
s += "\nTest B[2] == C[1] + b*C[2] + b^2/2*C[3] + b^3/6*C[4] + .."
test = sum([b**k/factorial(k) * C[k+1] for k in range(order-1)])
test = (test - B[2].subs(c_subs)).simplify()
s += f"\ntest successful = {test==S(0)}"
s += "\nTest B[3] == C[2] + b*C[3] + b^2/2*C[4] + .."
test = sum([b**k/factorial(k) * C[k+2] for k in range(order-2)])
test = (test - B[3].subs(c_subs)).simplify()
s += f"\ntest successful = {test==S(0)}"
return s
def asymptotic_series():
"""Asymptotic expansion for large x.
Phi(a, b, x) ~ Z^(1/2-b) * exp((1+a)/a * Z) * sum_k (-1)^k * C_k / Z^k
Z = (a*x)^(1/(1+a))
Wright (1935) lists the coefficients C_0 and C_1 (he calls them a_0 and
a_1). With slightly different notation, Paris (2017) lists coefficients
c_k up to order k=3.
Paris (2017) uses ZP = (1+a)/a * Z (ZP = Z of Paris) and
C_k = C_0 * (-a/(1+a))^k * c_k
"""
order = 8
class g(sympy.Function):
"""Helper function g according to Wright (1935)
g(n, rho, v) = (1 + (rho+2)/3 * v + (rho+2)*(rho+3)/(2*3) * v^2 + ...)
Note: Wright (1935) uses square root of above definition.
"""
nargs = 3
@classmethod
def eval(cls, n, rho, v):
if not n >= 0:
raise ValueError("must have n >= 0")
elif n == 0:
return 1
else:
return g(n-1, rho, v) \
+ gammasimp(gamma(rho+2+n)/gamma(rho+2)) \
/ gammasimp(gamma(3+n)/gamma(3))*v**n
class coef_C(sympy.Function):
"""Calculate coefficients C_m for integer m.
C_m is the coefficient of v^(2*m) in the Taylor expansion in v=0 of
Gamma(m+1/2)/(2*pi) * (2/(rho+1))^(m+1/2) * (1-v)^(-b)
* g(rho, v)^(-m-1/2)
"""
nargs = 3
@classmethod
def eval(cls, m, rho, beta):
if not m >= 0:
raise ValueError("must have m >= 0")
v = symbols("v")
expression = (1-v)**(-beta) * g(2*m, rho, v)**(-m-Rational(1, 2))
res = expression.diff(v, 2*m).subs(v, 0) / factorial(2*m)
res = res * (gamma(m + Rational(1, 2)) / (2*pi)
* (2/(rho+1))**(m + Rational(1, 2)))
return res
# in order to have nice ordering/sorting of expressions, we set a = xa.
xa, b, xap1 = symbols("xa b xap1")
C0 = coef_C(0, xa, b)
# a1 = a(1, rho, beta)
s = "Asymptotic expansion for large x\n"
s += "Phi(a, b, x) = Z**(1/2-b) * exp((1+a)/a * Z) \n"
s += " * sum((-1)**k * C[k]/Z**k, k=0..6)\n\n"
s += "Z = pow(a * x, 1/(1+a))\n"
s += "A[k] = pow(a, k)\n"
s += "B[k] = pow(b, k)\n"
s += "Ap1[k] = pow(1+a, k)\n\n"
s += "C[0] = 1./sqrt(2. * M_PI * Ap1[1])\n"
for i in range(1, order+1):
expr = (coef_C(i, xa, b) / (C0/(1+xa)**i)).simplify()
factor = [x.denominator() for x in sympy.Poly(expr).coeffs()]
factor = sympy.lcm(factor)
expr = (expr * factor).simplify().collect(b, sympy.factor)
expr = expr.xreplace({xa+1: xap1})
s += f"C[{i}] = C[0] / ({factor} * Ap1[{i}])\n"
s += f"C[{i}] *= {str(expr)}\n\n"
import re
re_a = re.compile(r'xa\*\*(\d+)')
s = re_a.sub(r'A[\1]', s)
re_b = re.compile(r'b\*\*(\d+)')
s = re_b.sub(r'B[\1]', s)
s = s.replace('xap1', 'Ap1[1]')
s = s.replace('xa', 'a')
# max integer = 2^31-1 = 2,147,483,647. Solution: Put a point after 10
# or more digits.
re_digits = re.compile(r'(\d{10,})')
s = re_digits.sub(r'\1.', s)
return s
def optimal_epsilon_integral():
"""Fit optimal choice of epsilon for integral representation.
The integrand of
int_0^pi P(eps, a, b, x, phi) * dphi
can exhibit oscillatory behaviour. It stems from the cosine of P and can be
minimized by minimizing the arc length of the argument
f(phi) = eps * sin(phi) - x * eps^(-a) * sin(a * phi) + (1 - b) * phi
of cos(f(phi)).
We minimize the arc length in eps for a grid of values (a, b, x) and fit a
parametric function to it.
"""
def fp(eps, a, b, x, phi):
"""Derivative of f w.r.t. phi."""
eps_a = np.power(1. * eps, -a)
return eps * np.cos(phi) - a * x * eps_a * np.cos(a * phi) + 1 - b
def arclength(eps, a, b, x, epsrel=1e-2, limit=100):
"""Compute Arc length of f.
Note that the arg length of a function f fro t0 to t1 is given by
int_t0^t1 sqrt(1 + f'(t)^2) dt
"""
return quad(lambda phi: np.sqrt(1 + fp(eps, a, b, x, phi)**2),
0, np.pi,
                    epsrel=epsrel, limit=limit)[0]
# grid of minimal arc length values
data_a = [1e-3, 0.1, 0.5, 0.9, 1, 2, 4, 5, 6, 8]
data_b = [0, 1, 4, 7, 10]
data_x = [1, 1.5, 2, 4, 10, 20, 50, 100, 200, 500, 1e3, 5e3, 1e4]
data_a, data_b, data_x = np.meshgrid(data_a, data_b, data_x)
data_a, data_b, data_x = (data_a.flatten(), data_b.flatten(),
data_x.flatten())
best_eps = []
for i in range(data_x.size):
best_eps.append(
minimize_scalar(lambda eps: arclength(eps, data_a[i], data_b[i],
data_x[i]),
bounds=(1e-3, 1000),
method='Bounded', options={'xatol': 1e-3}).x
)
best_eps = np.array(best_eps)
# pandas would be nice, but here a dictionary is enough
df = {'a': data_a,
'b': data_b,
'x': data_x,
'eps': best_eps,
}
def func(data, A0, A1, A2, A3, A4, A5):
"""Compute parametric function to fit."""
a = data['a']
b = data['b']
x = data['x']
return (A0 * b * np.exp(-0.5 * a)
+ np.exp(A1 + 1 / (1 + a) * np.log(x) - A2 * np.exp(-A3 * a)
+ A4 / (1 + np.exp(A5 * a))))
func_params = list(curve_fit(func, df, df['eps'], method='trf')[0])
s = "Fit optimal eps for integrand P via minimal arc length\n"
s += "with parametric function:\n"
s += "optimal_eps = (A0 * b * exp(-a/2) + exp(A1 + 1 / (1 + a) * log(x)\n"
s += " - A2 * exp(-A3 * a) + A4 / (1 + exp(A5 * a)))\n\n"
s += "Fitted parameters A0 to A5 are:\n"
s += ', '.join(['{:.5g}'.format(x) for x in func_params])
return s
def main():
t0 = time()
parser = ArgumentParser(description=__doc__,
formatter_class=RawTextHelpFormatter)
parser.add_argument('action', type=int, choices=[1, 2, 3, 4],
                        help='choose what expansion to precompute\n'
'1 : Series for small a\n'
'2 : Series for small a and small b\n'
'3 : Asymptotic series for large x\n'
' This may take some time (>4h).\n'
'4 : Fit optimal eps for integral representation.'
)
args = parser.parse_args()
switch = {1: lambda: print(series_small_a()),
2: lambda: print(series_small_a_small_b()),
3: lambda: print(asymptotic_series()),
4: lambda: print(optimal_epsilon_integral())
}
switch.get(args.action, lambda: print("Invalid input."))()
print("\n{:.1f} minutes elapsed.\n".format((time() - t0)/60))
if __name__ == '__main__':
main()
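# Example invocations (illustrative; assumes this file is run directly from a
# SciPy development checkout):
#
#   python wright_bessel.py 1   # Taylor series for small a
#   python wright_bessel.py 4   # fit optimal eps for the integral representation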
| bsd-3-clause |
PredictiveScienceLab/GPy | doc/conf.py | 8 | 13843 | # -*- coding: utf-8 -*-
#
# GPy documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 18 15:30:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#autodoc_default_flags = ['members', 'show-inheritance', 'private-members', 'special-members']
#autodoc_default_flags = ['private-members', 'special-members']
#autodoc_default_flags = 'private-members'
#autodoc_member_order = "source"
#def autodoc_skip_member(app, what, name, obj, skip, options):
#exclusions = ('__weakref__', # special-members
#'__doc__', '__module__', '__dict__', # undoc-members
#)
#exclude = name in exclusions
#inclusions = ('_src')
#include = name in inclusions
#if include:
#print app, what, name, obj, skip, options
#return False
#return skip or exclude
#def setup(app):
##app.connect('autodoc-process-docstring', cut_lines(2))
##app.connect('autodoc_default_flags', autodoc_default_flags)
##app.connect('autodoc_member_order', autodoc_member_order)
#app.connect('autodoc-skip-member', autodoc_skip_member)
import sys
import os
print "python exec:", sys.executable
print "sys.path:", sys.path
try:
import numpy
print "numpy: %s, %s" % (numpy.__version__, numpy.__file__)
except ImportError:
print "no numpy"
try:
import matplotlib
print "matplotlib: %s, %s" % (matplotlib.__version__, matplotlib.__file__)
except ImportError:
print "no matplotlib"
try:
    import IPython
    print "IPython: %s, %s" % (IPython.__version__, IPython.__file__)
except ImportError:
print "no ipython"
try:
import sphinx
print "sphinx: %s, %s" % (sphinx.__version__, sphinx.__file__)
except ImportError:
print "no sphinx"
print "sys.path:", sys.path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('../GPy'))
#print "sys.path.after:", sys.path
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('./sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
print "Importing extensions"
extensions = ['sphinx.ext.autodoc',
#'sphinx.ext.doctest'
'sphinx.ext.viewcode',
#'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
'ipython_directive',
'ipython_console_highlighting'
#'matplotlib.sphinxext.plot_directive'
]
plot_formats = [('png', 80), ('pdf', 50)]
#pngmath_latex_preamble=r'\usepackage[active]{preview}\usepackage{MnSymbol}' # + other custom stuff for inline math, such as non-default math fonts etc.
#pngmath_use_preview=True
print "finished importing"
##############################################################################
##
## Mock out imports with C dependencies because ReadTheDocs can't build them.
#############################################################################
class Mock(object):
__all__ = []
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
#import mock
print "Mocking"
MOCK_MODULES = ['sympy',
'sympy.utilities', 'sympy.utilities.codegen', 'sympy.core.cache',
'sympy.core', 'sympy.parsing', 'sympy.parsing.sympy_parser', 'Tango', 'numdifftools'
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# ----------------------- READTHEDOCS ------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
#on_rtd = True
if on_rtd:
sys.path.append(os.path.abspath('../GPy'))
import subprocess
proc = subprocess.Popen("pwd", stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "program output:", out
proc = subprocess.Popen("ls ../", stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "program output:", out
#Lets regenerate our rst files from the source, -P adds private modules (i.e kern._src)
proc = subprocess.Popen("sphinx-apidoc -P -f -o . ../GPy", stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "program output:", out
#proc = subprocess.Popen("whereis numpy", stdout=subprocess.PIPE, shell=True)
#(out, err) = proc.communicate()
#print "program output:", out
#proc = subprocess.Popen("whereis matplotlib", stdout=subprocess.PIPE, shell=True)
#(out, err) = proc.communicate()
#print "program output:", out
print "Compiled files"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GPy'
copyright = u'2013, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# This is to revert to the default theme on readthedocs
html_style = '/alabaster.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': '\\usepackage{MnSymbol,amsmath}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'GPy.tex', u'GPy Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gpy', u'GPy Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GPy', u'GPy Documentation',
u'Author', 'GPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'GPy'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2013, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| bsd-3-clause |
sergpolly/Thermal_adapt_scripts | ArchNew/BOOTSTRAP/Equal_Cherry_extract_analyse_CAI_Rnd.py | 1 | 7490 | import re
import os
import sys
from Bio import Seq
from Bio import SeqIO
from Bio import SeqUtils
import numpy as np
import pandas as pd
import cairi
from multiprocessing import Pool
# import matplotlib.pyplot as plt
#MAYBE WE DONT NEED THAT ANYMORE ...
# # STUPID FIX TO AVOID OLDER PANDAS HERE ...
# # PYTHONPATH seems to be ignored by th ipython ...
# sys.path.insert(1,"/home/venevs/.local/lib/python2.7/site-packages/")
# # this is needed just to avoid BUG in pandas (float indexing related: https://github.com/pydata/pandas/issues/5824)
# # when tsking quantile(q=0.75) ...
# import scipy.stats as stat
RIBO_LIMIT = 24
# path = os.path.join(os.path.expanduser('~'),'GENOMES_BACTER_RELEASE69/genbank')
path = "."
dat = pd.read_csv(os.path.join(path,"complete_arch_CDS_Rnd_Equal.dat"))
# plot_path = os.path.join(os.path.expanduser('~'),'GENOMES_BACTER_RELEASE69/plots')
plot_path = path
# first: identify ribosomal proteins ...
# here is our heuristic way to check if it's a ribosomal protein or not, given corresponding gene's product description ...
ribo = re.compile("ribosomal.+protein",re.I)
ribo_check = lambda line: bool(ribo.search(line)) if not('transferase' in line) else False
dat['ribosomal'] = dat['product'].apply(ribo_check)
# based on these identified proteins, then calculate CAI ....
# group the data by the assembly_accession ...
orgs = dat.groupby('assembly_accession')
genom_id = orgs.groups.keys()
ribo_counts = [(idx,orgs.get_group(idx)['ribosomal'].nonzero()[0].size) for idx in genom_id]
ribo_cai_info = pd.DataFrame(ribo_counts,columns=['assembly_accession','ribo_count'])
# some lists to describe organism's CAI distribution features ...
percentile = []
median = []
mean = []
sigma = []
idx_for_ribo = []
ribo_count_for_df = []
#
pid_cai_list = []
for idx,ribo_count in ribo_cai_info.itertuples(index=False):
if ribo_count >= RIBO_LIMIT:
cds_dat = orgs.get_group(idx)
ribo_cds = cds_dat[cds_dat['ribosomal']]['cDNA_rnd'] # cDNA_rnd of ribosomal proteins ...
codon_usage = cairi.count_codons(ribo_cds)
codon_index = cairi.generate_codon_index(codon_usage,genetic_table=list(cds_dat['table'])[0]) # fix that ...
# we need to track index from 'dat', as there are some stupid duplications ...
pid_cai = pd.DataFrame(((dat_idx,pid,cairi.cai_for_gene(sequence,codon_index)) for dat_idx,pid,sequence in cds_dat[['pid','cDNA_rnd']].itertuples()),columns=['dat_idx','pid','CAI'])
pid_cai = pid_cai.set_index(keys='dat_idx')
# characterize CAI distribution for a given organism ...
local_mean = pid_cai['CAI'].mean()
local_median = pid_cai['CAI'].median()
local_sigma = pid_cai['CAI'].std()
mean.append(local_mean)
median.append(local_median)
sigma.append(local_sigma)
idx_for_ribo.append(idx)
ribo_count_for_df.append(ribo_count)
#
local_ribo_indexes = cds_dat['ribosomal'].nonzero()[0]
local_ribo = pid_cai.iloc[local_ribo_indexes].reset_index(drop=True)
# let's also check our t.o. score
qH_all = pid_cai['CAI'].quantile(q=0.75)
qL_rib = local_ribo['CAI'].quantile(q=0.25)
percentile.append( bool(qL_rib >= qH_all) )
#
# OPTIONAL HISTOGRAM PLOTTING ...
# # # let's also plot histograms ...
# # plt.clf()
# # plt.hist(pid_cai['CAI'],range=(0,1),bins=100,color='blue',alpha=1.0)
# # plt.hist(local_ribo['CAI'],range=(0,1),bins=25,color='red',alpha=0.8)
# # plt.title("%s, CAI median: %.2f, CoV %.3f, t.o. %s"%(idx,local_median,local_sigma/local_mean,str(qL_rib >= qH_all)))
# # plt.savefig(os.path.join(plot_path,idx+".pdf"))
#
pid_cai_list.append( pid_cai )
# ttt = ["30S ribosomal subunit protein S9", "ribosomal-protein-alanine acetyltransferase", "Ribosomal protein L33", "ribosomal subunit interface protein", "Ribosomal protein S10", "ribosomal 5S rRNA E-loop binding protein Ctc/L25/TL5", "ribosomal-protein-alanine acetyltransferase", "16S ribosomal RNA methyltransferase KsgA/Dim1 family protein", "30S ribosomal proteinS16", "Acetyltransferases including N-acetylases of ribosomal proteins"]
org_cai_descr = {"assembly_accession":idx_for_ribo,"ribo_count":ribo_count_for_df,"TrOp":percentile,"median_cai":median,"mean_cai":mean,"sigma_cai":sigma}
org_cai_df = pd.DataFrame(org_cai_descr)
pid_cai_df = pd.concat(pid_cai_list)
#
# # before any mergings ...
# ###########################################
# # MERGE BY THE INDEX .... TO BE CONTINUED ...
# ###########################################
# # 1) merging
# yyy = dat.join(pid_cai_df,lsuffix='',rsuffix='_wnans')#
# # 2) merging orther way ...
# xxx = pd.concat([dat,pid_cai_df],axis=1)
# #
# indexes = (xxx.CAI != yyy.CAI).nonzero()[0]
# # beware (np.nan==np.nan) is False ...
# # so there are ~1200 indexes ...
# # TO BE CONTINUED ...
# # merging is done, outputtting and that's it ...
dat_with_cai = dat.join(pid_cai_df,lsuffix='',rsuffix='_wnans')
# then simple check ...
# all instances, where (pid != pid_wnans) must be NULL ...
if dat_with_cai.pid_wnans[dat_with_cai.pid!=dat_with_cai.pid_wnans].isnull().all():
pass
else:
print "ACHTUNG!!! All pid_wnans items whose (pid_wnans!=pid), must be NULL. Check"
########### let's try joining the 'org_cai_df' to the dat_with_cai as well, so that we'd be able to easily grab Trans.Optimized
########### organisms ...
dat_with_cai_trop = pd.merge(dat_with_cai, org_cai_df, how='left', on='assembly_accession')
# apparently 'join' is a legacy procedure, so using 'merge' is encouraged instead!
# http://stackoverflow.com/questions/10114399/pandas-simple-join-not-working
# output CDS info with the calculated CAI ...
dat_with_cai_trop[['assembly_accession','cDNA_rnd','fid','pid','product','protein','status','table','ribosomal','CAI','TrOp']].to_csv(os.path.join(path,"complete_arch_CDS_CAI_DNA_Rnd_Equal.dat"),index=False)
# ['assembly_accession', 'cDNA_rnd', 'fid', 'pid', 'product', 'protein', 'status', 'table', 'ribosomal', 'pid_wnans', 'CAI']
# ['assembly_accession', 'cDNA_rnd', 'fid', 'pid', 'product', 'protein', 'status', 'table', 'ribosomal', 'CAI']
# #
# # some characterization plotting ...
# plt.clf()
# org_cai_trop = org_cai_df[org_cai_df["TrOp"]]
# org_cai_notrop = org_cai_df[~org_cai_df["TrOp"]]
# trop_dots = plt.plot(org_cai_trop.median_cai,np.true_divide(org_cai_trop.sigma_cai,org_cai_trop.mean_cai),'ro',label='translational optimization')
# notrop_dots = plt.plot(org_cai_notrop.median_cai,np.true_divide(org_cai_notrop.sigma_cai,org_cai_notrop.mean_cai),'bo',alpha=0.8,label='No translational optimization')
# ax = plt.gca()
# ax.set_title("Organism level CAI: t.o. criteria comparison (Margalit vs ours)")
# ax.set_xlabel("median CAI")
# ax.set_ylabel("CAI coefficient of variation") # using plain sigma works worse ...
# ax.legend(loc='best')
# plt.savefig(os.path.join(path,"org_cai_to.pdf"))
# #
# #
# plt.clf()
# size_dot = lambda x: 10 if 50<x<60 else 120
# plt.scatter(x=org_cai_df.median_cai,y=np.true_divide(org_cai_df.sigma_cai,org_cai_df.mean_cai),s=org_cai_df.ribo_count.apply(size_dot),c="blue",edgecolor=None)
# ax = plt.gca()
# ax.set_title("Organism level CAI: effect of # ribosomal proteins (no effect)")
# ax.set_xlabel("median CAI")
# ax.set_ylabel("CAI coefficient of variation") # using plain sigma works worse ...
# plt.savefig(os.path.join(path,"org_cai_ribonum.pdf"))
| mit |
plotly/python-api | packages/python/plotly/plotly/graph_objs/histogram/marker/_colorbar.py | 1 | 69712 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram.marker"
_path_str = "histogram.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) of the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar. This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar. This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is an angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system on which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.histogram.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.histogram.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.histogram.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.histogram.mark
er.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
histogram.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.histogram.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' ticks are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of the color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.histogram.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use histogram.marker.colorbar.title.font
instead. Sets this color bar's title font. Note that the
title's font used to be set by the now deprecated `titlefont`
attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system on which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use histogram.marker.colorbar.title.side
instead. Determines the location of the color bar's title with
respect to the color bar. Note that the title's location used
to be set by the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.histogram.marke
r.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.histog
ram.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
histogram.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.histogram.marker.colorbar.
Title` instance or dict with compatible properties
titlefont
Deprecated: Please use
histogram.marker.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
histogram.marker.colorbar.title.side instead.
Determines the location of the color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.histogram.marke
r.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.histog
ram.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
histogram.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.histogram.marker.colorbar.
Title` instance or dict with compatible properties
titlefont
Deprecated: Please use
histogram.marker.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
histogram.marker.colorbar.title.side instead.
Determines the location of the color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.marker.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
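# Minimal usage sketch (hypothetical data; assumes plotly.graph_objects is available):
#     import plotly.graph_objects as go
#     fig = go.Figure(go.Histogram(
#         x=[1, 2, 2, 3, 3, 3],
#         marker=dict(color=[1, 2, 2, 3, 3, 3], colorscale="Viridis",
#                     colorbar=dict(title=dict(text="value"), thickness=15, len=0.8)),
#     ))
#     fig.show()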
| mit |
sinhrks/scikit-learn | examples/ensemble/plot_isolation_forest.py | 65 | 2363 | """
==========================================
IsolationForest example
==========================================
An example using IsolationForest for anomaly detection.
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a measure
of abnormality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path lengths
for particular samples, they are highly likely to be anomalies.
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
rng = np.random.RandomState(42)
# Generate train data
X = 0.3 * rng.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rng.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
# plot the line, the samples, and the nearest vectors to the plane
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("IsolationForest")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([b1, b2, c],
["training observations",
"new regular observations", "new abnormal observations"],
loc="upper left")
plt.show()
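# As a rough check of the scores themselves, one might also print the mean
# decision_function value per group (lower, i.e. more negative, values
# correspond to more isolated and hence more anomalous samples):
print("mean anomaly score, regular: %.3f / abnormal: %.3f"
      % (clf.decision_function(X_test).mean(),
         clf.decision_function(X_outliers).mean()))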
| bsd-3-clause |
chrisdjscott/Atoman | doc/source/conf.py | 1 | 9745 | from __future__ import absolute_import
from __future__ import unicode_literals
# -*- coding: utf-8 -*-
#
# Atoman documentation build configuration file, created by
# sphinx-quickstart2 on Thu Nov 19 19:05:02 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
import shlex
import atoman
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
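# For example, Google/NumPy style docstring support could be enabled here as well
# (hypothetical; only if the project adopts that docstring convention):
#extensions.append('sphinx.ext.napoleon')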
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Atoman'
copyright = '2016, Chris Scott'
author = 'Chris Scott'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = atoman.__version__
# The full version, including alpha/beta/rc tags.
release = atoman.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
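    # Note (assumption based on common Read the Docs practice): RTD applies its
    # own theme during hosted builds, so the explicit sphinx_rtd_theme setup
    # above is only needed for local builds, hence the `if not on_rtd` guard.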
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Atomandoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Atoman.tex', 'Atoman Documentation',
'Chris Scott', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'atoman', 'Atoman Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Atoman', 'Atoman Documentation',
author, 'Atoman', 'Analysis and visualisation of atomistic simulations.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# matplotlib version
import matplotlib
rst_epilog = ".. |mpl_version| replace:: %s" % matplotlib.__version__
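# Illustrative usage note: with the substitution defined above, any .rst page in
# this documentation can reference the matplotlib version inline, for example:
#
#     The plots below were generated with matplotlib |mpl_version|.
#
# (example sentence only; the substitution name must match rst_epilog above)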
| mit |
RayMick/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset in chunks. The way we proceed is that we load one image at a time
and randomly extract 50 patches from it. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
evgchz/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
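# Note: inverse_transform above is only available because the KernelPCA was
# constructed with fit_inverse_transform=True, which learns an approximate
# pre-image mapping back to the original input space.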
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
mrcslws/htmresearch | tests/frameworks/layers/physical_objects_test.py | 9 | 4987 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection used in the plotting tests
from htmresearch.frameworks.layers.physical_objects import (
Sphere, Cylinder, Box, Cube
)
@unittest.skip("needs work to get these running")
class PhysicalObjectsTest(unittest.TestCase):
"""Unit tests for physical objects."""
def testInitParams(self):
"""Simple construction test."""
sphere = Sphere(radius=5, dimension=6)
cylinder = Cylinder(height=50, radius=100, epsilon=5)
box = Box(dimensions=[1, 2, 3, 4], dimension=4)
cube = Cube(width=10, dimension=2)
self.assertEqual(sphere.radius, 5)
self.assertEqual(sphere.dimension, 6)
self.assertEqual(sphere.epsilon, sphere.DEFAULT_EPSILON)
self.assertEqual(cylinder.radius, 100)
self.assertEqual(cylinder.height, 50)
self.assertEqual(cylinder.dimension, 3)
self.assertEqual(cylinder.epsilon, 5)
self.assertEqual(box.dimensions, [1, 2, 3, 4])
self.assertEqual(box.dimension, 4)
self.assertEqual(box.epsilon, box.DEFAULT_EPSILON)
self.assertEqual(cube.dimensions, [10, 10])
self.assertEqual(cube.width, 10)
self.assertEqual(cube.dimension, 2)
self.assertEqual(sphere.epsilon, cube.DEFAULT_EPSILON)
def testSampleContains(self):
"""Samples points from the objects and test contains."""
sphere = Sphere(radius=20, dimension=6)
cylinder = Cylinder(height=50, radius=100, epsilon=2)
box = Box(dimensions=[10, 20, 30, 40], dimension=4)
cube = Cube(width=20, dimension=2)
for i in xrange(50):
self.assertTrue(sphere.contains(sphere.sampleLocation()))
self.assertTrue(cylinder.contains(cylinder.sampleLocation()))
self.assertTrue(box.contains(box.sampleLocation()))
self.assertTrue(cube.contains(cube.sampleLocation()))
# inside
self.assertFalse(sphere.contains([1] * sphere.dimension))
self.assertFalse(cube.contains([1] * cube.dimension))
self.assertFalse(cylinder.contains([1] * cylinder.dimension))
self.assertFalse(box.contains([1] * box.dimension))
# outside
self.assertFalse(sphere.contains([100] * sphere.dimension))
self.assertFalse(cube.contains([100] * cube.dimension))
self.assertFalse(cylinder.contains([100] * cylinder.dimension))
self.assertFalse(box.contains([100] * box.dimension))
def testPlotSampleLocations(self):
"""Samples points from objects and plots them in a 3D scatter."""
objects = []
objects.append(Sphere(radius=20, dimension=3))
objects.append(Cylinder(height=50, radius=100, epsilon=2))
objects.append(Box(dimensions=[10, 20, 30], dimension=3))
objects.append(Cube(width=20, dimension=3))
numPoints = 500
for i in xrange(4):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for _ in xrange(numPoints):
x, y, z = tuple(objects[i].sampleLocation())
ax.scatter(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.title("Sampled points from {}".format(objects[i]))
plt.savefig("object{}.png".format(str(i)))
plt.close()
def testPlotSampleFeatures(self):
"""Samples points from objects and plots them in a 3D scatter."""
objects = []
objects.append(Sphere(radius=20, dimension=3))
objects.append(Cylinder(height=50, radius=100, epsilon=2))
objects.append(Box(dimensions=[10, 20, 30], dimension=3))
objects.append(Cube(width=20, dimension=3))
numPoints = 500
for i in xrange(4):
for feature in objects[i].features:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for _ in xrange(numPoints):
x, y, z = tuple(objects[i].sampleLocationFromFeature(feature))
ax.scatter(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.title("Sampled points on {} from {}".format(feature, objects[i]))
plt.savefig("object_{}_{}.png".format(str(i), feature))
plt.close()
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
anderspitman/scikit-bio | skbio/stats/distance/_anosim.py | 8 | 8598 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from functools import partial
import numpy as np
from scipy.stats import rankdata
from ._base import (_preprocess_input, _run_monte_carlo_stats, _build_results)
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def anosim(distance_matrix, grouping, column=None, permutations=999):
"""Test for significant differences between groups using ANOSIM.
Analysis of Similarities (ANOSIM) is a non-parametric method that tests
whether two or more groups of objects (e.g., samples) are significantly
different based on a categorical factor. The ranks of the distances in the
distance matrix are used to calculate an R statistic, which ranges between
-1 (anti-grouping) to +1 (strong grouping), with an R value of 0 indicating
random grouping.
Statistical significance is assessed via a permutation test. The assignment
of objects to groups (`grouping`) is randomly permuted a number of times
(controlled via `permutations`). An R statistic is computed for each
permutation and the p-value is the proportion of permuted R statistics that
are equal to or greater than the original (unpermuted) R statistic.
Parameters
----------
distance_matrix : DistanceMatrix
Distance matrix containing distances between objects (e.g., distances
between samples of microbial communities).
grouping : 1-D array_like or pandas.DataFrame
Vector indicating the assignment of objects to groups. For example,
these could be strings or integers denoting which group an object
belongs to. If `grouping` is 1-D ``array_like``, it must be the same
length and in the same order as the objects in `distance_matrix`. If
`grouping` is a ``DataFrame``, the column specified by `column` will be
used as the grouping vector. The ``DataFrame`` must be indexed by the
IDs in `distance_matrix` (i.e., the row labels must be distance matrix
IDs), but the order of IDs between `distance_matrix` and the
``DataFrame`` need not be the same. All IDs in the distance matrix must
be present in the ``DataFrame``. Extra IDs in the ``DataFrame`` are
allowed (they are ignored in the calculations).
column : str, optional
Column name to use as the grouping vector if `grouping` is a
``DataFrame``. Must be provided if `grouping` is a ``DataFrame``.
Cannot be provided if `grouping` is 1-D ``array_like``.
permutations : int, optional
Number of permutations to use when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and the p-value
will be ``np.nan``.
Returns
-------
pandas.Series
Results of the statistical test, including ``test statistic`` and
``p-value``.
See Also
--------
permanova
Notes
-----
See [1]_ for the original method reference. The general algorithm and
interface are similar to ``vegan::anosim``, available in R's vegan package
[2]_.
The p-value will be ``np.nan`` if `permutations` is zero.
References
----------
.. [1] Clarke, KR. "Non-parametric multivariate analyses of changes in
community structure." Australian journal of ecology 18.1 (1993):
117-143.
.. [2] http://cran.r-project.org/web/packages/vegan/index.html
Examples
--------
Load a 4x4 distance matrix and grouping vector denoting 2 groups of
objects:
>>> from skbio import DistanceMatrix
>>> dm = DistanceMatrix([[0, 1, 1, 4],
... [1, 0, 3, 2],
... [1, 3, 0, 3],
... [4, 2, 3, 0]],
... ['s1', 's2', 's3', 's4'])
>>> grouping = ['Group1', 'Group1', 'Group2', 'Group2']
Run ANOSIM using 99 permutations to calculate the p-value:
>>> import numpy as np
>>> # make output deterministic; not necessary for normal use
>>> np.random.seed(0)
>>> from skbio.stats.distance import anosim
>>> anosim(dm, grouping, permutations=99)
method name ANOSIM
test statistic name R
sample size 4
number of groups 2
test statistic 0.25
p-value 0.67
number of permutations 99
Name: ANOSIM results, dtype: object
The return value is a ``pandas.Series`` object containing the results of
the statistical test.
To suppress calculation of the p-value and only obtain the R statistic,
specify zero permutations:
>>> anosim(dm, grouping, permutations=0)
method name ANOSIM
test statistic name R
sample size 4
number of groups 2
test statistic 0.25
p-value NaN
number of permutations 0
Name: ANOSIM results, dtype: object
You can also provide a ``pandas.DataFrame`` and a column denoting the
grouping instead of a grouping vector. The following ``DataFrame``'s
``Group`` column specifies the same grouping as the vector we used in the
previous examples:
>>> # make output deterministic; not necessary for normal use
>>> np.random.seed(0)
>>> import pandas as pd
>>> df = pd.DataFrame.from_dict(
... {'Group': {'s2': 'Group1', 's3': 'Group2', 's4': 'Group2',
... 's5': 'Group3', 's1': 'Group1'}})
>>> anosim(dm, df, column='Group', permutations=99)
method name ANOSIM
test statistic name R
sample size 4
number of groups 2
test statistic 0.25
p-value 0.67
number of permutations 99
Name: ANOSIM results, dtype: object
The results match the first example above.
Note that when providing a ``DataFrame``, the ordering of rows and/or
columns does not affect the grouping vector that is extracted. The
``DataFrame`` must be indexed by the distance matrix IDs (i.e., the row
labels must be distance matrix IDs).
If IDs (rows) are present in the ``DataFrame`` but not in the distance
matrix, they are ignored. The previous example's ``s5`` ID illustrates this
behavior: note that even though the ``DataFrame`` had 5 objects, only 4
were used in the test (see the "Sample size" row in the results above to
confirm this). Thus, the ``DataFrame`` can be a superset of the distance
matrix IDs. Note that the reverse is not true: IDs in the distance matrix
*must* be present in the ``DataFrame`` or an error will be raised.
"""
sample_size, num_groups, grouping, tri_idxs, distances = _preprocess_input(
distance_matrix, grouping, column)
divisor = sample_size * ((sample_size - 1) / 4)
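    # The divisor n * (n - 1) / 4 is half the number of pairwise distances,
    # which bounds the resulting R statistic to the interval [-1, 1].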
ranked_dists = rankdata(distances, method='average')
test_stat_function = partial(_compute_r_stat, tri_idxs, ranked_dists,
divisor)
stat, p_value = _run_monte_carlo_stats(test_stat_function, grouping,
permutations)
return _build_results('ANOSIM', 'R', sample_size, num_groups, stat,
p_value, permutations)
def _compute_r_stat(tri_idxs, ranked_dists, divisor, grouping):
"""Compute ANOSIM R statistic (between -1 and +1)."""
# Create a matrix where True means that the two objects are in the same
# group. This ufunc requires that grouping is a numeric vector (e.g., it
# won't work with a grouping vector of strings).
grouping_matrix = np.equal.outer(grouping, grouping)
# Extract upper triangle from the grouping matrix. It is important to
# extract the values in the same order that the distances are extracted
# from the distance matrix (see ranked_dists). Extracting the upper
# triangle (excluding the diagonal) preserves this order.
grouping_tri = grouping_matrix[tri_idxs]
# within
r_W = np.mean(ranked_dists[grouping_tri])
# between
r_B = np.mean(ranked_dists[np.invert(grouping_tri)])
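    # R is the difference between the mean between-group rank and the mean
    # within-group rank, scaled by the divisor; R > 0 when between-group
    # distances tend to be ranked higher than within-group distances.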
return (r_B - r_W) / divisor
| bsd-3-clause |
IvonLiu/SportsHack | local/create_dataset.py | 3 | 4857 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 23:26:27 2015
@author: Owner
"""
import pandas as pd
import numpy as np
#pd.set_option('display.max_rows',5000)
roster = pd.read_csv("data/cfl_data/cfl_roster.csv")
height = roster.iloc[0:,9]
weight = roster.iloc[0:,10]
location = roster.iloc[0:,12]
ID = roster.iloc[0:,0]
hometown = location.str.split(',').str.get(0)
state = location.str.split(',').str.get(1)
state = state.str.replace('California|CA|CA. ', 'CA')
state =state.str.replace('Ontario', 'ON' )
state =state.str.replace('Saskatchewan', 'SK')
state =state.str.replace('Florida', 'FL')
state =state.str.replace('Mississippi', 'MS')
state =state.str.replace('Montana', 'MT')
state =state.str.replace('North Carolina|NC.|N.C.', 'NC')
state =state.str.replace('Jamaica|Jamiaca|JM ','JM')
state =state.str.replace('Texas', 'TX')
state =state.str.replace('Quebec|Québec', 'QC')
state =state.str.replace('Illinois', 'IL')
state =state.str.replace('New Jersey', 'NJ')
state =state.str.replace('Louisiana', 'LA')
state =state.str.replace('Manitoba', 'MB')
state =state.str.replace('Nigeria', 'NG')
state =state.str.replace('D.C.|D.C', 'DC')
state =state.str.replace('Alabama', 'AL')
state =state.str.replace('New York|N.Y.|NY.', 'NY')
state =state.str.replace('Tennessee', 'TN')
state =state.str.replace('Hawaii|Hawai"i', 'HI')
state =state.str.replace('Indiana', 'IN')
state =state.str.replace('Ireland', 'IE')
state =state.str.replace('Georgia', 'GE')
#state =state.str.replace('C-B/BC', None)
state =state.str.replace('Oklahoma', 'OK')
state =state.str.replace('Ghana', 'GH')
#state =state.str.replace('Zaire', None)
state =state.str.replace('Argentina', 'AR')
state =state.str.replace('Poland', 'PL')
state =state.str.replace('Ohio', 'OH')
state =state.str.replace('Bahamas', 'BS')
state =state.str.replace('B.C.|British Columbia', 'BC')
state =state.str.replace('Michigan','MI')
state =state.str.replace('Washington|Wash.', 'WA')
state =state.str.replace('Alberta', 'AB')
state =state.str.replace('Chad', 'TD')
state =state.str.replace('England', 'UK')
state =state.str.replace('Arkansas', 'AR')
state =state.str.replace('Guyana', 'GY')
state =state.str.replace('Virginia', 'VA')
state =state.str.replace('Minnesota|Minn.', 'MN')
state =state.str.replace('Iran', 'IR')
state =state.str.replace('Germany', 'DE')
state =state.str.replace('Japan', 'JP')
state =state.str.replace('Haiti', 'HT')
state =state.str.replace('Phillipines', 'PH')
state =state.str.replace('Lebanon', 'LB')
state =state.str.replace('Nevada', 'NV')
state =state.str.replace('Gabon', 'GA')
state =state.str.replace('Arizona', 'AZ')
#state =state.str.replace('east africa', None)
state =state.str.replace('Nebraska', 'NE')
state =state.str.replace('South Korea', 'KP')
state =state.str.replace('S.C.|South Carolina', 'SC')
state =state.str.replace('Connecticut', 'CT')
#state =state.str.replace('Canada', None)
state =state.str.replace('Oregon', 'OR')
state =state.str.replace('Maryland', 'MD')
state =state.str.replace('Trinidad', 'TT')
state =state.str.replace('France', 'FR')
state =state.str.replace('Africa', 'CF')
state =state.str.replace('Nicaragua', 'NI')
state =state.str.replace('Cuba', 'CU')
state =state.str.replace('Rwanda', 'RW')
state =state.str.replace('Afghanistan', 'AF')
state =state.str.replace('Congo', 'CG')
state =state.str.replace('Idaho', 'ID')
state =state.str.replace('Venezuela', 'VE')
state =state.str.replace('new zealand', 'NZ')
#state =state.str.replace('Fla', None)
state =state.str.replace('costa rica', 'CR')
state =state.str.replace('liberia', 'LR')
state =state.str.replace('Serbia', 'RS')
state =state.str.replace('Cameroon', 'CM')
state =state.str.replace('Missouri', 'MO')
state =state.str.replace('Kentucky', "KY")
#state =state.str.replace('City', None)
state =state.str.replace('Pennsylvania|PA.', 'PA')
state =state.str.replace('IND', 'IN')
state =state.str.replace('Netherlands', 'AN')
state =state.str.replace('Australia', 'AU')
#state =state.str.replace('1990', None)
state =state.str.replace('Honduras', 'HN')
state =state.str.replace('Panama', 'PA')
#state =state.str.replace('Petersburg', None)
state =state.str.replace('Virgina', 'VA')
state =state.str.replace('WI.', 'WI')
state =state.str.replace('IL.', 'IL')
state =state.str.replace('FL.', 'FL')
state =state.str.replace('VA.', 'VA')
state =state.str.replace('Georgie', 'GA')
state =state.str.replace('Miss.', 'MS')
state =state.str.replace('OH ', 'OH')
state =state.str.replace('Wisconsin', 'WI')
state =state.str.replace('West VA', 'VA')
state =state.str.replace('Kansas', 'KS')
state =state.str.replace('UTah', 'UT')
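# A more maintainable alternative to the long chain of str.replace calls above
# would be a single mapping applied in one pass (illustrative sketch only; the
# dict shown is a hypothetical subset of the substitutions used here):
#
#     replacements = {'California': 'CA', 'Ontario': 'ON', 'Saskatchewan': 'SK'}
#     state = state.replace(replacements, regex=True)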
dataset = pd.DataFrame({'ID': ID, 'height':height, 'weight':weight, 'City':hometown, 'State':state })
# Series.convert_object does not exist; coerce non-numeric heights to NaN instead
dataset.height = pd.to_numeric(dataset.height, errors='coerce')
#dataset.to_csv("data/output/dataset.csv", na_rep="NA", index=False) | mit |
renesugar/arrow | python/pyarrow/tests/test_hdfs.py | 3 | 13419 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pickle
import pytest
import random
import unittest
from io import BytesIO
from os.path import join as pjoin
import numpy as np
import pyarrow as pa
import pyarrow.tests.test_parquet as test_parquet
from pyarrow.compat import guid
from pyarrow.pandas_compat import _pandas_api
# ----------------------------------------------------------------------
# HDFS tests
def hdfs_test_client(driver='libhdfs'):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
user = os.environ.get('ARROW_HDFS_TEST_USER', None)
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
return pa.hdfs.connect(host, port, user, driver=driver)
@pytest.mark.hdfs
class HdfsTestCases(object):
def _make_test_file(self, hdfs, test_name, test_path, test_data):
base_path = pjoin(self.tmp_path, test_name)
hdfs.mkdir(base_path)
full_path = pjoin(base_path, test_path)
with hdfs.open(full_path, 'wb') as f:
f.write(test_data)
return full_path
@classmethod
def setUpClass(cls):
cls.check_driver()
cls.hdfs = hdfs_test_client(cls.DRIVER)
cls.tmp_path = '/tmp/pyarrow-test-{0}'.format(random.randint(0, 1000))
cls.hdfs.mkdir(cls.tmp_path)
@classmethod
def tearDownClass(cls):
cls.hdfs.delete(cls.tmp_path, recursive=True)
cls.hdfs.close()
def test_unknown_driver(self):
with pytest.raises(ValueError):
hdfs_test_client(driver="not_a_driver_name")
def test_pickle(self):
s = pickle.dumps(self.hdfs)
h2 = pickle.loads(s)
assert h2.is_open
assert h2.host == self.hdfs.host
assert h2.port == self.hdfs.port
assert h2.user == self.hdfs.user
assert h2.kerb_ticket == self.hdfs.kerb_ticket
assert h2.driver == self.hdfs.driver
# smoketest unpickled client works
h2.ls(self.tmp_path)
def test_cat(self):
path = pjoin(self.tmp_path, 'cat-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
contents = self.hdfs.cat(path)
assert contents == data
def test_capacity_space(self):
capacity = self.hdfs.get_capacity()
space_used = self.hdfs.get_space_used()
disk_free = self.hdfs.df()
assert capacity > 0
assert capacity > space_used
assert disk_free == (capacity - space_used)
def test_close(self):
client = hdfs_test_client()
assert client.is_open
client.close()
assert not client.is_open
with pytest.raises(Exception):
client.ls('/')
def test_mkdir(self):
path = pjoin(self.tmp_path, 'test-dir/test-dir')
parent_path = pjoin(self.tmp_path, 'test-dir')
self.hdfs.mkdir(path)
assert self.hdfs.exists(path)
self.hdfs.delete(parent_path, recursive=True)
assert not self.hdfs.exists(path)
def test_mv_rename(self):
path = pjoin(self.tmp_path, 'mv-test')
new_path = pjoin(self.tmp_path, 'mv-new-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
assert self.hdfs.exists(path)
self.hdfs.mv(path, new_path)
assert not self.hdfs.exists(path)
assert self.hdfs.exists(new_path)
assert self.hdfs.cat(new_path) == data
self.hdfs.rename(new_path, path)
assert self.hdfs.cat(path) == data
def test_info(self):
path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(path, 'ex')
self.hdfs.mkdir(path)
data = b'foobarbaz'
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
path_info = self.hdfs.info(path)
file_path_info = self.hdfs.info(file_path)
assert path_info['kind'] == 'directory'
assert file_path_info['kind'] == 'file'
assert file_path_info['size'] == len(data)
def test_exists_isdir_isfile(self):
dir_path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(dir_path, 'ex')
missing_path = pjoin(dir_path, 'this-path-is-missing')
self.hdfs.mkdir(dir_path)
with self.hdfs.open(file_path, 'wb') as f:
f.write(b'foobarbaz')
assert self.hdfs.exists(dir_path)
assert self.hdfs.exists(file_path)
assert not self.hdfs.exists(missing_path)
assert self.hdfs.isdir(dir_path)
assert not self.hdfs.isdir(file_path)
assert not self.hdfs.isdir(missing_path)
assert not self.hdfs.isfile(dir_path)
assert self.hdfs.isfile(file_path)
assert not self.hdfs.isfile(missing_path)
def test_disk_usage(self):
path = pjoin(self.tmp_path, 'disk-usage-base')
p1 = pjoin(path, 'p1')
p2 = pjoin(path, 'p2')
subdir = pjoin(path, 'subdir')
p3 = pjoin(subdir, 'p3')
if self.hdfs.exists(path):
self.hdfs.delete(path, True)
self.hdfs.mkdir(path)
self.hdfs.mkdir(subdir)
data = b'foobarbaz'
for file_path in [p1, p2, p3]:
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
assert self.hdfs.disk_usage(path) == len(data) * 3
def test_ls(self):
base_path = pjoin(self.tmp_path, 'ls-test')
self.hdfs.mkdir(base_path)
dir_path = pjoin(base_path, 'a-dir')
f1_path = pjoin(base_path, 'a-file-1')
self.hdfs.mkdir(dir_path)
f = self.hdfs.open(f1_path, 'wb')
f.write(b'a' * 10)
contents = sorted(self.hdfs.ls(base_path, False))
assert contents == [dir_path, f1_path]
def test_chmod_chown(self):
path = pjoin(self.tmp_path, 'chmod-test')
with self.hdfs.open(path, 'wb') as f:
f.write(b'a' * 10)
def test_download_upload(self):
base_path = pjoin(self.tmp_path, 'upload-test')
data = b'foobarbaz'
buf = BytesIO(data)
buf.seek(0)
self.hdfs.upload(base_path, buf)
out_buf = BytesIO()
self.hdfs.download(base_path, out_buf)
out_buf.seek(0)
assert out_buf.getvalue() == data
def test_file_context_manager(self):
path = pjoin(self.tmp_path, 'ctx-manager')
data = b'foo'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
assert f.size() == 3
result = f.read(10)
assert result == data
def test_open_not_exist_error_message(self):
# ARROW-226
path = pjoin(self.tmp_path, 'does-not-exist-123')
try:
self.hdfs.open(path)
except Exception as e:
assert 'file does not exist' in e.args[0].lower()
def test_read_whole_file(self):
path = pjoin(self.tmp_path, 'read-whole-file')
data = b'foo' * 1000
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
result = f.read()
assert result == data
def _write_multiple_hdfs_pq_files(self, tmpdir):
import pyarrow.parquet as pq
nfiles = 10
size = 5
test_data = []
for i in range(nfiles):
df = test_parquet._test_dataframe(size, seed=i)
df['index'] = np.arange(i * size, (i + 1) * size)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(tmpdir, '{0}.parquet'.format(i))
table = pa.Table.from_pandas(df, preserve_index=False)
with self.hdfs.open(path, 'wb') as f:
pq.write_table(table, f)
test_data.append(table)
expected = pa.concat_tables(test_data)
return expected
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files(self):
tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
result = self.hdfs.read_parquet(tmpdir)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'multi-parquet-uri-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
path = _get_hdfs_uri(tmpdir)
result = pq.read_table(path)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_write_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'uri-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
path = _get_hdfs_uri(pjoin(tmpdir, 'test.parquet'))
size = 5
df = test_parquet._test_dataframe(size, seed=0)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
table = pa.Table.from_pandas(df, preserve_index=False)
pq.write_table(table, path, filesystem=self.hdfs)
result = pq.read_table(path, filesystem=self.hdfs).to_pandas()
_pandas_api.assert_frame_equal(result, df)
@pytest.mark.parquet
def test_read_common_metadata_files(self):
tmpdir = pjoin(self.tmp_path, 'common-metadata-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_read_common_metadata_files(self.hdfs, tmpdir)
@pytest.mark.parquet
def test_write_to_dataset_with_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_with_partitions(
tmpdir, filesystem=self.hdfs)
@pytest.mark.parquet
def test_write_to_dataset_no_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-no_partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_no_partitions(
tmpdir, filesystem=self.hdfs)
class TestLibHdfs(HdfsTestCases, unittest.TestCase):
DRIVER = 'libhdfs'
@classmethod
def check_driver(cls):
if not pa.have_libhdfs():
pytest.skip('No libhdfs available on system')
def test_orphaned_file(self):
hdfs = hdfs_test_client()
file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',
b'foobarbaz')
f = hdfs.open(file_path)
hdfs = None
f = None # noqa
class TestLibHdfs3(HdfsTestCases, unittest.TestCase):
DRIVER = 'libhdfs3'
@classmethod
def check_driver(cls):
if not pa.have_libhdfs3():
pytest.skip('No libhdfs3 available on system')
def _get_hdfs_uri(path):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
uri = "hdfs://{}:{}{}".format(host, port, path)
return uri
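# Illustrative example (hypothetical host/port): with ARROW_HDFS_TEST_HOST set
# to "namenode" and ARROW_HDFS_TEST_PORT set to 8020,
# _get_hdfs_uri('/tmp/data.parquet') returns 'hdfs://namenode:8020/tmp/data.parquet'.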
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.fastparquet
@pytest.mark.parametrize('client', ['libhdfs', 'libhdfs3'])
def test_fastparquet_read_with_hdfs(client):
from pandas.util.testing import assert_frame_equal, makeDataFrame
try:
import snappy # noqa
except ImportError:
pytest.skip('fastparquet test requires snappy')
import pyarrow.parquet as pq
fastparquet = pytest.importorskip('fastparquet')
fs = hdfs_test_client(client)
df = makeDataFrame()
table = pa.Table.from_pandas(df)
path = '/tmp/testing.parquet'
with fs.open(path, 'wb') as f:
pq.write_table(table, f)
parquet_file = fastparquet.ParquetFile(path, open_with=fs.open)
result = parquet_file.to_pandas()
assert_frame_equal(result, df)
| apache-2.0 |
JT5D/scikit-learn | examples/plot_hmm_sampling.py | 8 | 2045 | """
==================================
Demonstration of sampling from HMM
==================================
This script shows how to sample points from a Hidden Markov Model (HMM):
we use a 4-component HMM with specified means and covariances.
The plot shows the sequence of observations generated, with the transitions
between them. We can see that, as specified by our transition matrix,
there are no transitions between components 1 and 3.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import hmm
##############################################################
# Prepare parameters for a 4-component HMM
# Initial population probability
start_prob = np.array([0.6, 0.3, 0.1, 0.0])
# The transition matrix, note that there are no transitions possible
# between component 1 and 4
trans_mat = np.array([[0.7, 0.2, 0.0, 0.1],
[0.3, 0.5, 0.2, 0.0],
[0.0, 0.3, 0.5, 0.2],
[0.2, 0.0, 0.2, 0.6]])
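# Each row of the transition matrix is a probability distribution and sums to 1;
# the zeros at entries (1, 3) and (3, 1) are what forbid moving between those
# two components.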
# The means of each component
means = np.array([[0.0, 0.0],
[0.0, 11.0],
[9.0, 10.0],
[11.0, -1.0],
])
# The covariance of each component
covars = .5 * np.tile(np.identity(2), (4, 1, 1))
# Build an HMM instance and set parameters
model = hmm.GaussianHMM(4, "full", start_prob, trans_mat,
random_state=42)
# Instead of fitting it from the data, we directly set the estimated
# parameters, the means and covariance of the components
model.means_ = means
model.covars_ = covars
###############################################################
# Generate samples
X, Z = model.sample(500)
# Plot the sampled data
plt.plot(X[:, 0], X[:, 1], "-o", label="observations", ms=6,
mfc="orange", alpha=0.7)
# Indicate the component numbers
for i, m in enumerate(means):
plt.text(m[0], m[1], 'Component %i' % (i + 1),
size=17, horizontalalignment='center',
bbox=dict(alpha=.7, facecolor='w'))
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/linear_model/__init__.py | 83 | 3139 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
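# Minimal usage sketch (illustrative only, not part of the module code):
#
#     from sklearn.linear_model import LinearRegression
#     model = LinearRegression().fit([[0.0], [1.0], [2.0]], [0.0, 1.0, 2.0])
#     model.predict([[3.0]])  # approximately array([ 3.])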
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .huber import HuberRegressor
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
suensummit/Grasp-and-lift-EEG-challenge | lvl3/genFinal.py | 4 | 2915 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 15 14:12:12 2015
@author: rc, alex
"""
import os
import sys
if __name__ == '__main__' and __package__ is None:
filePath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(filePath)
import pandas as pd
import numpy as np
import yaml
from collections import OrderedDict
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import LeaveOneLabelOut
from preprocessing.aux import getEventNames
from utils.ensembles import createEnsFunc, loadPredictions
from ensembling.WeightedMean import WeightedMeanClassifier
yml = yaml.load(open(sys.argv[1]))
fileName = yml['fileName']
ensemble = yml['ensemble']
subsample = yml['subsample'] if 'subsample' in yml else 1
seed = yml['seed'] if 'seed' in yml else 4234521
mean_type = yml['mean_type'] if 'mean_type' in yml else 'arithmetic'
verbose = yml['verbose'] if 'verbose' in yml else True
print mean_type
print ensemble
np.random.seed(seed)
print 'Running weighted mean ensemble, results will be saved in submissions/%s.csv' % fileName
models = []
for m in mean_type:
models.append(WeightedMeanClassifier(ensemble, mean=m, verbose=verbose))
######
cols = getEventNames()
ids = np.load('../infos_test.npy')
subjects_test = ids[:, 1]
series_test = ids[:, 2]
ids = ids[:, 0]
labels = np.load('../infos_val.npy')
subjects = labels[:, -2]
series = labels[:, -1]
labels = labels[:, :-2]
allCols = range(len(cols))
# ## loading prediction ###
files = ensemble
preds_val = OrderedDict()
for f in files:
loadPredictions(preds_val, f, [f], lvl=2)
# ## train/test ###
aggr = createEnsFunc(ensemble)
dataTrain = aggr(preds_val)
preds_val = None
# do CV
aucs = []
cv = LeaveOneLabelOut(series)
p = np.zeros(labels.shape)
for train,test in cv:
currentSeries = np.unique(series[test])[0]
for m in range(len(models)):
models[m].fit(dataTrain[train][::subsample], labels[train][::subsample])
p[test] += models[m].predict_proba(dataTrain[test]) / len(mean_type)
aucs.append(np.mean([roc_auc_score(labels[test],p[test])]))
print 'score on series %d: %.5f' % (currentSeries, aucs[-1])
print 'CV score: %.5f / %.6f' % (np.mean(aucs), np.std(aucs))
np.save('val/val_%s.npy'%fileName,[p])
# train WMs on all training data
models = []
for m in mean_type:
wm = WeightedMeanClassifier(ensemble, mean=m, verbose=verbose)
wm.fit(dataTrain[::subsample], labels[::subsample])
models.append(wm)
dataTrain = None
# load test data
preds_test = OrderedDict()
for f in files:
loadPredictions(preds_test, f, [f], lvl=2, test=True)
dataTest = aggr(preds_test)
preds_test = None
# get predictions
p = 0
for m in range(len(models)):
p += models[m].predict_proba(dataTest) / len(models)
# generate submission
sub = pd.DataFrame(data=p,index=ids,columns=cols)
sub.to_csv('submissions/%s.csv'%fileName,index_label='id',float_format='%.8f') | bsd-3-clause |
mailhexu/pyDFTutils | pyDFTutils/vasp/plot_potential.py | 1 | 4451 | #! /usr/bin/env python
import os
import numpy as np
from numpy import maximum,minimum,NaN,Inf,arange,isscalar,array
from pyDFTutils.math.peakdetect import peakdetect
import sys
from numpy import *
from functools import reduce
def get_potential():
if not os.path.exists('./vplanar.txt'):
raise IOError('No data vplanar.txt found. Please run work_function')
data=np.loadtxt('vplanar.txt',skiprows=1)
pos=data[:,0]
pot=data[:,1]
return pos,pot
def periodic_average(data,step):
l=len(data)
avg=data.copy()
data=np.reshape(data,[1,l])
tri_data=np.repeat(data,3,axis=0).flatten()
for i in range(l):
print(i)
l1=-step/2+i+l
l2=step/2+i+l
avg[i]=np.average(tri_data[l1:l2])
return avg
def periodic_average_dynamic(data):
p=array(peakdetect(data,lookahead=5,delta=0.01))
N=len(data)
xmin=p[0][:,0][::1]
steps=[N-xmin[-1]+xmin[0]]
x_range=[(xmin[-1]-N,xmin[0])]
for ix in range(len(xmin)-1):
x_range.append((xmin[ix],xmin[ix+1]))
steps.append(xmin[ix+1]-xmin[ix])
x_range.append((xmin[-1],xmin[0]+N))
steps.append(xmin[0]+N-xmin[-1])
avg=data.copy()
data=np.reshape(data,[1,N])
tri_data=np.repeat(data,3,axis=0).flatten()
for i in range(N):
for xr,step in zip(x_range,steps):
if xr[0]<=i<xr[1]:
#l1=-step/2+i+N
#l2=step/2+i+N-1
l1=xr[0]+N
l2=xr[1]+1+N
avg[i]=np.average(tri_data[l1:l2])
return avg
def peaks(data, step):
n = len(data) - len(data)%step # ignore tail
slices = [ data[i:n:step] for i in range(step) ]
peak_max = reduce(maximum, slices)
peak_min = reduce(minimum, slices)
return np.transpose(np.array([peak_max, peak_min]))
def peakdet(v, delta, x = None):
"""
Converted from MATLAB script at http://billauer.co.il/peakdet.html
Returns two arrays
function [maxtab, mintab]=peakdet(v, delta, x)
%PEAKDET Detect peaks in a vector
% [MAXTAB, MINTAB] = PEAKDET(V, DELTA) finds the local
% maxima and minima ("peaks") in the vector V.
% MAXTAB and MINTAB consists of two columns. Column 1
% contains indices in V, and column 2 the found values.
%
% With [MAXTAB, MINTAB] = PEAKDET(V, DELTA, X) the indices
% in MAXTAB and MINTAB are replaced with the corresponding
% X-values.
%
% A point is considered a maximum peak if it has the maximal
% value, and was preceded (to the left) by a value lower by
% DELTA.
% Eli Billauer, 3.4.05 (Explicitly not copyrighted).
% This function is released to the public domain; Any use is allowed.
"""
maxtab = []
mintab = []
if x is None:
x = arange(len(v))
v = array(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = Inf, -Inf
mnpos, mxpos = NaN, NaN
lookformax = True
for i in arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
            if this < mx-delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn+delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return array(maxtab), array(mintab)
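# Illustrative use of peakdet (sketch only, not called by this module):
#
#     t = np.linspace(0, 4 * np.pi, 400)
#     maxtab, mintab = peakdet(np.sin(t), delta=0.1)
#     # maxtab[:, 0] holds the sample indices of the maxima, maxtab[:, 1] their values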
def test(step=None, ):
pos,pot=get_potential()
p=array(peakdetect(pot,lookahead=5,delta=0.01))
zero=p[0][0,1]
pot=pot-zero
N=len(pot)
xmin=p[1][:,0]
step=xmin[2]-xmin[0]-1
import matplotlib.pyplot as plt
plt.plot(pot,color='blue')
plt.plot(periodic_average(pot,step),color='red')
plt.plot(periodic_average_dynamic(pot),color='purple')
plt.scatter(p[0][:,0],p[0][:,1]-zero,color='red')
plt.scatter(p[1][:,0],p[1][:,1]-zero,color='green')
plt.xlim(0,N)
#plt.xticks(p[1][:,0][::2],xl)
plt.grid()
plt.show()
pos_S=p[1][:,0][1::2]
spos=list(pos_S[1:])
spos.append(240)
spos=np.array(spos)
print((spos-pos_S)/480*38.1814)
if __name__=='__main__':
test()
| lgpl-3.0 |
Adai0808/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
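# Because each bootstrap draw samples n indices with replacement, roughly 1/e
# (about 37%) of the samples are expected to be left out of any given tree;
# these "out-of-bag" samples are what _set_oob_score uses for evaluation.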
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, unlike
            # [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
            valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
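    # Illustrative note (added for clarity, not part of the original code): the
    # reduce step above is an element-wise mean over the per-tree probability
    # arrays, e.g. for two trees on a single sample:
    #
    #     >>> import numpy as np
    #     >>> per_tree = [np.array([[0.8, 0.2]]), np.array([[0.6, 0.4]])]
    #     >>> np.mean(per_tree, axis=0)
    #     array([[0.7, 0.3]])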
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
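# Illustrative usage sketch (added for clarity, not part of the original
# module). The estimator above follows the standard fit/predict API; the toy
# data and hyper-parameters below are made up purely for illustration, and
# exact probabilities depend on ``random_state``.
#
#     >>> from sklearn.ensemble import RandomForestClassifier
#     >>> X = [[0, 0], [1, 0], [0, 1], [1, 1]]
#     >>> y = [0, 0, 1, 1]
#     >>> clf = RandomForestClassifier(n_estimators=25, random_state=0).fit(X, y)
#     >>> len(clf.estimators_), clf.classes_.tolist()
#     (25, [0, 1])
#     >>> clf.predict_proba([[0, 1]]).shape
#     (1, 2)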
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
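# Illustrative usage sketch (added for clarity, not part of the original
# module); the data below is a made-up toy example.
#
#     >>> from sklearn.ensemble import RandomForestRegressor
#     >>> X = [[0.0], [1.0], [2.0], [3.0]]
#     >>> y = [0.0, 1.1, 1.9, 3.2]
#     >>> reg = RandomForestRegressor(n_estimators=10, random_state=0).fit(X, y)
#     >>> reg.predict([[1.5]]).shape
#     (1,)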
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
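# Illustrative usage sketch (added for clarity, not part of the original
# module). Note that, unlike RandomForestClassifier, ``bootstrap`` defaults to
# False here, so each extra-tree sees the whole training set.
#
#     >>> from sklearn.ensemble import ExtraTreesClassifier
#     >>> X = [[0, 0], [1, 0], [0, 1], [1, 1]]
#     >>> y = [0, 0, 1, 1]
#     >>> clf = ExtraTreesClassifier(n_estimators=30, random_state=0).fit(X, y)
#     >>> len(clf.estimators_)
#     30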
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
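# Illustrative usage sketch (added for clarity, not part of the original
# module): RandomTreesEmbedding is unsupervised, so ``fit_transform`` only
# needs X and returns a sparse one-hot encoding of the leaf each sample
# reaches in every tree. The toy data below is made up for illustration.
#
#     >>> from sklearn.ensemble import RandomTreesEmbedding
#     >>> X = [[0, 0], [1, 0], [0, 1], [1, 1]]
#     >>> emb = RandomTreesEmbedding(n_estimators=5, max_depth=2, random_state=0)
#     >>> X_t = emb.fit_transform(X)
#     >>> X_t.shape[0]                # one row per input sample
#     4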
| bsd-3-clause |
imito/odin | odin/visual/base.py | 1 | 3121 | from __future__ import absolute_import, division, print_function
import os
from collections import defaultdict
from typing import Dict, Text
_FIGURE_LIST = defaultdict(dict)
class Visualizer(object):
""" Visualizer """
def assert_figure(self, fig):
from matplotlib import pyplot as plt
assert isinstance(fig, plt.Figure), \
'fig must be instance of matplotlib.Figure, but given: %s' % str(type(fig))
return fig
def assert_axis(self, ax):
from matplotlib import pyplot as plt
from odin.visual.figures import to_axis
ax = to_axis(ax)
assert isinstance(ax, plt.Axes), \
'ax must be instance of matplotlib.Axes, but given: %s' % str(type(ax))
return ax
def get_figures(self) -> Dict[Text, 'Figure']:
return _FIGURE_LIST[id(self)]
def add_figure(self, name, fig):
from matplotlib import pyplot as plt
self.assert_figure(fig)
_FIGURE_LIST[id(self)][name] = fig
return self
def save_figures(self,
path='/tmp/tmp.pdf',
dpi=None,
separate_files=True,
clear_figures=True,
verbose=False):
from odin.utils import ctext
from matplotlib import pyplot as plt
# checking arguments
if os.path.isfile(path) or '.pdf' == path[-4:].lower():
separate_files = False
assert '.pdf' == path[-4:].lower(), \
"If a file is given, it must be PDF file"
figures = _FIGURE_LIST[id(self)]
n_figures = len(figures)
if n_figures == 0:
return self
# ====== saving PDF file ====== #
if verbose:
print("Saving %s figures to path: " % ctext(n_figures, 'lightcyan'),
ctext(path, 'lightyellow'))
if not separate_files:
if dpi is None:
dpi = 48
if '.pdf' not in path:
path = path + '.pdf'
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages(path)
for key, fig in figures.items():
try:
fig.savefig(pp, dpi=dpi, format='pdf', bbox_inches="tight")
if verbose:
print(" - Saved '%s' to pdf file" % ctext(key, 'cyan'))
except Exception as e:
if verbose:
print(" - Error '%s'" % ctext(key, 'cyan'))
print(" ", e)
pp.close()
# ====== saving PNG file ====== #
else:
if dpi is None:
dpi = 160
if not os.path.exists(path):
os.mkdir(path)
assert os.path.isdir(path), "'%s' must be path to a folder" % path
kwargs = dict(dpi=dpi, bbox_inches="tight")
for key, fig in figures.items():
out_path = os.path.join(path, key + '.png')
try:
fig.savefig(out_path, **kwargs)
if verbose:
print(" - Saved '%s' to %s" %
(ctext(key, 'cyan'), ctext(out_path, 'yellow')))
except Exception as e:
if verbose:
print(" - Error '%s'" % ctext(key, 'cyan'))
print(" ", e)
# ====== clear figures ====== #
if clear_figures:
for fig in figures.values():
plt.close(fig)
figures.clear()
return self
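if __name__ == "__main__":
    # Minimal usage sketch added for illustration only; it assumes matplotlib
    # and the odin package are importable (``save_figures`` imports
    # ``odin.utils.ctext`` internally), and the output path is an arbitrary
    # temporary location.
    from matplotlib import pyplot as plt

    viz = Visualizer()
    fig = plt.figure()
    plt.plot([0, 1, 2, 3], [0, 1, 4, 9])
    viz.add_figure('quadratic_demo', fig)
    # A single ``.pdf`` target collects every registered figure into one file.
    viz.save_figures(path='/tmp/visualizer_demo.pdf', verbose=True)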
| mit |
Kismuz/btgym | btgym/datafeed/multi.py | 1 | 8394 | from logbook import Logger, StreamHandler, WARNING
import datetime
import random
from numpy.random import beta as random_beta
import copy
import os
import sys
import backtrader.feeds as btfeeds
import pandas as pd
from collections import OrderedDict
class BTgymMultiData:
"""
Multiply data streams wrapper.
"""
def __init__(
self,
data_class_ref=None,
data_config=None,
name='multi_data',
data_names=None,
task=0,
log_level=WARNING,
**kwargs
):
"""
Args:
data_class_ref: one of BTgym single-stream datafeed classes
data_config: nested dictionary of individual data streams sources, see notes below.
kwargs: shared parameters for all data streams, see base dataclass
Notes:
`Data_config` specifies all data sources consumed by strategy::
data_config = {
data_line_name_0: {
filename: [source csv filename string or list of strings],
[config: {optional dict of individual stream config. params},]
},
...,
data_line_name_n : {...}
}
Example::
data_config = {
'usd': {'filename': '.../DAT_ASCII_EURUSD_M1_2017.csv'},
'gbp': {'filename': '.../DAT_ASCII_EURGBP_M1_2017.csv'},
'jpy': {'filename': '.../DAT_ASCII_EURJPY_M1_2017.csv'},
'chf': {'filename': '.../DAT_ASCII_EURCHF_M1_2017.csv'},
}
            It is the user's responsibility to choose the correct historic data conversion rates w.r.t. the cash currency (here, EUR).
"""
self.data_class_ref = data_class_ref
if data_config is None:
self.data_config = {}
else:
self.data_config = data_config
self.master_data = None
self.name = name
self.task = task
self.metadata = {'sample_num': 0, 'type': None}
self.filename = None
self.is_ready = False
self.global_timestamp = 0
self.log_level = log_level
self.params = {}
self.names = []
self.sample_num = 0
# Logging:
StreamHandler(sys.stdout).push_application()
self.log = Logger('{}_{}'.format(self.name, self.task), level=self.log_level)
if data_names is None:
# Infer from data configuration (at top-level):
self.data_names = list(self.data_config.keys())
else:
self.data_names = data_names
try:
assert len(self.data_names) > 0, 'At least one data_line should be provided'
except AssertionError:
self.log.error('At least one data_line should be provided')
raise ValueError
# Make dictionary of single-stream datasets:
self.data = OrderedDict()
for key, stream in self.data_config.items():
try:
stream['config'].update(kwargs)
except KeyError:
stream['config'] = kwargs
try:
if stream['dataframe'] is None:
pass
except KeyError:
stream['dataframe'] = None
self.data[key] = self.data_class_ref(
filename=stream['filename'],
dataframe=stream['dataframe'],
data_names=(key,),
task=task,
name='{}_{}'.format(name, key),
log_level=log_level,
**stream['config']
)
try:
# If master-data has been pointed explicitly by 'base' kwarg:
if stream['base']:
self.master_data = self.data[key]
except KeyError:
pass
def set_logger(self, level=None, task=None):
"""
Sets logbook logger.
Args:
level: logbook.level, int
task: task id, int
"""
if task is not None:
self.task = task
if level is not None:
for stream in self.data.values():
stream.log = Logger('{}_{}'.format(stream.name, stream.task), level=level)
self.log = Logger('{}_{}'.format(self.name, self.task), level=level)
def set_params(self, params_dict):
"""
Batch attribute setter.
Args:
params_dict: dictionary of parameters to be set as instance attributes.
"""
for key, value in params_dict.items():
for stream in self.data.values():
setattr(stream, key, value)
def read_csv(self, data_filename=None, force_reload=False):
# Load:
indexes = []
for stream in self.data.values():
stream.read_csv(force_reload=force_reload)
indexes.append(stream.data.index)
# Get indexes intersection:
if len(indexes) > 1:
idx_intersected = indexes[0]
for i in range(1, len(indexes)):
idx_intersected = idx_intersected.intersection(indexes[i])
# Truncate data to common index:
for stream in self.data.values():
stream.data = stream.data.loc[idx_intersected]
def reset(self, **kwargs):
indexes = []
for stream in self.data.values():
stream.reset(**kwargs)
indexes.append(stream.data.index)
# Get indexes intersection:
if len(indexes) > 1:
idx_intersected = indexes[0]
for i in range(1, len(indexes)):
idx_intersected = idx_intersected.intersection(indexes[i])
idx_intersected.drop_duplicates()
self.log.info('shared num. records: {}'.format(len(idx_intersected)))
# Truncate data to common index:
for stream in self.data.values():
stream.data = stream.data.loc[idx_intersected]
# Choose master_data
if self.master_data is None:
# Just choose first key:
all_keys = list(self.data.keys())
if len(all_keys) > 0:
self.master_data = self.data[all_keys[0]]
self.global_timestamp = self.master_data.global_timestamp
self.names = self.master_data.names
self.sample_num = 0
self.is_ready = True
def set_global_timestamp(self, timestamp):
for stream in self.data.values():
stream.set_global_timestamp(timestamp)
self.global_timestamp = self.master_data.global_timestamp
def describe(self):
return {key: stream.describe() for key, stream in self.data.items()}
def sample(self, **kwargs):
# Get sample to infer exact interval:
self.log.debug('Making master sample...')
master_sample = self.master_data.sample(**kwargs)
self.log.debug('Making master ok.')
# Prepare empty instance of multistream data:
sample = BTgymMultiData(
data_names=self.data_names,
task=self.task,
log_level=self.log_level,
name='sub_' + self.name,
)
sample.metadata = copy.deepcopy(master_sample.metadata)
interval = [master_sample.metadata['first_row'], master_sample.metadata['last_row']]
# Populate sample with data:
for key, stream in self.data.items():
self.log.debug('Sampling <{}> with interval: {}, kwargs: {}'.format(key, interval, kwargs))
sample.data[key] = stream.sample(interval=interval, force_interval=True, **kwargs)
sample.filename = {key: stream.filename for key, stream in self.data.items()}
self.sample_num += 1
return sample
def to_btfeed(self):
feed = OrderedDict()
for key, stream in self.data.items():
# Get single-dataline btfeed dict:
feed_dict = stream.to_btfeed()
assert len(list(feed_dict.keys())) == 1, \
'Expected base datafeed dictionary contain single data_line, got: {}'.format(feed_dict)
# Rename every base btfeed according to data_config keys:
feed[key] = feed_dict[list(feed_dict.keys())[0]]
return feed
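# Illustrative configuration sketch (added for clarity, not part of the
# original module). The file paths are placeholders and the import of
# ``BTgymDataset`` is an assumption: substitute whichever single-stream
# datafeed class your btgym installation provides.
#
# from btgym import BTgymDataset             # assumed single-stream class
# multi = BTgymMultiData(
#     data_class_ref=BTgymDataset,
#     data_config={
#         'usd': {'filename': './data/DAT_ASCII_EURUSD_M1_2017.csv'},
#         'gbp': {'filename': './data/DAT_ASCII_EURGBP_M1_2017.csv'},
#     },
#     name='eur_basket',
# )
# multi.reset()               # loads streams and aligns them on the shared index
# sample = multi.sample()     # nested BTgymMultiData covering one sampled interval
# feeds = sample.to_btfeed()  # OrderedDict of backtrader datafeeds, one per key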
| lgpl-3.0 |
jblackburne/scikit-learn | sklearn/feature_selection/variance_threshold.py | 123 | 2572 | # Author: Lars Buitinck
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
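# Illustrative doctest-style sketch (added for clarity, not part of the
# original module): a non-default threshold also removes low- (not just
# zero-) variance columns. The numbers below are a made-up toy example.
#
#     >>> import numpy as np
#     >>> from sklearn.feature_selection import VarianceThreshold
#     >>> X = np.array([[0., 2., 0., 3.], [0., 1., 4., 3.], [0., 1., 1., 3.]])
#     >>> sel = VarianceThreshold(threshold=0.2)
#     >>> sel.fit_transform(X).shape
#     (3, 2)
#     >>> sel.variances_.round(2).tolist()
#     [0.0, 0.22, 2.89, 0.0]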
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/indexes/multi/test_missing.py | 2 | 4113 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
import pandas as pd
from pandas import Int64Index, MultiIndex, PeriodIndex, UInt64Index
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
def test_fillna(idx):
# GH 11343
# TODO: Remove or Refactor. Not Implemented for MultiIndex
for name, index in [('idx', idx), ]:
if len(index) == 0:
pass
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
else:
idx = index.copy()
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with pytest.raises(TypeError, match=msg):
idx.fillna([idx[0]])
idx = index.copy()
values = idx.values
if isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
continue
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = index.__class__(values, freq=index.freq)
else:
idx = index.__class__(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_dropna():
# GH 6194
idx = pd.MultiIndex.from_arrays([[1, np.nan, 3, np.nan, 5],
[1, 2, np.nan, np.nan, 5],
['a', 'b', 'c', np.nan, 'e']])
exp = pd.MultiIndex.from_arrays([[1, 5],
[1, 5],
['a', 'e']])
tm.assert_index_equal(idx.dropna(), exp)
tm.assert_index_equal(idx.dropna(how='any'), exp)
exp = pd.MultiIndex.from_arrays([[1, np.nan, 3, 5],
[1, 2, np.nan, 5],
['a', 'b', 'c', 'e']])
tm.assert_index_equal(idx.dropna(how='all'), exp)
msg = "invalid how option: xxx"
with pytest.raises(ValueError, match=msg):
idx.dropna(how='xxx')
def test_nulls(idx):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
@pytest.mark.xfail
def test_hasnans_isnans(idx):
# GH 11343, added tests for hasnans / isnans
index = idx.copy()
# cases in indices doesn't include NaN
expected = np.array([False] * len(index), dtype=bool)
tm.assert_numpy_array_equal(index._isnan, expected)
assert index.hasnans is False
index = idx.copy()
values = index.values
values[1] = np.nan
index = idx.__class__(values)
expected = np.array([False] * len(index), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(index._isnan, expected)
assert index.hasnans is True
def test_nan_stays_float():
# GH 7031
idx0 = pd.MultiIndex(levels=[["A", "B"], []],
codes=[[1, 0], [-1, -1]],
names=[0, 1])
idx1 = pd.MultiIndex(levels=[["C"], ["D"]],
codes=[[0], [0]],
names=[0, 1])
idxm = idx0.join(idx1, how='outer')
assert pd.isna(idx0.get_level_values(1)).all()
# the following failed in 0.14.1
assert pd.isna(idxm.get_level_values(1)[:-1]).all()
df0 = pd.DataFrame([[1, 2]], index=idx0)
df1 = pd.DataFrame([[3, 4]], index=idx1)
dfm = df0 - df1
assert pd.isna(df0.index.get_level_values(1)).all()
# the following failed in 0.14.1
assert pd.isna(dfm.index.get_level_values(1)[:-1]).all()
| bsd-3-clause |
sameersingh/onebusaway | ml/oba_ml/linear_regression.py | 1 | 1106 | from __future__ import division
import numpy as np
from common import *
from sklearn import linear_model
def main():
np.set_printoptions(threshold=np.nan)
feature_names = get_feature_names()
    x_train, y_train = get_data("training.dat")
    clf = linear_model.LinearRegression()
    clf.fit(x_train, y_train)
w = clf.coef_.reshape(clf.coef_.shape[1],1)
y_hat_train = x_train.dot(w)
rmse_our_train, rmse_oba_train = get_rmse(y_train, y_hat_train)
x_test, y_test = get_data("test.dat")
y_hat_test = x_test.dot(w)
rmse_our_test, rmse_oba_test = get_rmse(y_test, y_hat_test)
print "RMSE OUR Train ", rmse_our_train
print "RMSE OBA Train ", rmse_oba_train
print "RMSE OUR Test ", rmse_our_test
print "RMSE OBA Test ", rmse_oba_test
save_scatter_plot(y_train, y_hat_train, "train")
save_scatter_plot(y_test, y_hat_test, "test")
build_output_files(y_hat_train, y_hat_test, y_train, y_test)
    print_weights(w, feature_names)
report_range(y_train)
report_range(y_test)
if __name__ == '__main__':
main() | apache-2.0 |
sibis-platform/ncanda-datacore | scripts/reporting/xnat_extractor.py | 2 | 11177 | #!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
NCANDA XNAT Extractor
Extract all experiment, scan, and reading data from NCANDA's XNAT server.
"""
from __future__ import print_function
import os
import glob
import json
import tempfile
import requests
import pandas as pd
from lxml import etree
# Verbose setting for cli
verbose = None
# Define global namespace for parsing XNAT XML files
ns = {'xnat': 'http://nrg.wustl.edu/xnat'}
def write_experiments(session):
"""
Write out a csv file representing all the experiments in the given XNAT
session.
:param session: requests.session
:return: str
"""
experiments_filename = tempfile.mktemp()
experiments = session.xnat_http_get_all_experiments()
with open(experiments_filename, 'w') as fi:
fi.flush()
fi.write(experiments.text)
fi.close()
if verbose:
print("Writing list of experiment ids to temp: {0}".format(experiments_filename))
return experiments_filename
def extract_experiment_xml(session, experiment_dir, extract=None):
"""
Open an experiments csv file, then extract the XML representation,
and write it to disk.
:param session: requests.session
:param experiment_dir: str
:param extract: int
:return: str
"""
experiments_file = write_experiments(session)
# make sure the output directory exists and is empty
outdir = os.path.abspath(experiment_dir)
if not os.path.exists(outdir):
os.mkdir(outdir)
else:
[os.remove(f) for f in glob.glob(os.path.join(outdir, '*'))]
df_experiments = pd.read_csv(experiments_file)
if not extract:
if verbose:
print("Running XML extraction for all sessions: {0} Total".format(df_experiments.shape[0]))
extract = df_experiments.shape[0]
experiment_ids = df_experiments.ID[:extract]
experiment_files = list()
for idx, experiment_id in list(experiment_ids.items()):
experiment = session.xnat_http_get_experiment_xml(experiment_id)
experiment_file = os.path.join(outdir, '{0}.xml'.format(experiment_id))
experiment_files.append(experiment_file)
with open(experiment_file, 'w') as fi:
fi.flush()
fi.write(experiment.text)
fi.close()
if verbose:
num = idx + 1
print("Writing XML file {0} of {1} to: {2}".format(num, extract, experiment_file))
return experiment_files
def parse_xml_file(experiment_xml_file):
try :
return etree.parse(experiment_xml_file)
except Exception as err:
print("ERROR: Failed to parse", experiment_xml_file)
print(err)
return None
def get_experiment_info(experiment_xml_file):
"""
Extract basic information from the experiment xml file and return a
dictionary
:param experiment_xml_file: str
:return: dict
"""
xml = parse_xml_file(experiment_xml_file)
if not xml :
return ""
root = xml.getroot()
site_experiment_id = root.attrib.get('label')
site_id = site_experiment_id[0:11]
site_experiment_date = site_experiment_id[12:20]
project = root.attrib.get('project')
experiment_id = root.attrib.get('ID')
try :
experiment_date = root.find('./xnat:date', namespaces=ns).text
subject_id = root.find('./xnat:subject_ID', namespaces=ns).text
result = dict(site_id=site_id,
subject_id=subject_id,
site_experiment_id=site_experiment_id,
site_experiment_date=site_experiment_date,
project=project,
experiment_id=experiment_id,
experiment_date=experiment_date)
if verbose:
print("Parsed experiment info for: {0}".format(result))
except :
print("ERROR: %s does not have xnat:date or xnat:subject_ID defined !" % (experiment_xml_file))
result = ""
return result
def get_experiments_dir_info(experiments_dir):
"""
Get a list of experiment dicts from all the experiment xml files in the
experiments directory
:param experiments_dir: str
:return: list
"""
results = list()
if os.path.exists(os.path.abspath(experiments_dir)):
glob_path = ''.join([os.path.abspath(experiments_dir), '/*'])
experiment_files = glob.glob(glob_path)
else:
experiment_files = list()
for path in experiment_files:
results.append(get_experiment_info(path))
return results
def get_scans_info(experiment_xml_file):
"""
Get a dict of dicts for each scan from an XNAT experiment XML document
:param experiment_xml_file: lxml.etree.Element
:return: list
"""
result = list()
xml = parse_xml_file(experiment_xml_file)
if not xml:
return result
root = xml.getroot()
experiment_id = root.attrib.get('ID')
scans = root.findall('./xnat:scans/xnat:scan', namespaces=ns)
for scan in scans:
values = dict()
scan_id = scan.attrib.get('ID')
scan_type = scan.attrib.get('type')
# handle null finds
values.update(quality=scan.find('./xnat:quality', namespaces=ns))
values.update(series_description=scan.find(
'./xnat:series_description', namespaces=ns))
values.update(coil=scan.find('./xnat:coil', namespaces=ns))
values.update(field_strength=scan.find('./xnat:fieldStrength',
namespaces=ns))
values.update(scan_note=scan.find('./xnat:note', namespaces=ns))
for k, v in list(values.items()):
try:
values[k] = v.text
except AttributeError as e:
values[k] = None
if verbose:
print((e, "for attribute {0} in scan {1} of experiment {2}".format(k, scan_id, experiment_id)))
scan_dict = dict(experiment_id=experiment_id,
scan_id=scan_id,
scan_type=scan_type,
quality=values.get('quality'),
scan_note=values.get('scan_note'),
series_description=values.get('series_description'),
coil=values.get('coil'),
field_strength=values.get('field_strength'))
result.append(scan_dict)
return result
def get_reading_info(experiment_xml_file):
"""
Get a dict of dicts for each reading from an XNAT experiment XML document
    These are the visit-specific information, e.g. DateToDVD, Subject ID, session notes, ....
(no individual scan info)
:param experiment_xml_file: lxml.etree.Element
:return: list
"""
xml = parse_xml_file(experiment_xml_file)
if not xml:
return None
root = xml.getroot()
experiment_id = root.attrib.get('ID')
try:
note = root.find('./xnat:note', namespaces=ns).text
except AttributeError:
note = None
pass
result = dict(experiment_id=experiment_id,
note=note,
datetodvd=None,
findings=None,
findingsdate=None,
excludefromanalysis=None,
physioproblemoverride=None,
dtimismatchoverride=None,
phantommissingoverride=None)
values = dict()
fields = root.findall('./xnat:fields/xnat:field', namespaces=ns)
for field in fields:
name = field.attrib.get('name')
value = root.xpath('.//xnat:field[@name="{0}"]/text()'.format(name),
namespaces=ns)
# handle null finds
values[name] = value
for k, v in list(values.items()):
try:
values[k] = v[1]
except IndexError:
values[k] = None
result.update(values)
return result
def get_experiments_dir_reading_info(experiments_dir):
"""
Get a list of reading dicts from all the experiment xml files in the
experiments directory
:param experiments_dir: str
:return: list
"""
results = list()
if os.path.exists(os.path.abspath(experiments_dir)):
glob_path = ''.join([os.path.abspath(experiments_dir), '/*'])
experiment_files = glob.glob(glob_path)
else:
experiment_files = list()
for path in experiment_files:
info=get_reading_info(path)
if info :
results.append(info)
return results
def get_experiments_dir_scan_info(experiments_dir):
"""
Get a list of scan dicts from all the experiment xml files in the
experiments directory
:param experiments_dir: str
:return: list
"""
results = list()
if os.path.exists(os.path.abspath(experiments_dir)):
glob_path = ''.join([os.path.abspath(experiments_dir), '/*'])
experiment_files = glob.glob(glob_path)
else:
experiment_files = list()
for path in experiment_files:
results.append(get_scans_info(path))
return results
def get_scans_by_type(scans, scan_type):
"""
Get scans based on their type
:param scans: dict
:param scan_type: str
:return:
"""
result = list()
for scan in scans:
if scan['scan_type'] == scan_type:
result.append(scan)
return result
def scans_to_dataframe(scans):
"""
Convert scan dict to a pandas.DataFrame
:param scans: dict
:return: pandas.DataFrame
"""
flat = [item for sublist in scans for item in sublist]
return pd.DataFrame(flat)
def experiments_to_dataframe(experiments):
"""
Convert a list of experiment dicts to a pandas.DataFrame
:param experiments: dict
:return: pandas.DataFrame
"""
return pd.DataFrame(experiments)
def reading_to_dataframe(reading):
"""
Convert a list of reading dicts to a pandas.DataFrame
:param reading: dict
:return: pandas.DataFrame
"""
return pd.DataFrame(reading)
def merge_experiments_scans_reading(experiments, scans, reading):
"""
    Merge the experiments, scans, and reading data into a single dataframe
    :param experiments: dict
    :param scans: dict
    :param reading: dict
    :return: pandas.DataFrame
"""
experiments_df = experiments_to_dataframe(experiments)
scans_df = scans_to_dataframe(scans)
reading_df = reading_to_dataframe(reading)
exp_scan = pd.merge(experiments_df, scans_df, how='inner')
merged = pd.merge(exp_scan, reading_df, how='inner')
# reindex using multi-index of subject, experiment, scan
result = merged.to_records(index=False)
idx = pd.MultiIndex.from_arrays([merged.subject_id.values,
merged.experiment_id.values,
merged.scan_id.values],
names=['subject_id',
'experiment_id',
'scan_id'])
return pd.DataFrame(result, index=idx)
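# Illustrative end-to-end flow for the helpers above; a minimal sketch that
# assumes `session` is an authenticated object exposing the xnat_http_get_*
# helpers used by write_experiments, and the output directory is hypothetical:
#   extract_experiment_xml(session, '/tmp/experiments')
#   experiments = get_experiments_dir_info('/tmp/experiments')
#   scans = get_experiments_dir_scan_info('/tmp/experiments')
#   reading = get_experiments_dir_reading_info('/tmp/experiments')
#   df = merge_experiments_scans_reading(experiments, scans, reading)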
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/lib/matplotlib/finance.py | 4 | 23630 | """
A collection of modules for collecting, analyzing and plotting
financial data. User contributions welcome!
"""
from __future__ import division, print_function
import contextlib, os, sys, warnings
from urllib2 import urlopen
if sys.version_info[0] < 3:
from hashlib import md5
else:
import hashlib
md5 = lambda x: hashlib.md5(x.encode())
import datetime
import numpy as np
from matplotlib import verbose, get_cachedir
from matplotlib.dates import date2num
from matplotlib.cbook import iterable, mkdirs
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.colors import colorConverter
from matplotlib.lines import Line2D, TICKLEFT, TICKRIGHT
from matplotlib.patches import Rectangle
from matplotlib.transforms import Affine2D
cachedir = get_cachedir()
# cachedir will be None if there is no writable directory.
if cachedir is not None:
cachedir = os.path.join(cachedir, 'finance.cache')
else:
# Should only happen in a restricted environment (such as Google App
# Engine). Deal with this gracefully by not caching finance data.
cachedir = None
stock_dt = np.dtype([('date', object),
('year', np.int16),
('month', np.int8),
('day', np.int8),
('d', np.float), # mpl datenum
('open', np.float),
('close', np.float),
('high', np.float),
('low', np.float),
('volume', np.float),
('aclose', np.float)])
def parse_yahoo_historical(fh, adjusted=True, asobject=False):
"""
Parse the historical data in file handle fh from yahoo finance.
*adjusted*
If True (default) replace open, close, high, and low prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
*asobject*
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, close, high, low, volume
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, close, high, low,
volume, aclose
      where d is a floating point representation of date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
"""
lines = fh.readlines()
results = []
datefmt = '%Y-%m-%d'
for line in lines[1:]:
vals = line.split(',')
if len(vals)!=7:
continue # add warning?
datestr = vals[0]
#dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
# Using strptime doubles the runtime. With the present
# format, we don't need it.
dt = datetime.date(*[int(val) for val in datestr.split('-')])
dnum = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = float(vals[5])
aclose = float(vals[6])
results.append((dt, dt.year, dt.month, dt.day,
dnum, open, close, high, low, volume, aclose))
results.reverse()
d = np.array(results, dtype=stock_dt)
if adjusted:
scale = d['aclose'] / d['close']
scale[np.isinf(scale)] = np.nan
d['open'] *= scale
d['close'] *= scale
d['high'] *= scale
d['low'] *= scale
if not asobject:
# 2-D sequence; formerly list of tuples, now ndarray
ret = np.zeros((len(d), 6), dtype=np.float)
ret[:,0] = d['d']
ret[:,1] = d['open']
ret[:,2] = d['close']
ret[:,3] = d['high']
ret[:,4] = d['low']
ret[:,5] = d['volume']
if asobject is None:
return ret
return [tuple(row) for row in ret]
return d.view(np.recarray) # Close enough to former Bunch return
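# Illustrative use of parse_yahoo_historical together with
# fetch_historical_yahoo (defined below); a sketch only -- the ticker and
# date range are hypothetical and the Yahoo endpoint must be reachable:
#   fh = fetch_historical_yahoo('IBM', (2011, 1, 1), (2011, 12, 31))
#   r = parse_yahoo_historical(fh, asobject=True, adjusted=True)
#   print(r.date[0], r.aclose[0])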
def fetch_historical_yahoo(ticker, date1, date2, cachename=None,dividends=False):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are date or datetime instances, or (year, month, day) sequences.
Ex:
fh = fetch_historical_yahoo('^GSPC', (2000, 1, 1), (2001, 12, 31))
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
and date range)
set dividends=True to return dividends instead of price data. With
this option set, parse functions will not work
a file handle is returned
"""
ticker = ticker.upper()
if iterable(date1):
d1 = (date1[1]-1, date1[2], date1[0])
else:
d1 = (date1.month-1, date1.day, date1.year)
if iterable(date2):
d2 = (date2[1]-1, date2[2], date2[0])
else:
d2 = (date2.month-1, date2.day, date2.year)
if dividends:
g='v'
verbose.report('Retrieving dividends instead of prices')
else:
g='d'
urlFmt = 'http://ichart.yahoo.com/table.csv?a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=%s&ignore=.csv'
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker, g)
# Cache the finance data if cachename is supplied, or there is a writable
# cache directory.
if cachename is None and cachedir is not None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if cachename is not None:
if os.path.exists(cachename):
fh = open(cachename)
verbose.report('Using cachefile %s for %s'%(cachename, ticker))
else:
mkdirs(os.path.abspath(os.path.dirname(cachename)))
with contextlib.closing(urlopen(url)) as urlfh:
with open(cachename, 'wb') as fh:
fh.write(urlfh.read())
verbose.report('Saved %s data to cache file %s'%(ticker, cachename))
fh = open(cachename, 'r')
return fh
else:
return urlopen(url)
def quotes_historical_yahoo(ticker, date1, date2, asobject=False,
adjusted=True, cachename=None):
"""
Get historical data for ticker between date1 and date2. date1 and
date2 are datetime instances or (year, month, day) sequences.
See :func:`parse_yahoo_historical` for explanation of output formats
and the *asobject* and *adjusted* kwargs.
Ex:
sp = f.quotes_historical_yahoo('^GSPC', d1, d2,
asobject=True, adjusted=True)
returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
[n,bins,patches] = hist(returns, 100)
mu = mean(returns)
sigma = std(returns)
x = normpdf(bins, mu, sigma)
plot(bins, x, color='red', lw=2)
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
and date range)
"""
# Maybe enable a warning later as part of a slow transition
# to using None instead of False.
#if asobject is False:
# warnings.warn("Recommend changing to asobject=None")
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try:
ret = parse_yahoo_historical(fh, asobject=asobject,
adjusted=adjusted)
if len(ret) == 0:
return None
except IOError as exc:
warnings.warn('fh failure\n%s'%(exc.strerror[1]))
return None
return ret
def plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""
quotes is a sequence of (time, open, close, high, low, ...) sequences
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
time must be in float date format - see date2num
ax : an Axes instance to plot to
ticksize : open/close tick marker in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
lines = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open : color = colorup
else : color = colordown
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(
xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(
xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
def candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
quotes is a sequence of (time, open, close, high, low, ...) sequences.
As long as the first 5 elements are these values,
the record can be as long as you want (eg it may store volume).
time must be in float days format - see date2num
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
ax : an Axes instance to plot to
width : fraction of a day for the rectangle width
colorup : the color of the rectangle where close >= open
colordown : the color of the rectangle where close < open
alpha : the rectangle alpha level
return value is lines, patches where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width/2.0
lines = []
patches = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open :
color = colorup
lower = open
height = close-open
else :
color = colordown
lower = close
height = open-close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color='k',
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy = (t-OFFSET, lower),
width = width,
height = height,
facecolor = color,
edgecolor = color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
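# Illustrative call of candlestick(); a sketch assuming `ax` is a matplotlib
# Axes and `quotes` is a sequence of (d, open, close, high, low, volume)
# tuples such as the default return value of quotes_historical_yahoo():
#   lines, patches = candlestick(ax, quotes, width=0.6, colorup='g',
#                                colordown='r')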
def plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
ax : an Axes instance to plot to
ticksize : size of open and close ticks in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [ ((-ticksize, 0), (0, 0)) ]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [ ((0, 0), (ticksize, 0)) ]
offsetsOpen = [ (i, open) for i, open in zip(xrange(len(opens)), opens) if open != -1 ]
offsetsClose = [ (i, close) for i, close in zip(xrange(len(closes)), closes) if close != -1 ]
scale = ax.figure.dpi * (1.0/72.0)
tickTransform = Affine2D().scale(scale, 0.0)
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,1
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,1
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(rangeSegments)==len(offsetsOpen))
assert(len(offsetsOpen)==len(offsetsClose))
assert(len(offsetsClose)==len(colors))
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors = colors,
linewidths = lw,
antialiaseds = useAA,
)
openCollection = LineCollection(openSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsOpen,
transOffset = ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsClose,
transOffset = ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""
Represent the open, close as a bar line and high low range as a
vertical line.
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
return value is lineCollection, barCollection
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
delta = width/2.
barVerts = [ ( (i-delta, open), (i-delta, close), (i+delta, close), (i+delta, open) ) for i, open, close in zip(xrange(len(opens)), opens, closes) if open != -1 and close!=-1 ]
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(barVerts)==len(rangeSegments))
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors = ( (0,0,0,1), ),
linewidths = lw,
antialiaseds = useAA,
)
barCollection = PolyCollection(barVerts,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
ax.add_collection(rangeCollection)
return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
delta = width/2.
bars = [ ( (i-delta, 0), (i-delta, v), (i+delta, v), (i+delta, 0)) for i, v in enumerate(volumes) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = (0,),
linewidths = (0.5,),
)
ax.add_collection(barCollection)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
return barCollection
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
nb: first point is not displayed - it is used only for choosing the
right color
"""
return volume_overlay(ax,closes[:-1],closes[1:],volumes[1:],colorup,colordown,width,alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. quotes is a list of (d,
open, close, high, low, volume) and close-open is used to
determine the color of the bar
kwarg
width : the bar width in points
colorup : the color of the lines where close1 >= close0
colordown : the color of the lines where close1 < close0
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
dates, opens, closes, highs, lows, volumes = zip(*quotes)
colors = [colord[close1>=close0] for close0, close1 in zip(closes[:-1], closes[1:]) if close0!=-1 and close1 !=-1]
colors.insert(0,colord[closes[0]>=opens[0]])
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, volume), (right, volume), (right, 0)) for d, open, close, high, low, volume in quotes]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
dates = [d for d, open, close, high, low, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, close, high, low, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.bounds
#print 'viewlim', ax.viewLim.bounds
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
def index_bar(ax, vals,
facecolor='b', edgecolor='l',
width=4, alpha=1.0, ):
"""
Add a bar collection graph with height vals (-1 is missing).
ax : an Axes instance to plot to
width : the bar width in points
alpha : bar transparency
"""
facecolors = (colorConverter.to_rgba(facecolor, alpha),)
edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, v), (right, v), (right, 0)) for v in vals if v != -1 ]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
offsetsBars = [ (i, 0) for i,v in enumerate(vals) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = facecolors,
edgecolors = edgecolors,
antialiaseds = (0,),
linewidths = (0.5,),
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v!=-1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
| mit |
RedhawkSDR/FrequencyDivider | tests/test_FrequencyDivider.py | 1 | 5027 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK.
#
# REDHAWK is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# REDHAWK is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import time
from ossie.utils import sb
import numpy as np
import matplotlib.pyplot as plt
#Use sb helpers for producing and receiving data from component
src = sb.DataSource()
sink = sb.DataSink()
divider = sb.launch('../FrequencyDivider.spd.xml')
#Make connections
src.connect(divider)
divider.connect(sink)
#Start sandbox env
sb.start()
#generate message signal
numSamples = 1000
data = []
for i in xrange(numSamples):
data.append((float(0.3)*float(np.cos(2*np.pi*float(0.013)*i + float(0.0)))) +
(float(0.2)*float(np.cos(2*np.pi*float(0.021)*i + float(0.4)))) +
(float(0.4)*float(np.cos(2*np.pi*float(0.037)*i + float(1.7)))))
#Count zero crossings in message data
signCurrent = False
if data[0] > 0:
signLast = True
else:
signLast = False
d_zeroCross = 0
for i in xrange(1,len(data)):
if data[i] > 0:
signCurrent = True
else:
signCurrent = False
if signCurrent != signLast:
d_zeroCross += 1
signLast = signCurrent
#-----------------------------------------------
# Unit Test 1
# Testing Default Property Settings
#-----------------------------------------------
#Send data across the wave
src.push(data)
time.sleep(1)
received_data = sink.getData()
#Count zero-crossings in received data
signCurrent = False
if received_data[0] > 0:
signLast = True
else:
signLast = False
r_zeroCross = 0
for i in xrange(1,len(received_data)):
if received_data[i] > 0:
signCurrent = True
else:
signCurrent = False
if signCurrent != signLast:
r_zeroCross += 1
signLast = signCurrent
#Plots
#plt.plot(data)
#plt.plot(received_data)
#plt.plot([0,len(data)],[0,0],'k-',lw=1)
#plt.show()
#Check expected values against the received values
expected = d_zeroCross
passed = True
if r_zeroCross != expected:
passed = False
if passed:
print "Unit Test 1 .........................",u'\u2714'
else:
print "Unit Test 1 .........................",u'\u2718'
#-----------------------------------------------
# Unit Test 2
# Testing Setting the Divisor Property
#-----------------------------------------------
#Configure prop
divider.configure({'Divisor':16})
#Send data across the wave
src.push(data)
time.sleep(1)
received_data = sink.getData()
#Count zero-crossings in received data
signCurrent = False
if received_data[0] > 0:
signLast = True
else:
signLast = False
r_zeroCross = 0
for i in xrange(1,len(received_data)):
if received_data[i] > 0:
signCurrent = True
else:
signCurrent = False
if signCurrent != signLast:
r_zeroCross += 1
signLast = signCurrent
#Plots
#plt.plot(data)
#plt.plot(received_data)
#plt.plot([0,len(data)],[0,0],'k-',lw=1)
#plt.show()
#Check expected values against the received values
expected = d_zeroCross / divider.Divisor
passed = True
if r_zeroCross != expected:
passed = False
if passed:
print "Unit Test 2 .........................",u'\u2714'
else:
print "Unit Test 2 .........................",u'\u2718'
#-----------------------------------------------
# Unit Test 3
# Testing Zero Divisor
#-----------------------------------------------
#Configure prop
divider.configure({'Divisor':0})
#Send data across the wave
src.push(data)
time.sleep(1)
received_data = sink.getData()
#Count zero-crossings in received data
signCurrent = False
if received_data[0] > 0:
signLast = True
else:
signLast = False
r_zeroCross = 0
for i in xrange(1,len(received_data)):
if received_data[i] > 0:
signCurrent = True
else:
signCurrent = False
if signCurrent != signLast:
r_zeroCross += 1
signLast = signCurrent
#Plots
#plt.plot(data)
#plt.plot(received_data)
#plt.plot([0,len(data)],[0,0],'k-',lw=1)
#plt.show()
#Check expected values against the received values
#NOTE: the Divisor property is 0 here, so a direct division would raise
#ZeroDivisionError; assume (hypothetically) that the component passes the
#signal through unchanged when Divisor is 0.
if divider.Divisor != 0:
    expected = d_zeroCross / divider.Divisor
else:
    expected = d_zeroCross
passed = True
if r_zeroCross != expected:
passed = False
if passed:
print "Unit Test 3 .........................",u'\u2714'
else:
print "Unit Test 3 .........................",u'\u2718'
#Stop sandbox env
sb.stop()
| lgpl-3.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/axes_grid/demo_curvelinear_grid.py | 16 | 4116 | import numpy as np
#from matplotlib.path import Path
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from mpl_toolkits.axisartist.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axisartist import Subplot
from mpl_toolkits.axisartist import SubplotHost, \
ParasiteAxesAuxTrans
def curvelinear_test1(fig):
"""
grid for custom transform.
"""
def tr(x, y):
x, y = np.asarray(x), np.asarray(y)
return x, y-x
def inv_tr(x,y):
x, y = np.asarray(x), np.asarray(y)
return x, y+x
grid_helper = GridHelperCurveLinear((tr, inv_tr))
ax1 = Subplot(fig, 1, 2, 1, grid_helper=grid_helper)
# ax1 will have a ticks and gridlines defined by the given
# transform (+ transData of the Axes). Note that the transform of
# the Axes itself (i.e., transData) is not affected by the given
# transform.
fig.add_subplot(ax1)
xx, yy = tr([3, 6], [5.0, 10.])
ax1.plot(xx, yy)
ax1.set_aspect(1.)
ax1.set_xlim(0, 10.)
ax1.set_ylim(0, 10.)
ax1.axis["t"]=ax1.new_floating_axis(0, 3.)
ax1.axis["t2"]=ax1.new_floating_axis(1, 7.)
ax1.grid(True)
import mpl_toolkits.axisartist.angle_helper as angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
# PolarAxes.PolarTransform takes radian. However, we want our coordinate
# system in degree
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
# Find a grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also uses an appropriate formatter.  Note that the acceptable
    # Locator and Formatter classes are a bit different from mpl's, and you
    # cannot directly use mpl's Locator and Formatter here (but that may be
    # possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 2, 2, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
# let right axis shows ticklabels for 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
# let bottom axis shows ticklabels for 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if 1:
fig = plt.figure(1, figsize=(7, 4))
fig.clf()
curvelinear_test1(fig)
curvelinear_test2(fig)
plt.draw()
plt.show()
| mit |
rosspalmer/DataTools | depr/0.2.5/dtools/holder.py | 1 | 2726 |
from .formatting import format
from .source import data_source
import pandas as pd
class data_holder(object):
def __init__(self):
self.ds = {}
self.x = None
self.y = None
self.current_ds = None
self.current_index = []
self.remain_index = []
self.subs = {}
self.default_sub = None
def load_csv(self, name, filepath, y_col=None, x_col=None,
id_col=None, sep=','):
df = pd.read_csv(filepath, sep=sep)
if id_col is not None:
df = df.set_index(id_col)
if y_col is not None:
x = df.drop(y_col, axis=1)
y = df[y_col]
else:
x = df
y = None
if x_col is not None:
x = x[x_col]
self.load_ds(name, x, y)
def load_ds(self, name, x, y=None):
ds = data_source()
ds.x = x
if y is not None:
ds.y = y
else:
ds.y = pd.DataFrame()
self.ds[name] = ds
def partition(self, ratio=1.0):
self.x, self.y, self.remain_index = \
self.ds[self.current_ds].partition(ratio, self.remain_index)
self.current_index = self.x.index
if self.default_sub is not None:
self.use_sub(self.default_sub)
def reset_ds(self):
self.x = self.ds[self.current_ds].x
self.y = self.ds[self.current_ds].y
def update_ds(self):
self.ds[self.current_ds].x = self.x
self.ds[self.current_ds].y = self.y
def use_ds(self, name, default_sub=None, new=False):
self.current_ds = name
if new:
self.remain_index = self.ds[name].x.index.tolist()
self.current_index = self.ds[name].x.index.tolist()
self.x = self.ds[name].x.loc[self.remain_index]
if self.ds[name].y is None:
self.y = None
else:
self.y = self.ds[name].y.loc[self.remain_index]
self.default_sub = default_sub
def format(self, mode):
self.x, self.y = format(mode, self.x, self.y)
def create_sub(self, sub_name, col_filter=None, row_filter=None,
col_dummy=None, col_normalize=None):
self.subs[sub_name] = {'col_filter':col_filter, 'row_filter':row_filter,
'col_dummy':col_dummy, 'col_normalize':col_normalize}
def use_sub(self, sub_name, output_only=False):
x, y = self.ds[self.current_ds].subset(self.subs[sub_name])
x = x.loc[self.current_index]
y = y.loc[self.current_index]
if output_only:
return x, y
if not output_only:
self.x = x
self.y = y
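# Illustrative data_holder workflow; a minimal sketch in which the file name
# and the 'label' column are hypothetical:
#   dh = data_holder()
#   dh.load_csv('train', 'train.csv', y_col='label')
#   dh.use_ds('train', new=True)
#   dh.partition(ratio=0.8)
#   x_train, y_train = dh.x, dh.y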
| mit |
subutai/nupic.research | nupic/research/frameworks/pytorch/tiny_imagenet_dataset.py | 3 | 5198 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import division, print_function
import os
import shutil
import pandas as pd
from torchvision.datasets.folder import ImageFolder
from torchvision.datasets.utils import check_integrity, download_and_extract_archive
ARCHIVE_DICT = {
"url": "http://cs231n.stanford.edu/tiny-imagenet-200.zip",
"md5": "90528d7ca1a48142e341f4ef8d21d0de",
}
VAL_ANNOTATIONS = "val_annotations.txt"
META_FILE = "words.txt"
DATASET_FOLDER = "tiny-imagenet-200"
class TinyImageNet(ImageFolder):
"""`Tiny ImageNet <https://tiny-imagenet.herokuapp.com/>`Classification Dataset.
Based on ImageNet <http://www.image-net.org/challenges/LSVRC/2014/>
Args:
root (string): Root directory of the TinyImageNet Dataset.
train (boolean, optional): If true, loads training set, otherwise validation set
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
Attributes:
classes (list): List of the class name tuples.
class_to_idx (dict): Dict with items (class_name, class_index).
wnids (list): List of the WordNet IDs.
wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
imgs (list): List of (image path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(self, root, train=True, download=False, **kwargs):
        # load the training or the validation split depending on the train flag
self.train = train
img_folder = os.path.join(os.path.expanduser(root), DATASET_FOLDER)
self.meta_file = os.path.join(img_folder, META_FILE)
# if for the first time, download
if download:
self.download(root, os.path.join(img_folder, "val"))
if self.train:
img_folder = os.path.join(img_folder, "train")
else:
img_folder = os.path.join(img_folder, "val")
super(TinyImageNet, self).__init__(img_folder, **kwargs)
# extra attributes for easy reference
wnid_to_classes = self._load_meta_file()
self.wnids = self.classes
self.wnid_to_idx = self.class_to_idx
self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
self.class_to_idx = {
cls: idx for idx, clss in enumerate(self.classes) for cls in clss
}
def download(self, root, val_folder):
# regular download
if not check_integrity(self.meta_file):
download_and_extract_archive(
ARCHIVE_DICT["url"], root, md5=ARCHIVE_DICT["md5"]
)
print("Rearranging validation folder.")
annotations = self._load_val_annotations(val_folder)
prepare_val_folder(val_folder, annotations)
else:
print("Dataset already downloaded.")
def _load_meta_file(self):
# TODO: make it faster
mapping = pd.read_csv(self.meta_file, sep="\t", index_col=None, header=None)
return {wnid: classes for _, (wnid, classes) in mapping.iterrows()}
def _load_val_annotations(self, val_folder):
annotations_file = os.path.join(val_folder, VAL_ANNOTATIONS)
return pd.read_csv(annotations_file, sep="\t", index_col=None, header=None)
def prepare_val_folder(val_folder, annotations):
# create folders
for wnid in annotations.iloc[:, 1].unique():
os.mkdir(os.path.join(val_folder, wnid))
# move files
for _, (img_file, wnid) in annotations.iloc[:, :2].iterrows():
img_path = os.path.join(val_folder, "images", img_file)
shutil.move(
img_path, os.path.join(val_folder, wnid, os.path.basename(img_file))
)
# delete images file
os.rmdir(os.path.join(val_folder, "images"))
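# Illustrative usage of TinyImageNet; a sketch only -- the root path is
# hypothetical and download=True requires network access:
#   train_set = TinyImageNet('~/datasets', train=True, download=True)
#   val_set = TinyImageNet('~/datasets', train=False)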
| agpl-3.0 |
paladin74/neural-network-animation | matplotlib/tests/test_mathtext.py | 10 | 10359 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import matplotlib
from matplotlib.testing.decorators import image_comparison, knownfailureif
import matplotlib.pyplot as plt
from matplotlib import mathtext
math_tests = [
r'$a+b+\dots+\dot{s}+\ldots$',
r'$x \doteq y$',
r'\$100.00 $\alpha \_$',
r'$\frac{\$100.00}{y}$',
r'$x y$',
r'$x+y\ x=y\ x<y\ x:y\ x,y\ x@y$',
r'$100\%y\ x*y\ x/y x\$y$',
r'$x\leftarrow y\ x\forall y\ x-y$',
r'$x \sf x \bf x {\cal X} \rm x$',
r'$x\ x\,x\;x\quad x\qquad x\!x\hspace{ 0.5 }y$',
r'$\{ \rm braces \}$',
r'$\left[\left\lfloor\frac{5}{\frac{\left(3\right)}{4}} y\right)\right]$',
r'$\left(x\right)$',
r'$\sin(x)$',
r'$x_2$',
r'$x^2$',
r'$x^2_y$',
r'$x_y^2$',
r'$\prod_{i=\alpha_{i+1}}^\infty$',
r'$x = \frac{x+\frac{5}{2}}{\frac{y+3}{8}}$',
r'$dz/dt = \gamma x^2 + {\rm sin}(2\pi y+\phi)$',
r'Foo: $\alpha_{i+1}^j = {\rm sin}(2\pi f_j t_i) e^{-5 t_i/\tau}$',
r'$\mathcal{R}\prod_{i=\alpha_{i+1}}^\infty a_i \sin(2 \pi f x_i)$',
r'Variable $i$ is good',
r'$\Delta_i^j$',
r'$\Delta^j_{i+1}$',
r'$\ddot{o}\acute{e}\grave{e}\hat{O}\breve{\imath}\tilde{n}\vec{q}$',
r"$\arccos((x^i))$",
r"$\gamma = \frac{x=\frac{6}{8}}{y} \delta$",
r'$\limsup_{x\to\infty}$',
r'$\oint^\infty_0$',
r"$f'$",
r'$\frac{x_2888}{y}$',
r"$\sqrt[3]{\frac{X_2}{Y}}=5$",
r"$\sqrt[5]{\prod^\frac{x}{2\pi^2}_\infty}$",
r"$\sqrt[3]{x}=5$",
r'$\frac{X}{\frac{X}{Y}}$',
r"$W^{3\beta}_{\delta_1 \rho_1 \sigma_2} = U^{3\beta}_{\delta_1 \rho_1} + \frac{1}{8 \pi 2} \int^{\alpha_2}_{\alpha_2} d \alpha^\prime_2 \left[\frac{ U^{2\beta}_{\delta_1 \rho_1} - \alpha^\prime_2U^{1\beta}_{\rho_1 \sigma_2} }{U^{0\beta}_{\rho_1 \sigma_2}}\right]$",
r'$\mathcal{H} = \int d \tau \left(\epsilon E^2 + \mu H^2\right)$',
r'$\widehat{abc}\widetilde{def}$',
'$\\Gamma \\Delta \\Theta \\Lambda \\Xi \\Pi \\Sigma \\Upsilon \\Phi \\Psi \\Omega$',
'$\\alpha \\beta \\gamma \\delta \\epsilon \\zeta \\eta \\theta \\iota \\lambda \\mu \\nu \\xi \\pi \\kappa \\rho \\sigma \\tau \\upsilon \\phi \\chi \\psi$',
# The examples prefixed by 'mmltt' are from the MathML torture test here:
# http://www.mozilla.org/projects/mathml/demo/texvsmml.xhtml
r'${x}^{2}{y}^{2}$',
r'${}_{2}F_{3}$',
r'$\frac{x+{y}^{2}}{k+1}$',
r'$x+{y}^{\frac{2}{k+1}}$',
r'$\frac{a}{b/2}$',
r'${a}_{0}+\frac{1}{{a}_{1}+\frac{1}{{a}_{2}+\frac{1}{{a}_{3}+\frac{1}{{a}_{4}}}}}$',
r'${a}_{0}+\frac{1}{{a}_{1}+\frac{1}{{a}_{2}+\frac{1}{{a}_{3}+\frac{1}{{a}_{4}}}}}$',
r'$\binom{n}{k/2}$',
r'$\binom{p}{2}{x}^{2}{y}^{p-2}-\frac{1}{1-x}\frac{1}{1-{x}^{2}}$',
r'${x}^{2y}$',
r'$\sum _{i=1}^{p}\sum _{j=1}^{q}\sum _{k=1}^{r}{a}_{ij}{b}_{jk}{c}_{ki}$',
r'$\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+x}}}}}}}$',
r'$\left(\frac{{\partial }^{2}}{\partial {x}^{2}}+\frac{{\partial }^{2}}{\partial {y}^{2}}\right){|\varphi \left(x+iy\right)|}^{2}=0$',
r'${2}^{{2}^{{2}^{x}}}$',
r'${\int }_{1}^{x}\frac{\mathrm{dt}}{t}$',
r'$\int {\int }_{D}\mathrm{dx} \mathrm{dy}$',
# mathtex doesn't support array
# 'mmltt18' : r'$f\left(x\right)=\left\{\begin{array}{cc}\hfill 1/3\hfill & \text{if_}0\le x\le 1;\hfill \\ \hfill 2/3\hfill & \hfill \text{if_}3\le x\le 4;\hfill \\ \hfill 0\hfill & \text{elsewhere.}\hfill \end{array}$',
# mathtex doesn't support stackrel
# 'mmltt19' : ur'$\stackrel{\stackrel{k\text{times}}{\ufe37}}{x+...+x}$',
r'${y}_{{x}^{2}}$',
# mathtex doesn't support the "\text" command
# 'mmltt21' : r'$\sum _{p\text{\prime}}f\left(p\right)={\int }_{t>1}f\left(t\right) d\pi \left(t\right)$',
# mathtex doesn't support array
# 'mmltt23' : r'$\left(\begin{array}{cc}\hfill \left(\begin{array}{cc}\hfill a\hfill & \hfill b\hfill \\ \hfill c\hfill & \hfill d\hfill \end{array}\right)\hfill & \hfill \left(\begin{array}{cc}\hfill e\hfill & \hfill f\hfill \\ \hfill g\hfill & \hfill h\hfill \end{array}\right)\hfill \\ \hfill 0\hfill & \hfill \left(\begin{array}{cc}\hfill i\hfill & \hfill j\hfill \\ \hfill k\hfill & \hfill l\hfill \end{array}\right)\hfill \end{array}\right)$',
# mathtex doesn't support array
# 'mmltt24' : u'$det|\\begin{array}{ccccc}\\hfill {c}_{0}\\hfill & \\hfill {c}_{1}\\hfill & \\hfill {c}_{2}\\hfill & \\hfill \\dots \\hfill & \\hfill {c}_{n}\\hfill \\\\ \\hfill {c}_{1}\\hfill & \\hfill {c}_{2}\\hfill & \\hfill {c}_{3}\\hfill & \\hfill \\dots \\hfill & \\hfill {c}_{n+1}\\hfill \\\\ \\hfill {c}_{2}\\hfill & \\hfill {c}_{3}\\hfill & \\hfill {c}_{4}\\hfill & \\hfill \\dots \\hfill & \\hfill {c}_{n+2}\\hfill \\\\ \\hfill \\u22ee\\hfill & \\hfill \\u22ee\\hfill & \\hfill \\u22ee\\hfill & \\hfill \\hfill & \\hfill \\u22ee\\hfill \\\\ \\hfill {c}_{n}\\hfill & \\hfill {c}_{n+1}\\hfill & \\hfill {c}_{n+2}\\hfill & \\hfill \\dots \\hfill & \\hfill {c}_{2n}\\hfill \\end{array}|>0$',
r'${y}_{{x}_{2}}$',
r'${x}_{92}^{31415}+\pi $',
r'${x}_{{y}_{b}^{a}}^{{z}_{c}^{d}}$',
r'${y}_{3}^{\prime \prime \prime }$',
r"$\left( \xi \left( 1 - \xi \right) \right)$", # Bug 2969451
r"$\left(2 \, a=b\right)$", # Sage bug #8125
r"$? ! &$", # github issue #466
r'$\operatorname{cos} x$', # github issue #553
r'$\sum _{\genfrac{}{}{0}{}{0\leq i\leq m}{0<j<n}}P\left(i,j\right)$'
]
digits = "0123456789"
uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
lowercase = "abcdefghijklmnopqrstuvwxyz"
uppergreek = ("\\Gamma \\Delta \\Theta \\Lambda \\Xi \\Pi \\Sigma \\Upsilon \\Phi \\Psi "
"\\Omega")
lowergreek = ("\\alpha \\beta \\gamma \\delta \\epsilon \\zeta \\eta \\theta \\iota "
"\\lambda \\mu \\nu \\xi \\pi \\kappa \\rho \\sigma \\tau \\upsilon "
"\\phi \\chi \\psi")
all = [digits, uppercase, lowercase, uppergreek, lowergreek]
font_test_specs = [
([], all),
(['mathrm'], all),
(['mathbf'], all),
(['mathit'], all),
(['mathtt'], [digits, uppercase, lowercase]),
(['mathcircled'], [digits, uppercase, lowercase]),
(['mathrm', 'mathcircled'], [digits, uppercase, lowercase]),
(['mathbf', 'mathcircled'], [digits, uppercase, lowercase]),
(['mathbb'], [digits, uppercase, lowercase,
r'\Gamma \Pi \Sigma \gamma \pi']),
(['mathrm', 'mathbb'], [digits, uppercase, lowercase,
r'\Gamma \Pi \Sigma \gamma \pi']),
(['mathbf', 'mathbb'], [digits, uppercase, lowercase,
r'\Gamma \Pi \Sigma \gamma \pi']),
(['mathcal'], [uppercase]),
(['mathfrak'], [uppercase, lowercase]),
(['mathbf', 'mathfrak'], [uppercase, lowercase]),
(['mathscr'], [uppercase, lowercase]),
(['mathsf'], [digits, uppercase, lowercase]),
(['mathrm', 'mathsf'], [digits, uppercase, lowercase]),
(['mathbf', 'mathsf'], [digits, uppercase, lowercase])
]
font_tests = []
for fonts, chars in font_test_specs:
wrapper = [' '.join(fonts), ' $']
for font in fonts:
wrapper.append(r'\%s{' % font)
wrapper.append('%s')
for font in fonts:
wrapper.append('}')
wrapper.append('$')
wrapper = ''.join(wrapper)
for set in chars:
font_tests.append(wrapper % set)
def make_set(basename, fontset, tests, extensions=None):
def make_test(filename, test):
@image_comparison(baseline_images=[filename], extensions=extensions,
tol=32)
def single_test():
matplotlib.rcParams['mathtext.fontset'] = fontset
fig = plt.figure(figsize=(5.25, 0.75))
fig.text(0.5, 0.5, test, horizontalalignment='center', verticalalignment='center')
func = single_test
func.__name__ = str("test_" + filename)
return func
# We inject test functions into the global namespace, rather than
# using a generator, so that individual tests can be run more
# easily from the commandline and so each test will have its own
# result.
for i, test in enumerate(tests):
filename = '%s_%s_%02d' % (basename, fontset, i)
globals()['test_%s' % filename] = make_test(filename, test)
make_set('mathtext', 'cm', math_tests)
make_set('mathtext', 'stix', math_tests)
make_set('mathtext', 'stixsans', math_tests)
make_set('mathfont', 'cm', font_tests, ['png'])
make_set('mathfont', 'stix', font_tests, ['png'])
make_set('mathfont', 'stixsans', font_tests, ['png'])
def test_fontinfo():
import matplotlib.font_manager as font_manager
import matplotlib.ft2font as ft2font
fontpath = font_manager.findfont("Bitstream Vera Sans")
font = ft2font.FT2Font(fontpath)
table = font.get_sfnt_table("head")
assert table['version'] == (1, 0)
def test_mathtext_exceptions():
errors = [
(r'$\hspace{}$', r'Expected \hspace{n}'),
(r'$\hspace{foo}$', r'Expected \hspace{n}'),
(r'$\frac$', r'Expected \frac{num}{den}'),
(r'$\frac{}{}$', r'Expected \frac{num}{den}'),
(r'$\stackrel$', r'Expected \stackrel{num}{den}'),
(r'$\stackrel{}{}$', r'Expected \stackrel{num}{den}'),
(r'$\binom$', r'Expected \binom{num}{den}'),
(r'$\binom{}{}$', r'Expected \binom{num}{den}'),
(r'$\genfrac$', r'Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}'),
(r'$\genfrac{}{}{}{}{}{}$', r'Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}'),
(r'$\sqrt$', r'Expected \sqrt{value}'),
(r'$\sqrt f$', r'Expected \sqrt{value}'),
(r'$\overline$', r'Expected \overline{value}'),
(r'$\overline{}$', r'Expected \overline{value}'),
(r'$\leftF$', r'Expected a delimiter'),
(r'$\rightF$', r'Unknown symbol: \rightF'),
(r'$\left(\right$', r'Expected a delimiter'),
(r'$\left($', r'Expected "\right"')
]
parser = mathtext.MathTextParser('agg')
for math, msg in errors:
try:
parser.parse(math)
except ValueError as e:
exc = str(e).split('\n')
print(e)
assert exc[3].startswith(msg)
else:
assert False, "Expected '%s', but didn't get it" % msg
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
agrimaldi/metaseq | metaseq/_genomic_signal.py | 2 | 9201 | """
The classes in this module enable random access to a variety of file formats
(BAM, bigWig, bigBed, BED) using a uniform syntax, and allow you to compute
coverage across many features in parallel or just a single feature.
Using classes in the :mod:`metaseq.integration` and :mod:`metaseq.minibrowser`
modules, you can connect these objects to matplotlib figures that show a window
into the data, making exploration easy and interactive.
Generally, the :func:`genomic_signal` function is all you need -- just provide
a filename and the format and it will take care of the rest, returning
a genomic signal of the proper type.
Adding support for a new format is straightforward:
* Write a new adapter for the format in :mod:`metaseq.filetype_adapters`
* Subclass one of the existing classes below, setting the `adapter`
attribute to be an instance of this new adapter
* Add the new class to the `_registry` dictionary to enable support for the
file format.
Note that to support parallel processing and to avoid repeating code, these
classes delegate their local_coverage methods to the
:func:`metaseq.array_helpers._local_coverage` function.
"""
import os
import sys
import subprocess
import numpy as np
from bx.bbi.bigwig_file import BigWigFile
import pybedtools
from array_helpers import _array, _array_parallel, _local_coverage, \
_local_count
import filetype_adapters
import helpers
from helpers import rebin
def supported_formats():
"""
Returns list of formats supported by metaseq's genomic signal objects.
"""
return _registry.keys()
def genomic_signal(fn, kind):
"""
Factory function that makes the right class for the file format.
Typically you'll only need this function to create a new genomic signal
object.
:param fn: Filename
:param kind:
String. Format of the file; see
metaseq.genomic_signal._registry.keys()
"""
try:
klass = _registry[kind.lower()]
except KeyError:
raise ValueError(
'No support for %s format, choices are %s'
% (kind, _registry.keys()))
m = klass(fn)
m.kind = kind
return m
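# Illustrative use of the factory; a sketch in which the filename is
# hypothetical and `interval` is assumed to be an interval-like object
# (e.g., a pybedtools.Interval):
#   sig = genomic_signal('sample.bam', 'bam')
#   x, y = sig.local_coverage(interval, bins=100)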
class BaseSignal(object):
"""
Base class to represent objects from which genomic signal can be
calculated/extracted.
`__getitem__` uses the underlying adapter the instance was created with
(e.g., :class:`metaseq.filetype_adapters.BamAdapter` for
a :class:`BamSignal` object).
"""
def __init__(self, fn):
self.fn = fn
def array(self, features, processes=None, chunksize=1, ragged=False,
**kwargs):
"""
Creates an MxN NumPy array of genomic signal for the region defined by
each feature in `features`, where M=len(features) and N=(bins or
feature length)
Parameters
----------
features : iterable of interval-like objects
An iterable of interval-like objects; see docstring for
`local_coverage` method for more details.
processes : int or None
If not None, then create the array in parallel, giving each process
chunks of length `chunksize` to work on.
chunksize : int
`features` will be split into `chunksize` pieces, and each piece
will be given to a different process. The optimum value is
dependent on the size of the features and the underlying data set,
but `chunksize=100` is a good place to start.
ragged : bool
If False (default), then return a 2-D NumPy array. This requires
all rows to have the same number of columns, which you get when
supplying `bins` or if all features are of uniform length. If
True, then return a list of 1-D NumPy arrays
Notes
-----
Additional keyword args are passed to local_coverage() which performs
the work for each feature; see that method for more details.
"""
if processes is not None:
arrays = _array_parallel(
self.fn, self.__class__, features, processes=processes,
chunksize=chunksize, **kwargs)
else:
arrays = _array(self.fn, self.__class__, features, **kwargs)
if not ragged:
stacked_arrays = np.row_stack(arrays)
del arrays
return stacked_arrays
else:
return arrays
def local_coverage(self, features, *args, **kwargs):
processes = kwargs.pop('processes', None)
if not processes:
return _local_coverage(self.adapter, features, *args, **kwargs)
if isinstance(features, (list, tuple)):
raise ValueError(
"only single features are supported for parallel "
"local_coverage")
# we don't want to have self.array do the binning
bins = kwargs.pop('bins', None)
# since if we got here processes is not None, then this will trigger
# a parallel array creation
features = helpers.tointerval(features)
x = np.arange(features.start, features.stop)
features = list(helpers.split_feature(features, processes))
ys = self.array(
features, *args, bins=None, processes=processes, ragged=True,
**kwargs)
# now we ravel() and re-bin
y = np.column_stack(ys).ravel()
if bins:
xi, yi = rebin(x, y, bins)
del x, y
return xi, yi
return x, y
local_coverage.__doc__ = _local_coverage.__doc__
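# Hedged sketch of the parallel array() call documented above (the BAM path
# and intervals are placeholders, and pybedtools.Interval objects are assumed
# to be acceptable "interval-like" features):
#
#     sig = genomic_signal('example.bam', 'bam')
#     features = [pybedtools.Interval('chr1', s, s + 1000)
#                 for s in range(0, 100000, 1000)]
#     arr = sig.array(features, bins=100, processes=4, chunksize=25)
#     # arr has one row per feature and 100 columns (bins).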
class BigWigSignal(BaseSignal):
def __init__(self, fn):
"""
Class for operating on bigWig files
"""
super(BigWigSignal, self).__init__(fn)
self.adapter = filetype_adapters.BigWigAdapter(fn)
class IntervalSignal(BaseSignal):
def __init__(self, fn):
"""
Abstract class for bed, BAM and bigBed files.
"""
BaseSignal.__init__(self, fn)
def local_count(self, *args, **kwargs):
return _local_count(self.adapter, *args, **kwargs)
local_count.__doc__ = _local_count.__doc__
class BamSignal(IntervalSignal):
def __init__(self, fn):
"""
Class for operating on BAM files.
"""
BaseSignal.__init__(self, fn)
self._readcount = None
self.adapter = filetype_adapters.BamAdapter(self.fn)
def genome(self):
"""
"genome" dictionary ready for pybedtools, based on the BAM header.
"""
# This gets the underlying pysam Samfile object
f = self.adapter.fileobj
d = {}
for ref, length in zip(f.references, f.lengths):
d[ref] = (0, length)
return d
def mapped_read_count(self, force=False):
"""
Counts total reads in a BAM file.
        If a file self.fn + '.mmr' exists, then just read the first line of
that file that doesn't start with a "#". If such a file doesn't exist,
then it will be created with the number of reads as the first and only
line in the file.
The result is also stored in self._readcount so that the time-consuming
part only runs once; use force=True to force re-count.
Parameters
----------
force : bool
If True, then force a re-count; otherwise use cached data if
available.
"""
# Already run?
if self._readcount and not force:
return self._readcount
if os.path.exists(self.fn + '.mmr') and not force:
for line in open(self.fn + '.mmr'):
if line.startswith('#'):
continue
self._readcount = float(line.strip())
return self._readcount
cmds = ['samtools',
'view',
'-c',
'-F', '0x4',
self.fn]
p = subprocess.Popen(
cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if stderr:
sys.stderr.write('samtools says: %s' % stderr)
return None
mapped_reads = int(stdout)
# write to file so the next time you need the lib size you can access
# it quickly
if not os.path.exists(self.fn + '.mmr'):
fout = open(self.fn + '.mmr', 'w')
fout.write(str(mapped_reads) + '\n')
fout.close()
self._readcount = mapped_reads
return self._readcount
class BigBedSignal(IntervalSignal):
def __init__(self, fn):
"""
Class for operating on bigBed files.
"""
IntervalSignal.__init__(self, fn)
self.adapter = filetype_adapters.BigBedAdapter(fn)
class BedSignal(IntervalSignal):
def __init__(self, fn):
"""
Class for operating on BED files.
"""
IntervalSignal.__init__(self, fn)
self.adapter = filetype_adapters.BedAdapter(fn)
_registry = {
'bam': BamSignal,
'bed': BedSignal,
'gff': BedSignal,
'gtf': BedSignal,
'vcf': BedSignal,
'bigwig': BigWigSignal,
'bigbed': BigBedSignal,
}
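# Hedged sketch of the extension recipe from the module docstring (kept as a
# comment so that module behaviour is unchanged): a new BED-like format would
# only need a Signal subclass wired to an adapter plus a _registry entry, e.g.
#
#     class NarrowPeakSignal(IntervalSignal):
#         def __init__(self, fn):
#             IntervalSignal.__init__(self, fn)
#             self.adapter = filetype_adapters.BedAdapter(fn)
#
#     _registry['narrowpeak'] = NarrowPeakSignal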
| mit |
cloud-fan/spark | python/pyspark/pandas/tests/test_categorical.py | 14 | 16649 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class CategoricalTest(PandasOnSparkTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(
["b", "a", "c", "c", "b", "a"], categories=["c", "b", "d", "a"]
),
},
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
return self.pdf, self.psdf
def test_categorical_frame(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b, pdf.b)
self.assert_eq(psdf.index, pdf.index)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
def test_categorical_series(self):
pser = pd.Series([1, 2, 3], dtype="category")
psser = ps.Series([1, 2, 3], dtype="category")
self.assert_eq(psser, pser)
self.assert_eq(psser.cat.categories, pser.cat.categories)
self.assert_eq(psser.cat.codes, pser.cat.codes)
self.assert_eq(psser.cat.ordered, pser.cat.ordered)
def test_astype(self):
pser = pd.Series(["a", "b", "c"])
psser = ps.from_pandas(pser)
self.assert_eq(psser.astype("category"), pser.astype("category"))
self.assert_eq(
psser.astype(CategoricalDtype(["c", "a", "b"])),
pser.astype(CategoricalDtype(["c", "a", "b"])),
)
pcser = pser.astype(CategoricalDtype(["c", "a", "b"]))
kcser = psser.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(kcser.astype("category"), pcser.astype("category"))
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pcser.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pser.astype(CategoricalDtype(["b", "c", "a"])),
)
self.assert_eq(kcser.astype(str), pcser.astype(str))
def test_factorize(self):
pser = pd.Series(["a", "b", "c", None], dtype=CategoricalDtype(["c", "a", "d", "b"]))
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = psser.factorize()
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
pcodes, puniques = pser.factorize(na_sentinel=-2)
kcodes, kuniques = psser.factorize(na_sentinel=-2)
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
def test_frame_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.apply(lambda x: x).sort_index(), pdf.apply(lambda x: x).sort_index())
self.assert_eq(
psdf.apply(lambda x: x, axis=1).sort_index(),
pdf.apply(lambda x: x, axis=1).sort_index(),
)
def test_frame_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_apply()
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c"])
def categorize(ser) -> ps.Series[dtype]:
return ser.astype(dtype)
self.assert_eq(
psdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
pdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_frame_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.transform(lambda x: x), pdf.transform(lambda x: x))
self.assert_eq(psdf.transform(lambda x: x.cat.codes), pdf.transform(lambda x: x.cat.codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.transform(lambda x: x.astype(dtype)).sort_index(),
pdf.transform(lambda x: x.astype(dtype)).sort_index(),
)
def test_frame_transform_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_transform()
pdf, psdf = self.df_pair
def codes(pser) -> ps.Series[np.int8]:
return pser.cat.codes
self.assert_eq(psdf.transform(codes), pdf.transform(codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def to_category(pser) -> ps.Series[dtype]:
return pser.astype(dtype)
self.assert_eq(
psdf.transform(to_category).sort_index(), pdf.transform(to_category).sort_index()
)
def test_series_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.apply(lambda x: x).sort_index(), pdf.a.apply(lambda x: x).sort_index()
)
def test_series_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_series_apply()
pdf, psdf = self.df_pair
ret = psdf.a.dtype
def identity(pser) -> ret:
return pser
self.assert_eq(psdf.a.apply(identity).sort_index(), pdf.a.apply(identity).sort_index())
# TODO: The return type is still category.
# def to_str(x) -> str:
# return str(x)
#
# self.assert_eq(
# psdf.a.apply(to_str).sort_index(), pdf.a.apply(to_str).sort_index()
# )
def test_groupby_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").apply(lambda df: df).sort_index(),
pdf.groupby("a").apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
pdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
pdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
pdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
)
self.assert_eq(
psdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
pdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
)
# TODO: grouping by a categorical type sometimes preserves unused categories.
# self.assert_eq(
# psdf.groupby("a").apply(len).sort_index(), pdf.groupby("a").apply(len).sort_index(),
# )
def test_groupby_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_groupby_apply()
pdf, psdf = self.df_pair
def identity(df) -> ps.DataFrame[zip(psdf.columns, psdf.dtypes)]:
return df
self.assert_eq(
psdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
pdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_groupby_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").transform(lambda x: x).sort_index(),
pdf.groupby("a").transform(lambda x: x).sort_index(),
)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
pdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
)
def test_groupby_transform_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_groupby_transform()
pdf, psdf = self.df_pair
def identity(x) -> ps.Series[psdf.b.dtype]: # type: ignore
return x
self.assert_eq(
psdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
pdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def astype(x) -> ps.Series[dtype]:
return x.astype(dtype)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
pdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
)
else:
expected = pdf.groupby("a").transform(astype)
expected["b"] = dtype.categories.take(expected["b"].cat.codes).astype(dtype)
self.assert_eq(
psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
expected.sort_values("b").reset_index(drop=True),
)
def test_frame_apply_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(str)).sort_index(),
pdf.astype(str).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(dtype)).sort_index(),
pdf.astype(dtype).sort_index(),
)
def test_frame_apply_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_apply_batch()
pdf, psdf = self.df_pair
def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]': # noqa: F405
return pdf.astype(str)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(to_str).sort_values(["a", "b"]).reset_index(drop=True),
to_str(pdf).sort_values(["a", "b"]).reset_index(drop=True),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
ret = ps.DataFrame["a":dtype, "b":dtype]
def to_category(pdf) -> ret:
return pdf.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(to_category)
.sort_values(["a", "b"])
.reset_index(drop=True),
to_category(pdf).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_frame_transform_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.astype(str)).sort_index(),
pdf.astype(str).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b.cat.codes).sort_index(),
pdf.b.cat.codes.sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.astype(dtype)).sort_index(),
pdf.astype(dtype).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b.astype(dtype)).sort_index(),
pdf.b.astype(dtype).sort_index(),
)
def test_frame_transform_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_transform_batch()
pdf, psdf = self.df_pair
def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]': # noqa: F405
return pdf.astype(str)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_str).sort_index(),
to_str(pdf).sort_index(),
)
def to_codes(pdf) -> ps.Series[np.int8]:
return pdf.b.cat.codes
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_codes).sort_index(),
to_codes(pdf).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
ret = ps.DataFrame["a":dtype, "b":dtype]
def to_category(pdf) -> ret:
return pdf.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf).sort_index(),
)
def to_category(pdf) -> ps.Series[dtype]:
return pdf.b.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf).rename().sort_index(),
)
def test_series_transform_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(lambda pser: pser.astype(str)).sort_index(),
pdf.a.astype(str).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(lambda pser: pser.astype(dtype)).sort_index(),
pdf.a.astype(dtype).sort_index(),
)
def test_series_transform_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_series_transform_batch()
pdf, psdf = self.df_pair
def to_str(pser) -> ps.Series[str]:
return pser.astype(str)
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(to_str).sort_index(), to_str(pdf.a).sort_index()
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def to_category(pser) -> ps.Series[dtype]:
return pser.astype(dtype)
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf.a).sort_index(),
)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_categorical import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
shyamalschandra/scikit-learn | examples/plot_kernel_ridge_regression.py | 39 | 6259 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). In contrast,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
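# Hedged aside on the "closed form" claim in the docstring above: with kernel
# matrix K, regularization strength alpha and targets y, the KRR dual
# coefficients solve (K + alpha * I) c = y. The helper below is a minimal
# sketch of that solve and is not used by the example itself.
def _krr_closed_form(K, y, alpha=1.0):
    """Return dual coefficients c such that (K + alpha * I) c = y."""
    return np.linalg.solve(K + alpha * np.eye(K.shape[0]), y)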
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors',
zorder=2)
plt.scatter(X[:100], y[:100], c='k', label='data', zorder=1)
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
ssujit/mca | setup.py | 2 | 1618 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
"scipy", "numpy", "pandas"
]
test_requirements = [
# "numpy", "pandas"
]
setup(
name='mca',
version='1.0.1',
description='Multiple correspondence analysis with pandas',
long_description=readme + '\n\n' + history,
author='Emre Safak',
author_email='[email protected]',
url='https://github.com/esafak/mca',
download_url = 'https://github.com/esafak/mca/tarball/master',
py_modules=['mca'],
package_dir={'': 'src'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords=['mca', 'statistics'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
) | bsd-3-clause |
restudToolbox/package | respy/python/process/process_python.py | 1 | 1157 | import pandas as pd
import numpy as np
from respy.python.process.process_auxiliary import check_process
from respy.python.shared.shared_auxiliary import check_dataset
def process(respy_obj):
""" This function processes the dataset from disk.
"""
# Antibugging
assert respy_obj.get_attr('is_locked')
# Distribute class attributes
num_agents_est = respy_obj.get_attr('num_agents_est')
num_periods = respy_obj.get_attr('num_periods')
file_est = respy_obj.get_attr('file_est')
is_debug = respy_obj.get_attr('is_debug')
# Construct auxiliary objects
num_rows = num_agents_est * num_periods
# Check integrity of processing request
if is_debug:
assert check_process(file_est, respy_obj)
# Process dataset from files.
data_frame = pd.read_csv(file_est, delim_whitespace=True,
header=-1, na_values='.', dtype={0: np.int, 1: np.int, 2: np.int,
3: np.float, 4: np.int, 5: np.int, 6: np.int, 7: np.int},
nrows=num_rows)
# Check the dataset against the initialization files.
check_dataset(data_frame, respy_obj, 'est')
# Finishing
return data_frame
| mit |
kaichogami/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
SiLab-Bonn/Scarce | scarce/examples/sensor_3D.py | 1 | 3615 | ''' Example that creates a 3D pixel array with a given geometry.
.. NOTE::
The calculation of a partially depleted 3D sensor is supported.
'''
import numpy as np
from scarce import plot, sensor
def sensor_3D():
n_pixel_x, n_pixel_y = 3, 3
width_x = 250.
width_y = 50.
radius = 6.
nD = 2 # Number of columns per pixel
n_eff = 1e12
temperature = 300
V_bias = -20.
V_readout = 0.
pot_descr, geom_descr = sensor.sensor_3D(n_eff=n_eff,
V_bias=V_bias,
V_readout=V_readout,
temperature=temperature,
n_pixel_x=n_pixel_x,
n_pixel_y=n_pixel_y,
width_x=width_x,
width_y=width_y,
radius=radius,
nD=nD,
selection='drift',
resolution=60,
smoothing=2)
# Plot potential and field in 2D and 1d
import matplotlib.pyplot as plt
fig = plt.figure()
plot.get_3D_sensor_plot(fig, width_x, width_y,
radius, nD,
n_pixel_x, n_pixel_y,
V_bias=V_bias, V_readout=V_readout,
pot_func=pot_descr.get_potential_smooth,
field_func=pot_descr.get_field,
# Comment in if you want to see the mesh
mesh=None, # pot_descr.pot_data.mesh,
title='Potential and field of a 3D sensor, '\
'%dx%d pixel matrix, numerical solution' % \
(n_pixel_x, n_pixel_y))
# Get line between readout and bias column
for x, y in geom_descr.get_ro_col_offsets():
if geom_descr.position_in_center_pixel(x, y):
x_ro, y_ro = x, y
break
for x, y in list(geom_descr.get_center_bias_col_offsets()) + geom_descr.get_edge_bias_col_offsets():
if geom_descr.position_in_center_pixel(x, y):
x_bias, y_bias = x, y
break
# Plot selected line between readout and bias column
N = 1000
x = np.linspace(x_ro, x_bias, N)
y = np.linspace(y_ro, y_bias, N)
# Deselect position that is within the columns
sel = ~geom_descr.position_in_column(x, y)
x, y = x[sel], y[sel]
ax = fig.get_axes()[0]
ax.plot(x, y, '-', color='black', linewidth=2)
plt.show()
# Plot potential and field along selected line
phi_smooth = pot_descr.get_potential_smooth(x, y)
field = pot_descr.get_field(x, y)
position = np.sqrt(x ** 2 + y ** 2) # [um]
plt.plot(position, phi_smooth, color='blue', linewidth=2,
label='Potential')
plt.legend(loc=1)
plt.twinx(plt.gca())
field_abs = np.sqrt(field[0] ** 2 + field[1] ** 2)
plt.plot(position, field_abs, color='red', linewidth=2, label='Field')
x_r = position.max()
plt.plot([x_r, x_r], plt.ylim(), '-', label='Bias column')
plt.grid()
plt.legend(loc=4)
plt.show()
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.INFO,
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
sensor_3D()
| mit |
arahuja/scikit-learn | sklearn/feature_extraction/text.py | 4 | 49485 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
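        # Worked example (illustrative): with ngram_range=(5, 5), the input
        # "jumpy fox" is padded per word to " jumpy " and " fox ", giving the
        # n-grams ' jump', 'jumpy', 'umpy ' and ' fox '.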
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
    dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
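    Examples
    --------
    A minimal, illustrative sketch (the two toy documents are placeholders)::
        >>> hv = HashingVectorizer(n_features=8)
        >>> X = hv.transform(['the cat sat', 'on the mat'])
        >>> X.shape
        (2, 8)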
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, it represents absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, it represents absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
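    Examples
    --------
    A minimal, illustrative sketch (the toy corpus is a placeholder)::
        >>> corpus = ['the cat sat', 'the cat sat on the mat']
        >>> vectorizer = CountVectorizer()
        >>> X = vectorizer.fit_transform(corpus)
        >>> X.shape
        (2, 5)
        >>> sorted(vectorizer.vocabulary_)
        ['cat', 'mat', 'on', 'sat', 'the']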
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
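# --- Illustrative sketch (not part of the upstream scikit-learn module) ---
# A minimal, hypothetical example of the document-frequency pruning that
# fit_transform/_limit_features perform; the toy corpus and thresholds are
# assumptions chosen only for demonstration.
def _demo_count_vectorizer_pruning():
    corpus = [
        "the cat sat on the mat",
        "the dog sat on the log",
        "the cat chased the dog",
    ]
    # min_df=2 drops terms that appear in fewer than two documents;
    # max_df=1.0 keeps even terms that appear in every document.
    vectorizer = CountVectorizer(min_df=2, max_df=1.0)
    X = vectorizer.fit_transform(corpus)
    # vectorizer.vocabulary_ maps kept terms to column indices and
    # vectorizer.stop_words_ holds the pruned terms.
    return X, vectorizer.vocabulary_, vectorizer.stop_words_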
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# in-place *= is not supported for this sparse product, so rebind X
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
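# --- Illustrative sketch (not part of the upstream scikit-learn module) ---
# A rough, hypothetical check of the smoothed idf weights that
# TfidfTransformer.fit derives; it relies on the module-level numpy import.
def _demo_tfidf_transformer_idf():
    counts = np.array([[3, 0, 1],
                       [2, 0, 0],
                       [3, 0, 0],
                       [4, 0, 0],
                       [3, 2, 0],
                       [3, 0, 2]])
    transformer = TfidfTransformer(use_idf=True, smooth_idf=True)
    tfidf = transformer.fit_transform(counts)
    # With smooth_idf=True each weight is log((1 + n) / (1 + df)) + 1, so a
    # term occurring in every document (the first column) gets idf == 1.0.
    return transformer.idf_, tfidf.toarray()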
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document frequency
strictly higher than the given threshold (corpus specific stop words).
If float, the parameter represents a proportion of documents;
if integer, it represents absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document frequency
strictly lower than the given threshold.
This value is also called cut-off in the literature.
If float, the parameter represents a proportion of documents;
if integer, it represents absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count token occurrences, returning
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
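# --- Illustrative sketch (not part of the upstream scikit-learn module) ---
# End-to-end use of TfidfVectorizer on a hypothetical corpus, alongside the
# equivalent manual chain of CountVectorizer and TfidfTransformer.
def _demo_tfidf_vectorizer():
    corpus = [
        "the quick brown fox",
        "the lazy dog",
        "the quick dog jumps",
    ]
    vectorizer = TfidfVectorizer(norm='l2', use_idf=True, smooth_idf=True)
    X = vectorizer.fit_transform(corpus)
    # The same weights should be obtained by chaining the two estimators
    # by hand with matching parameters.
    counts = CountVectorizer().fit_transform(corpus)
    X_manual = TfidfTransformer(norm='l2', use_idf=True,
                                smooth_idf=True).fit_transform(counts)
    return X.toarray(), X_manual.toarray()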
| bsd-3-clause |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/core/panel.py | 7 | 55175 | """
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
import warnings
import numpy as np
from pandas.types.cast import (_infer_dtype_from_scalar,
_possibly_cast_item)
from pandas.types.common import (is_integer, is_list_like,
is_string_like, is_scalar)
from pandas.types.missing import notnull
import pandas.computation.expressions as expressions
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.core.missing as missing
from pandas import compat
from pandas.compat import (map, zip, range, u, OrderedDict, OrderedDefaultdict)
from pandas.compat.numpy import function as nv
from pandas.core.common import PandasError, _try_sort, _default_index
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.formats.printing import pprint_thing
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
from pandas.tools.util import cartesian_product
from pandas.util.decorators import (deprecate, Appender)
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
klass="Panel",
axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}")
_shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one"
" of\n%s" %
_shared_doc_kwargs['axes_single_arg'])
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> import numpy as np
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
if names is None:
names = ['time', 'panel']
time, panels = _ensure_like_indices(time, panels)
return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""
@property
def _constructor(self):
return type(self)
_constructor_sliced = DataFrame
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
def _init_data(self, data, copy, dtype, **kwargs):
"""
Generate ND initialization; axes are passed
as required objects to __init__
"""
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]
if kwargs:
raise TypeError('_init_data() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axes = None
if isinstance(data, BlockManager):
if any(x is not None for x in passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
elif isinstance(data, dict):
mgr = self._init_dict(data, passed_axes, dtype=dtype)
copy = False
dtype = None
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
elif is_scalar(data) and all(x is not None for x in passed_axes):
if dtype is None:
dtype, data = _infer_dtype_from_scalar(data)
values = np.empty([len(x) for x in passed_axes], dtype=dtype)
values.fill(data)
mgr = self._init_matrix(values, passed_axes, dtype=dtype,
copy=False)
copy = False
else: # pragma: no cover
raise PandasError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
def _init_dict(self, data, axes, dtype=None):
haxis = axes.pop(self._info_axis_number)
# prefilter if haxis passed
if haxis is not None:
haxis = _ensure_index(haxis)
data = OrderedDict((k, v)
for k, v in compat.iteritems(data)
if k in haxis)
else:
ks = list(data.keys())
if not isinstance(data, OrderedDict):
ks = _try_sort(ks)
haxis = Index(ks)
for k, v in compat.iteritems(data):
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
# extract axis for remaining axes & create the slicemap
raxes = [self._extract_axis(self, data, axis=i) if a is None else a
for i, a in enumerate(axes)]
raxes_sm = self._extract_axes_for_slice(self, raxes)
# shallow copy
arrays = []
haxis_shape = [len(a) for a in raxes]
for h in haxis:
v = values = data.get(h)
if v is None:
values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
elif isinstance(v, self._constructor_sliced):
d = raxes_sm.copy()
d['copy'] = False
v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
arrays.append(values)
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
return create_block_manager_from_arrays(arrays, arr_names, axes)
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
Panel
"""
orient = orient.lower()
if orient == 'minor':
new_data = OrderedDefaultdict(dict)
for col, df in compat.iteritems(data):
for item, s in compat.iteritems(df):
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('Orientation must be one of {items, minor}.')
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
ks = list(d['data'].keys())
if not isinstance(d['data'], OrderedDict):
ks = list(sorted(ks))
d[cls._info_axis_name] = Index(ks)
return cls(**d)
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
if isinstance(self._info_axis, MultiIndex):
return self._getitem_multilevel(key)
if not (is_list_like(key) or isinstance(key, slice)):
return super(Panel, self).__getitem__(key)
return self.ix[key]
def _getitem_multilevel(self, key):
info = self._info_axis
loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_index = info[loc]
result_index = maybe_droplevels(new_index, key)
slices = [loc] + [slice(None) for x in range(self._AXIS_LEN - 1)]
new_values = self.values[slices]
d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
d[self._info_axis_name] = result_index
result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to %s' % dtype)
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = _default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
return create_block_manager_from_blocks([values], fixed_axes)
# ----------------------------------------------------------------------
# Comparison methods
def _compare_constructor(self, other, func):
if not self._indexed_same(other):
raise Exception('Can only compare identically-labeled '
'same type objects')
new_data = {}
for col in self._info_axis:
new_data[col] = func(self[col], other[col])
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
# ----------------------------------------------------------------------
# Magic methods
def __unicode__(self):
"""
Return a string representation for a particular Panel
Invoked by unicode(df) in py2 only.
Yields a Unicode String in both py2/py3.
"""
class_name = str(self.__class__)
shape = self.shape
dims = u('Dimensions: %s') % ' x '.join(
["%d (%s)" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])
def axis_pretty(a):
v = getattr(self, a)
if len(v) > 0:
return u('%s axis: %s to %s') % (a.capitalize(),
pprint_thing(v[0]),
pprint_thing(v[-1]))
else:
return u('%s axis: None') % a.capitalize()
output = '\n'.join(
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def _get_plane_axes_index(self, axis):
"""
Get my plane axes names: the two axes that remain after slicing
along the given axis (as compared with higher level planes),
since such a slice is a DataFrame
"""
axis_name = self._get_axis_name(axis)
if axis_name == 'major_axis':
index = 'minor_axis'
columns = 'items'
elif axis_name == 'minor_axis':
index = 'major_axis'
columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
def _get_plane_axes(self, axis):
"""
Get my plane axes: the two axis indexes that remain after slicing
along the given axis (as compared with higher level planes),
since such a slice is a DataFrame
"""
return [self._get_axis(axi)
for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
def to_sparse(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as sparsifying is not
supported for Panel objects and will raise an error.
Convert to SparsePanel
"""
raise NotImplementedError("sparsifying is not supported "
"for Panel objects")
def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
Other Parameters
----------------
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
Notes
-----
Keyword arguments (and na_rep) are passed to the ``to_excel`` method
for each DataFrame written.
"""
from pandas.io.excel import ExcelWriter
if isinstance(path, compat.string_types):
writer = ExcelWriter(path, engine=engine)
else:
writer = path
kwargs['na_rep'] = na_rep
for item, df in self.iteritems():
name = str(item)
df.to_excel(writer, name, **kwargs)
writer.save()
def as_matrix(self):
self._consolidate_inplace()
return self._data.as_matrix()
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args, **kwargs):
"""
Quickly retrieve single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
takeable : interpret the passed labels as indexers, default False
Returns
-------
value : scalar value
"""
nargs = len(args)
nreq = self._AXIS_LEN
# require an arg for each axis
if nargs != nreq:
raise TypeError('There must be an argument for each axis, you gave'
' {0} args, but {1} are required'.format(nargs,
nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('get_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
return lower.get_value(*args[1:], takeable=takeable)
def set_value(self, *args, **kwargs):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
# require an arg for each axis and the value
nargs = len(args)
nreq = self._AXIS_LEN + 1
if nargs != nreq:
raise TypeError('There must be an argument for each axis plus the '
'value provided, you gave {0} args, but {1} are '
'required'.format(nargs, nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('set_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
try:
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
lower.set_value(*args[1:], takeable=takeable)
return self
except KeyError:
axes = self._expand_axes(args)
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(axes[0], self._info_axis)
# how to make this logic simpler?
if made_bigger:
_possibly_cast_item(result, args[0], likely_dtype)
return result.set_value(*args)
def _box_item_values(self, key, values):
if self.ndim == values.ndim:
result = self._constructor(values)
# a dup selection will yield a full ndim
if result._get_axis(0).is_unique:
result = result[key]
return result
d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
if value.shape != shape[1:]:
raise ValueError('shape of value must be {0}, shape of given '
'object was {1}'.format(
shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif is_scalar(value):
dtype, value = _infer_dtype_from_scalar(value)
mat = np.empty(shape[1:], dtype=dtype)
mat.fill(value)
else:
raise TypeError('Cannot set item of type: %s' % str(type(value)))
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
axes = self._get_plane_axes(axis)
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def head(self, n=5):
raise NotImplementedError
def tail(self, n=5):
raise NotImplementedError
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in Panel to a specified number of decimal places.
.. versionadded:: 0.18.0
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Panel object
See Also
--------
numpy.around
"""
nv.validate_round(args, kwargs)
if is_integer(decimals):
result = np.apply_along_axis(np.round, 0, self.values)
return self._wrap_result(result, axis=0)
raise TypeError("decimals must be an integer")
def _needs_reindex_multi(self, axes, method, level):
""" don't allow a multi reindex on Panel or above ndim """
return False
def align(self, other, **kwargs):
raise NotImplementedError
def dropna(self, axis=0, how='any', inplace=False):
"""
Drop 2D from panel, holding passed axis constant
Parameters
----------
axis : int, default 0
Axis to hold constant. E.g. axis=1 will drop major_axis entries
whose 2D slices contain NA data, as determined by `how`
how : {'all', 'any'}, default 'any'
'any': drop the entry if one or more values in its slice are NA.
'all': drop the entry only if all values in its slice are NA.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = notnull(values)
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
result = self.reindex_axis(new_ax, axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif is_scalar(other):
return self._combine_const(other, func)
else:
raise NotImplementedError("%s is not supported in combine "
"operation with %s" %
(str(type(other)), str(type(self))))
def _combine_const(self, other, func):
with np.errstate(all='ignore'):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
with np.errstate(all='ignore'):
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
with np.errstate(all='ignore'):
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def major_xs(self, key):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
Notes
-----
major_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of major_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 2)
def minor_xs(self, key):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
Returns
-------
y : DataFrame
index -> major axis, columns -> items
Notes
-----
minor_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of minor_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 1)
def xs(self, key, axis=1):
"""
Return slice of panel along selected axis
Parameters
----------
key : object
Label
axis : {'items', 'major', 'minor'}, default 1/'major'
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result
_xs = xs
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
ax = self._get_axis(axis)
key = ax[i]
# xs cannot handle a non-scalar key, so just reindex here
# if we have a multi-index and a single tuple, then its a reduction
# (GH 7516)
if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
if is_list_like(key):
indexer = {self._get_axis_name(axis): key}
return self.reindex(**indexer)
# a reduction
if axis == 0:
values = self._data.iget(i)
return self._box_item_values(key, values)
# xs by position
self._consolidate_inplace()
new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
Mapping function for the chosen axis
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame whose
columns are the Panel's items and whose index is a MultiIndex formed
of the Panel's major and minor axes.
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
# shaped like the return DataFrame
mask = notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
def construct_multi_parts(idx, n_repeat, n_shuffle=1):
axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
labels = [x[selector] for x in axis_idx.labels]
levels = axis_idx.levels
names = axis_idx.names
return labels, levels, names
def construct_index_parts(idx, major=True):
levels = [idx]
if major:
labels = [np.arange(N).repeat(K)[selector]]
names = idx.name or 'major'
else:
labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
labels = [labels.ravel()[selector]]
names = idx.name or 'minor'
names = [names]
return labels, levels, names
if isinstance(self.major_axis, MultiIndex):
major_labels, major_levels, major_names = construct_multi_parts(
self.major_axis, n_repeat=K)
else:
major_labels, major_levels, major_names = construct_index_parts(
self.major_axis)
if isinstance(self.minor_axis, MultiIndex):
minor_labels, minor_levels, minor_names = construct_multi_parts(
self.minor_axis, n_repeat=N, n_shuffle=K)
else:
minor_labels, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
levels = major_levels + minor_levels
labels = major_labels + minor_labels
names = major_names + minor_names
index = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def apply(self, func, axis='major', **kwargs):
"""
Applies function along axis (or axes) of the Panel
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', the combination of major_axis/minor_axis
will each be passed as a Series; if axis = ('items', 'major'),
DataFrames of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
axes
Additional keyword arguments will be passed as keywords to the function
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4,3,2))
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1)
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='minor')
Return the shapes of each DataFrame over axis 2 (i.e. the shapes of
items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1))
Returns
-------
result : Panel, DataFrame, or Series
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
with np.errstate(all='ignore'):
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis)
def _apply_1d(self, func, axis):
axis_name = self._get_axis_name(axis)
ndim = self.ndim
values = self.values
# iter thru the axes
slice_axis = self._get_axis(axis)
slice_indexer = [0] * (ndim - 1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
planes = [self._get_axis(axi) for axi in indlist]
shape = np.array(self.shape).take(indlist)
# all the iteration points
points = cartesian_product(planes)
results = []
for i in range(np.prod(shape)):
# construct the object
pts = tuple([p[i] for p in points])
indexer.put(indlist, slice_indexer)
obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
result = func(obj)
results.append(result)
# increment the indexer
slice_indexer[-1] += 1
n = -1
while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
slice_indexer[n - 1] += 1
slice_indexer[n] = 0
n -= 1
# empty object
if not len(results):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
if isinstance(results[0], Series):
arr = np.vstack([r.values for r in results])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
tranp = np.array([axis] + indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
return self._constructor(arr, **self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
return self._construct_return_type(results, planes)
def _apply_2d(self, func, axis):
""" handle 2-d slices, equiv to iterating over the other axis """
ndim = self.ndim
axis = [self._get_axis_number(a) for a in axis]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
for a in axis:
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
slicer = [slice(None, None)] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
results.append((e, obj))
return self._construct_return_type(dict(results))
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if numeric_only:
raise NotImplementedError('Panel.{0} does not implement '
'numeric_only.'.format(name))
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
with np.errstate(all='ignore'):
result = f(self.values)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
def _construct_return_type(self, result, axes=None):
""" return the type for the ndim of the result """
ndim = getattr(result, 'ndim', None)
# need to assume they are the same
if ndim is None:
if isinstance(result, dict):
ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
ndim += 1
# scalar
if ndim == 0:
return Series(result)
# same as self
elif self.ndim == ndim:
# return the construction dictionary for these axes
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
# sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
return self._constructor_sliced(
result, **self._extract_axes_for_slice(self, axes))
raise PandasError('invalid _construct_return_type [self->%s] '
'[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(Panel, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# check if a list of axes was passed in instead as a
# single *args element
if (len(args) == 1 and hasattr(args[0], '__iter__') and
not is_string_like(args[0])):
axes = args[0]
else:
axes = args
if 'axes' in kwargs and axes:
raise TypeError("transpose() got multiple values for "
"keyword argument 'axes'")
elif not axes:
axes = kwargs.pop('axes', ())
return super(Panel, self).transpose(*axes, **kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Panel, self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
def shift(self, periods=1, freq=None, axis='major'):
"""
Shift index by desired number of periods with an optional time freq.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original. This is different
from the behavior of DataFrame.shift()
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
shifted : Panel
"""
if freq:
return self.tshift(periods, freq, axis=axis)
return super(Panel, self).slice_shift(periods, axis=axis)
def tshift(self, periods=1, freq=None, axis='major'):
return super(Panel, self).tshift(periods, freq, axis)
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
Join items with other Panel, aligning on the major and minor axes
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.tools.merge import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify Panel in place using non-NA values from passed
Panel, or object coercible to Panel. Aligns on items
Parameters
----------
other : Panel, or object coercible to Panel
join : How to join individual DataFrames
{'left', 'right', 'outer', 'inner'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling panel
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : bool
If True, will raise an error if a DataFrame and other both
contain data in the same place.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join, overwrite, filter_func,
raise_conflict)
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
""" return a list of the axis indices """
return [self._extract_axis(self, data, axis=i, **kwargs)
for i, a in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict([(self._AXIS_SLICEMAP[i], a)
for i, a in zip(
self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],
axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
dict of aligned results & indices
"""
result = dict()
# the caller may pass a dict or an OrderedDict; preserve that type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in compat.iteritems(frames):
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))])
reindex_dict = dict(
[(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
reindex_dict['copy'] = False
for key, frame in compat.iteritems(adj_frames):
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
axes_dict['dtype'] = dtype
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, self._constructor_sliced):
have_frames = True
indexes.append(v._get_axis(axis))
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_combined_index(indexes, intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on axis %d' % axis)
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return _ensure_index(index)
@classmethod
def _add_aggregate_operations(cls, use_numexpr=True):
""" add the operations to the cls; evaluate the doc strings again """
# doc string substitutions
_agg_doc = """
Wrapper method for %%s
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + "\n"
def _panel_arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True,
**eval_kwargs)
except TypeError:
result = op(x, y)
# handles discrepancy between numpy and numexpr on division/mod
# by 0 though, given that these are generally (always?)
# non-scalars, I'm not sure whether it's worth it at the moment
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' panel'
else:
equiv = 'panel ' + op_desc['op'] + ' other'
_op_doc = """
%%s of panel and other, element-wise (binary operator `%%s`).
Equivalent to ``%%s``.
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__,
cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + """
See also
--------
""" + cls.__name__ + ".%s\n"
doc = _op_doc % (op_desc['desc'], op_name, equiv,
op_desc['reverse'])
else:
doc = _agg_doc % name
@Appender(doc)
def f(self, other, axis=0):
return self._combine(other, na_op, axis=axis)
f.__name__ = name
return f
# add `div`, `mul`, `pow`, etc..
ops.add_flex_arithmetic_methods(
cls, _panel_arith_method, use_numexpr=use_numexpr,
flex_comp_method=ops._comp_method_PANEL)
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,
stat_axis=1, aliases={'major': 'major_axis',
'minor': 'minor_axis'},
slicers={'major_axis': 'index',
'minor_axis': 'columns'})
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
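# --- Illustrative sketch (not part of the upstream pandas module) ---
# A small, hypothetical example of building a Panel from a dict of
# DataFrames, slicing it along the major axis, and flattening it with
# to_frame(); the item/column labels are assumptions for demonstration.
def _demo_panel_usage():
    frames = {
        'ItemA': DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C']),
        'ItemB': DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C']),
    }
    wp = Panel(frames)
    # DataFrame with index -> minor_axis and columns -> items
    sliced = wp.major_xs(wp.major_axis[0])
    # long format: one row per (major, minor) pair, one column per item
    long_format = wp.to_frame()
    return sliced, long_format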
# legacy
class WidePanel(Panel):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("WidePanel is deprecated. Please use Panel",
FutureWarning, stacklevel=2)
super(WidePanel, self).__init__(*args, **kwargs)
class LongPanel(DataFrame):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("LongPanel is deprecated. Please use DataFrame",
FutureWarning, stacklevel=2)
super(LongPanel, self).__init__(*args, **kwargs)
| mit |
Ziqi-Li/bknqgis | pandas/pandas/tests/io/json/test_normalize.py | 14 | 11514 | import pytest
import numpy as np
import json
import pandas.util.testing as tm
from pandas import compat, Index, DataFrame
from pandas.io.json import json_normalize
from pandas.io.json.normalize import nested_to_record
@pytest.fixture
def deep_nested():
# deeply nested data
return [{'country': 'USA',
'states': [{'name': 'California',
'cities': [{'name': 'San Francisco',
'pop': 12345},
{'name': 'Los Angeles',
'pop': 12346}]
},
{'name': 'Ohio',
'cities': [{'name': 'Columbus',
'pop': 1234},
{'name': 'Cleveland',
'pop': 1236}]}
]
},
{'country': 'Germany',
'states': [{'name': 'Bayern',
'cities': [{'name': 'Munich', 'pop': 12347}]
},
{'name': 'Nordrhein-Westfalen',
'cities': [{'name': 'Duesseldorf', 'pop': 1238},
{'name': 'Koeln', 'pop': 1239}]}
]
}
]
@pytest.fixture
def state_data():
return [
{'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}],
'info': {'governor': 'Rick Scott'},
'shortname': 'FL',
'state': 'Florida'},
{'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}],
'info': {'governor': 'John Kasich'},
'shortname': 'OH',
'state': 'Ohio'}]
class TestJSONNormalize(object):
def test_simple_records(self):
recs = [{'a': 1, 'b': 2, 'c': 3},
{'a': 4, 'b': 5, 'c': 6},
{'a': 7, 'b': 8, 'c': 9},
{'a': 10, 'b': 11, 'c': 12}]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self, state_data):
result = json_normalize(state_data[0], 'counties')
expected = DataFrame(state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties')
expected = []
for rec in state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties', meta='state')
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_empty_array(self):
result = json_normalize([])
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
# GH 14883
result = json_normalize({'A': {'A': 1, 'B': 2}})
expected = DataFrame([[1, 2]], columns=['A.A', 'A.B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({'A': {'A': 1, 'B': 2}}, sep='_')
expected = DataFrame([[1, 2]], columns=['A_A', 'A_B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({'A': {'A': 1, 'B': 2}}, sep=u'\u03c3')
expected = DataFrame([[1, 2]], columns=[u'A\u03c3A', u'A\u03c3B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize(deep_nested, ['states', 'cities'],
meta=['country', ['states', 'name']],
sep='_')
expected = Index(['name', 'pop',
'country', 'states_name']).sort_values()
assert result.columns.sort_values().equals(expected)
def test_more_deeply_nested(self, deep_nested):
result = json_normalize(deep_nested, ['states', 'cities'],
meta=['country', ['states', 'name']])
# meta_prefix={'states': 'state_'})
ex_data = {'country': ['USA'] * 4 + ['Germany'] * 3,
'states.name': ['California', 'California', 'Ohio', 'Ohio',
'Bayern', 'Nordrhein-Westfalen',
'Nordrhein-Westfalen'],
'name': ['San Francisco', 'Los Angeles', 'Columbus',
'Cleveland', 'Munich', 'Duesseldorf', 'Koeln'],
'pop': [12345, 12346, 1234, 1236, 12347, 1238, 1239]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_shallow_nested(self):
data = [{'state': 'Florida',
'shortname': 'FL',
'info': {
'governor': 'Rick Scott'
},
'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}]},
{'state': 'Ohio',
'shortname': 'OH',
'info': {
'governor': 'John Kasich'
},
'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}]}]
result = json_normalize(data, 'counties',
['state', 'shortname',
['info', 'governor']])
ex_data = {'name': ['Dade', 'Broward', 'Palm Beach', 'Summit',
'Cuyahoga'],
'state': ['Florida'] * 3 + ['Ohio'] * 2,
'shortname': ['FL', 'FL', 'FL', 'OH', 'OH'],
'info.governor': ['Rick Scott'] * 3 + ['John Kasich'] * 2,
'population': [12345, 40000, 60000, 1234, 1337]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_meta_name_conflict(self):
data = [{'foo': 'hello',
'bar': 'there',
'data': [{'foo': 'something', 'bar': 'else'},
{'foo': 'something2', 'bar': 'else2'}]}]
with pytest.raises(ValueError):
json_normalize(data, 'data', meta=['foo', 'bar'])
result = json_normalize(data, 'data', meta=['foo', 'bar'],
meta_prefix='meta')
for val in ['metafoo', 'metabar', 'foo', 'bar']:
assert val in result
def test_record_prefix(self, state_data):
result = json_normalize(state_data[0], 'counties')
expected = DataFrame(state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties',
meta='state',
record_prefix='county_')
expected = []
for rec in state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
expected = expected.rename(columns=lambda x: 'county_' + x)
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_non_ascii_key(self):
if compat.PY3:
testjson = (
b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' +
b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
).decode('utf8')
else:
testjson = ('[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]')
testdata = {
u'sub.A': [1, 3],
u'sub.B': [2, 4],
b"\xc3\x9cnic\xc3\xb8de".decode('utf8'): [0, 1]
}
expected = DataFrame(testdata)
result = json_normalize(json.loads(testjson))
tm.assert_frame_equal(result, expected)
class TestNestedToRecord(object):
def test_flat_stays_flat(self):
recs = [dict(flat1=1, flat2=2),
dict(flat1=3, flat2=4),
]
result = nested_to_record(recs)
expected = recs
assert result == expected
def test_one_level_deep_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1}
assert result == expected
def test_nested_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2),
nested=dict(e=dict(c=1, d=2),
d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
assert result == expected
def test_json_normalize_errors(self):
# GH14583: If meta keys are not always present
# a new option to set errors='ignore' has been implemented
i = {
"Trades": [{
"general": {
"tradeid": 100,
"trade_version": 1,
"stocks": [{
"symbol": "AAPL",
"name": "Apple",
"price": "0"
}, {
"symbol": "GOOG",
"name": "Google",
"price": "0"
}
]
}
}, {
"general": {
"tradeid": 100,
"stocks": [{
"symbol": "AAPL",
"name": "Apple",
"price": "0"
}, {
"symbol": "GOOG",
"name": "Google",
"price": "0"
}
]
}
}
]
}
j = json_normalize(data=i['Trades'],
record_path=[['general', 'stocks']],
meta=[['general', 'tradeid'],
['general', 'trade_version']],
errors='ignore')
expected = {'general.trade_version': {0: 1.0, 1: 1.0, 2: '', 3: ''},
'general.tradeid': {0: 100, 1: 100, 2: 100, 3: 100},
'name': {0: 'Apple', 1: 'Google', 2: 'Apple', 3: 'Google'},
'price': {0: '0', 1: '0', 2: '0', 3: '0'},
'symbol': {0: 'AAPL', 1: 'GOOG', 2: 'AAPL', 3: 'GOOG'}}
assert j.fillna('').to_dict() == expected
pytest.raises(KeyError,
json_normalize, data=i['Trades'],
record_path=[['general', 'stocks']],
meta=[['general', 'tradeid'],
['general', 'trade_version']],
errors='raise'
)
| gpl-2.0 |
xwolf12/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
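# Illustrative note added by the editor (not in the original script): with the
# regex above, punctuation is dropped and tokens are lower-cased, e.g.
#
#     list(tokens("Hello, World! Hello."))   # -> ['hello', 'world', 'hello']
#
# token_freqs() below would then map this document to {'hello': 2, 'world': 1}.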
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
witcxc/scipy | doc/source/tutorial/stats/plots/kde_plot3.py | 132 | 1229 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
np.random.seed(12456)
x1 = np.random.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
| bsd-3-clause |
breznak/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/collections.py | 69 | 39876 | """
Classes for the efficient drawing of large collections of objects that
share most properties, e.g. a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g. you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g. a bunch of solid
line segments)
"""
import copy, math, warnings
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as _colors # avoid conflict with kwarg
import matplotlib.cm as cm
import matplotlib.transforms as transforms
import matplotlib.artist as artist
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
import matplotlib.mlab as mlab
class Collection(artist.Artist, cm.ScalarMappable):
"""
Base class for Collections. Must be subclassed to be usable.
All properties in a collection must be sequences or scalars;
if scalars, they will be converted to sequences. The
property of the ith element of the collection is::
prop[i % len(props)]
Keyword arguments and default values:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets).
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
(ie a call to set_array has been made), at draw time a call to
scalar mappable will be made to set the face colors.
"""
_offsets = np.array([], np.float_)
_transOffset = transforms.IdentityTransform()
_transforms = []
zorder = 1
def __init__(self,
edgecolors=None,
facecolors=None,
linewidths=None,
linestyles='solid',
antialiaseds = None,
offsets = None,
transOffset = None,
norm = None, # optional for ScalarMappable
cmap = None, # ditto
pickradius = 5.0,
urls = None,
**kwargs
):
"""
Create a Collection
%(Collection)s
"""
artist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.set_edgecolor(edgecolors)
self.set_facecolor(facecolors)
self.set_linewidth(linewidths)
self.set_linestyle(linestyles)
self.set_antialiased(antialiaseds)
self.set_urls(urls)
self._uniform_offsets = None
self._offsets = np.array([], np.float_)
if offsets is not None:
offsets = np.asarray(offsets)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
if transOffset is not None:
self._offsets = offsets
self._transOffset = transOffset
else:
self._uniform_offsets = offsets
self._pickradius = pickradius
self.update(kwargs)
def _get_value(self, val):
try: return (float(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: float(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a float or nonzero sequence of floats')
def _get_bool(self, val):
try: return (bool(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: bool(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a bool or nonzero sequence of them')
def get_paths(self):
raise NotImplementedError
def get_transforms(self):
return self._transforms
def get_datalim(self, transData):
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
offsets = np.asarray(offsets, np.float_)
result = mpath.get_path_collection_extents(
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset.frozen())
result = result.inverse_transformed(transData)
return result
def get_window_extent(self, renderer):
bbox = self.get_datalim(transforms.IdentityTransform())
#TODO:check to ensure that this does not fail for
#cases other than scatter plot legend
return bbox
def _prepare_points(self):
"""Point prep for drawing and hit testing"""
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if self.have_units():
paths = []
for path in self.get_paths():
vertices = path.vertices
xs, ys = vertices[:, 0], vertices[:, 1]
xs = self.convert_xunits(xs)
ys = self.convert_yunits(ys)
paths.append(mpath.Path(zip(xs, ys), path.codes))
if len(self._offsets):
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if not transform.is_affine:
paths = [transform.transform_path_non_affine(path) for path in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
return transform, transOffset, offsets, paths
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
transform, transOffset, offsets, paths = self._prepare_points()
renderer.draw_path_collection(
transform.frozen(), self.clipbox, clippath, clippath_trans,
paths, self.get_transforms(),
offsets, transOffset,
self.get_facecolor(), self.get_edgecolor(), self._linewidths,
self._linestyles, self._antialiaseds, self._urls)
renderer.close_group(self.__class__.__name__)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the collection.
Returns True | False, ``dict(ind=itemlist)``, where every
item in itemlist contains the event.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible(): return False,{}
transform, transOffset, offsets, paths = self._prepare_points()
ind = mpath.point_in_path_collection(
mouseevent.x, mouseevent.y, self._pickradius,
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset, len(self._facecolors)>0)
return len(ind)>0,dict(ind=ind)
    def set_pickradius(self, pickradius): self._pickradius = pickradius
    def get_pickradius(self): return self._pickradius
def set_urls(self, urls):
if urls is None:
self._urls = [None,]
else:
self._urls = urls
def get_urls(self): return self._urls
def set_offsets(self, offsets):
"""
Set the offsets for the collection. *offsets* can be a scalar
or a sequence.
ACCEPTS: float or sequence of floats
"""
offsets = np.asarray(offsets, np.float_)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
#This decision is based on how they are initialized above
if self._uniform_offsets is None:
self._offsets = offsets
else:
self._uniform_offsets = offsets
def get_offsets(self):
"""
Return the offsets for the collection.
"""
#This decision is based on how they are initialized above in __init__()
if self._uniform_offsets is None:
return self._offsets
else:
return self._uniform_offsets
def set_linewidth(self, lw):
"""
Set the linewidth(s) for the collection. *lw* can be a scalar
or a sequence; if it is a sequence the patches will cycle
through the sequence
ACCEPTS: float or sequence of floats
"""
if lw is None: lw = mpl.rcParams['patch.linewidth']
self._linewidths = self._get_value(lw)
def set_linewidths(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the linestyle(s) for the collection.
        ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted' |
                   (offset, on-off-dash-seq) ]
"""
try:
dashd = backend_bases.GraphicsContextBase.dashd
if cbook.is_string_like(ls):
if ls in dashd:
dashes = [dashd[ls]]
elif ls in cbook.ls_mapper:
dashes = [dashd[cbook.ls_mapper[ls]]]
else:
raise ValueError()
elif cbook.iterable(ls):
try:
dashes = []
for x in ls:
if cbook.is_string_like(x):
if x in dashd:
dashes.append(dashd[x])
elif x in cbook.ls_mapper:
dashes.append(dashd[cbook.ls_mapper[x]])
else:
raise ValueError()
elif cbook.iterable(x) and len(x) == 2:
dashes.append(x)
else:
raise ValueError()
except ValueError:
if len(ls)==2:
dashes = ls
else:
raise ValueError()
else:
raise ValueError()
except ValueError:
raise ValueError('Do not know how to convert %s to dashes'%ls)
self._linestyles = dashes
def set_linestyles(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_dashes(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_antialiased(self, aa):
"""
Set the antialiasing state for rendering.
ACCEPTS: Boolean or sequence of booleans
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiaseds = self._get_bool(aa)
def set_antialiaseds(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_facecolor(self, c):
"""
Set the facecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c is None: c = mpl.rcParams['patch.facecolor']
self._facecolors_original = c
self._facecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_facecolors(self, c):
"""alias for set_facecolor"""
return self.set_facecolor(c)
def get_facecolor(self):
return self._facecolors
get_facecolors = get_facecolor
def get_edgecolor(self):
if self._edgecolors == 'face':
return self.get_facecolors()
else:
return self._edgecolors
get_edgecolors = get_edgecolor
def set_edgecolor(self, c):
"""
Set the edgecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'face', the edge color will always be the same as
the face color.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c == 'face':
self._edgecolors = 'face'
self._edgecolors_original = 'face'
else:
if c is None: c = mpl.rcParams['patch.edgecolor']
self._edgecolors_original = c
self._edgecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_edgecolors(self, c):
"""alias for set_edgecolor"""
return self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection. *alpha* must be
a float.
ACCEPTS: float
"""
try: float(alpha)
except TypeError: raise TypeError('alpha must be a float')
else:
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = _colors.colorConverter.to_rgba_array(
self._facecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
if self._edgecolors_original != 'face':
self._edgecolors = _colors.colorConverter.to_rgba_array(
self._edgecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
def get_linewidths(self):
return self._linewidths
get_linewidth = get_linewidths
def get_linestyles(self):
return self._linestyles
get_dashes = get_linestyle = get_linestyles
def update_scalarmappable(self):
"""
If the scalar mappable array is not none, update colors
from scalar data
"""
if self._A is None: return
if self._A.ndim > 1:
raise ValueError('Collections can only map rank 1 arrays')
if len(self._facecolors):
self._facecolors = self.to_rgba(self._A, self._alpha)
else:
self._edgecolors = self.to_rgba(self._A, self._alpha)
def update_from(self, other):
'copy properties from other to self'
artist.Artist.update_from(self, other)
self._antialiaseds = other._antialiaseds
self._edgecolors_original = other._edgecolors_original
self._edgecolors = other._edgecolors
self._facecolors_original = other._facecolors_original
self._facecolors = other._facecolors
self._linewidths = other._linewidths
self._linestyles = other._linestyles
self._pickradius = other._pickradius
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Collection'] = """\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
"""
class QuadMesh(Collection):
"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh consists of a grid of vertices. The
dimensions of this array are (*meshWidth* + 1, *meshHeight* +
1). Each vertex in the mesh has a different set of "mesh
coordinates" representing its position in the topology of the
    mesh. For any values (*m*, *n*) such that 0 <= *m* < *meshWidth*
    and 0 <= *n* < *meshHeight*, the vertices at mesh coordinates
(*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
form one of the quadrilaterals in the mesh. There are thus
(*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
need not be regular and the polygons need not be convex.
    A quadrilateral mesh is represented by a (((*meshWidth* + 1) *
    (*meshHeight* + 1)) x 2) numpy array *coordinates*, where each row is
    the *x* and *y* coordinates of one of the vertices. To define the
function that maps from a data point to its corresponding color,
use the :meth:`set_cmap` method. Each of these arrays is indexed in
row-major order by the mesh coordinates of the vertex (or the mesh
coordinates of the lower left vertex, in the case of the
colors).
For example, the first entry in *coordinates* is the
coordinates of the vertex at mesh coordinates (0, 0), then the one
at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
so on.
"""
def __init__(self, meshWidth, meshHeight, coordinates, showedges, antialiased=True):
Collection.__init__(self)
self._meshWidth = meshWidth
self._meshHeight = meshHeight
self._coordinates = coordinates
self._showedges = showedges
self._antialiased = antialiased
self._paths = None
self._bbox = transforms.Bbox.unit()
self._bbox.update_from_data_xy(coordinates.reshape(
((meshWidth + 1) * (meshHeight + 1), 2)))
# By converting to floats now, we can avoid that on every draw.
self._coordinates = self._coordinates.reshape((meshHeight + 1, meshWidth + 1, 2))
self._coordinates = np.array(self._coordinates, np.float_)
def get_paths(self, dataTrans=None):
if self._paths is None:
self._paths = self.convert_mesh_to_paths(
self._meshWidth, self._meshHeight, self._coordinates)
return self._paths
#@staticmethod
def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support quadmeshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
if ma.isMaskedArray(coordinates):
c = coordinates.data
else:
c = coordinates
points = np.concatenate((
c[0:-1, 0:-1],
c[0:-1, 1: ],
c[1: , 1: ],
c[1: , 0:-1],
c[0:-1, 0:-1]
), axis=2)
points = points.reshape((meshWidth * meshHeight, 5, 2))
return [Path(x) for x in points]
convert_mesh_to_paths = staticmethod(convert_mesh_to_paths)
def get_datalim(self, transData):
return self._bbox
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
if self.have_units():
if len(self._offsets):
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if self.check_update('array'):
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
if not transform.is_affine:
coordinates = self._coordinates.reshape(
(self._coordinates.shape[0] *
self._coordinates.shape[1],
2))
coordinates = transform.transform(coordinates)
coordinates = coordinates.reshape(self._coordinates.shape)
transform = transforms.IdentityTransform()
else:
coordinates = self._coordinates
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
renderer.draw_quad_mesh(
transform.frozen(), self.clipbox, clippath, clippath_trans,
self._meshWidth, self._meshHeight, coordinates,
offsets, transOffset, self.get_facecolor(), self._antialiased,
self._showedges)
renderer.close_group(self.__class__.__name__)
class PolyCollection(Collection):
def __init__(self, verts, sizes = None, closed = True, **kwargs):
"""
*verts* is a sequence of ( *verts0*, *verts1*, ...) where
*verts_i* is a sequence of *xy* tuples of vertices, or an
equivalent :mod:`numpy` array of shape (*nv*, 2).
*sizes* is *None* (default) or a sequence of floats that
scale the corresponding *verts_i*. The scaling is applied
before the Artist master transform; if the latter is an identity
transform, then the overall scaling is such that if
*verts_i* specify a unit square, then *sizes_i* is the area
of that square in points^2.
If len(*sizes*) < *nv*, the additional values will be
taken cyclically from the array.
*closed*, when *True*, will explicitly close the polygon.
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_verts(verts, closed)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def set_verts(self, verts, closed=True):
'''This allows one to delay initialization of the vertices.'''
if closed:
self._paths = []
for xy in verts:
if np.ma.isMaskedArray(xy):
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.ma.concatenate([xy, [xy[0]]])
else:
xy = np.asarray(xy)
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
self._paths.append(mpath.Path(xy))
else:
self._paths = [mpath.Path(xy) for xy in verts]
def get_paths(self):
return self._paths
def draw(self, renderer):
if self._sizes is not None:
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0))
for x in self._sizes]
return Collection.draw(self, renderer)
class BrokenBarHCollection(PolyCollection):
"""
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
"""
def __init__(self, xranges, yrange, **kwargs):
"""
*xranges*
sequence of (*xmin*, *xwidth*)
*yrange*
*ymin*, *ywidth*
%(Collection)s
"""
ymin, ywidth = yrange
ymax = ymin + ywidth
verts = [ [(xmin, ymin), (xmin, ymax), (xmin+xwidth, ymax), (xmin+xwidth, ymin), (xmin, ymin)] for xmin, xwidth in xranges]
PolyCollection.__init__(self, verts, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
@staticmethod
def span_where(x, ymin, ymax, where, **kwargs):
"""
Create a BrokenBarHCollection to plot horizontal bars from
over the regions in *x* where *where* is True. The bars range
on the y-axis from *ymin* to *ymax*
A :class:`BrokenBarHCollection` is returned.
*kwargs* are passed on to the collection
"""
xranges = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
if not len(xslice):
continue
xranges.append((xslice[0], xslice[-1]-xslice[0]))
collection = BrokenBarHCollection(xranges, [ymin, ymax-ymin], **kwargs)
return collection
class RegularPolyCollection(Collection):
"""Draw a collection of regular polygons with *numsides*."""
_path_generator = mpath.Path.unit_regular_polygon
def __init__(self,
numsides,
rotation = 0 ,
sizes = (1,),
**kwargs):
"""
*numsides*
the number of sides of the polygon
*rotation*
the rotation of the polygon in radians
*sizes*
gives the area of the circle circumscribing the
regular polygon in points^2
%(Collection)s
Example: see :file:`examples/dynamic_collection.py` for
complete example::
offsets = np.random.rand(20,2)
facecolors = [cm.jet(x) for x in np.random.rand(20)]
black = (0,0,0,1)
collection = RegularPolyCollection(
numsides=5, # a pentagon
rotation=0, sizes=(50,),
facecolors = facecolors,
edgecolors = (black,),
linewidths = (1,),
offsets = offsets,
transOffset = ax.transData,
)
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self._numsides = numsides
self._paths = [self._path_generator(numsides)]
self._rotation = rotation
self.set_transform(transforms.IdentityTransform())
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
self._transforms = [
transforms.Affine2D().rotate(-self._rotation).scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
def get_numsides(self):
return self._numsides
def get_rotation(self):
return self._rotation
def get_sizes(self):
return self._sizes
class StarPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular stars with *numsides* points."""
_path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular asterisks with *numsides* points."""
_path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
"""
All parameters must be sequences or scalars; if scalars, they will
be converted to sequences. The property of the ith line
segment is::
prop[i % len(props)]
i.e., the properties cycle if the ``len`` of props is less than the
number of segments.
"""
zorder = 2
def __init__(self, segments, # Can be None.
linewidths = None,
colors = None,
antialiaseds = None,
linestyles = 'solid',
offsets = None,
transOffset = None,
norm = None,
cmap = None,
pickradius = 5,
**kwargs
):
"""
*segments*
a sequence of (*line0*, *line1*, *line2*), where::
linen = (x0, y0), (x1, y1), ... (xm, ym)
or the equivalent numpy array with two columns. Each line
can be a different length.
*colors*
            must be a sequence of RGBA tuples (arbitrary color
            strings, etc., are not allowed).
*antialiaseds*
must be a sequence of ones or zeros
*linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
a string or dash tuple. The dash tuple is::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
If *linewidths*, *colors*, or *antialiaseds* is None, they
default to their rcParams setting, in sequence form.
If *offsets* and *transOffset* are not None, then
*offsets* are transformed by *transOffset* and applied after
the segments have been transformed to display coordinates.
If *offsets* is not None but *transOffset* is None, then the
*offsets* are added to the segments before any transformation.
In this case, a single offset can be specified as::
offsets=(xo,yo)
and this value will be added cumulatively to each successive
segment, so as to produce a set of successively offset curves.
*norm*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*cmap*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*pickradius* is the tolerance for mouse clicks picking a line.
The default is 5 pt.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix
:attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to
:meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
draw time a call to scalar mappable will be made to set the colors.
"""
if colors is None: colors = mpl.rcParams['lines.color']
if linewidths is None: linewidths = (mpl.rcParams['lines.linewidth'],)
if antialiaseds is None: antialiaseds = (mpl.rcParams['lines.antialiased'],)
self.set_linestyles(linestyles)
colors = _colors.colorConverter.to_rgba_array(colors)
Collection.__init__(
self,
edgecolors=colors,
linewidths=linewidths,
linestyles=linestyles,
antialiaseds=antialiaseds,
offsets=offsets,
transOffset=transOffset,
norm=norm,
cmap=cmap,
pickradius=pickradius,
**kwargs)
self.set_facecolors([])
self.set_segments(segments)
def get_paths(self):
return self._paths
def set_segments(self, segments):
if segments is None: return
_segments = []
for seg in segments:
if not np.ma.isMaskedArray(seg):
seg = np.asarray(seg, np.float_)
_segments.append(seg)
if self._uniform_offsets is not None:
_segments = self._add_offsets(_segments)
self._paths = [mpath.Path(seg) for seg in _segments]
set_verts = set_segments # for compatibility with PolyCollection
def _add_offsets(self, segs):
offsets = self._uniform_offsets
Nsegs = len(segs)
Noffs = offsets.shape[0]
if Noffs == 1:
for i in range(Nsegs):
segs[i] = segs[i] + i * offsets
else:
for i in range(Nsegs):
io = i%Noffs
segs[i] = segs[i] + offsets[io:io+1]
return segs
def set_color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self._edgecolors = _colors.colorConverter.to_rgba_array(c)
def color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
warnings.warn('LineCollection.color deprecated; use set_color instead')
return self.set_color(c)
def get_color(self):
return self._edgecolors
get_colors = get_color # for compatibility with old versions
class CircleCollection(Collection):
"""
A collection of circles, drawn using splines.
"""
def __init__(self, sizes, **kwargs):
"""
*sizes*
Gives the area of the circle in points^2
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_transform(transforms.IdentityTransform())
self._paths = [mpath.Path.unit_circle()]
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
# sizes is the area of the circle circumscribing the polygon
# in points^2
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class EllipseCollection(Collection):
"""
A collection of ellipses, drawn using splines.
"""
def __init__(self, widths, heights, angles, units='points', **kwargs):
"""
*widths*: sequence
half-lengths of first axes (e.g., semi-major axis lengths)
*heights*: sequence
half-lengths of second axes
*angles*: sequence
angles of first axes, degrees CCW from the X-axis
*units*: ['points' | 'inches' | 'dots' | 'width' | 'height' | 'x' | 'y']
units in which majors and minors are given; 'width' and 'height'
refer to the dimensions of the axes, while 'x' and 'y'
refer to the *offsets* data units.
Additional kwargs inherited from the base :class:`Collection`:
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._widths = np.asarray(widths).ravel()
self._heights = np.asarray(heights).ravel()
self._angles = np.asarray(angles).ravel() *(np.pi/180.0)
self._units = units
self.set_transform(transforms.IdentityTransform())
self._transforms = []
self._paths = [mpath.Path.unit_circle()]
self._initialized = False
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _init(self):
def on_dpi_change(fig):
self._transforms = []
self.figure.callbacks.connect('dpi_changed', on_dpi_change)
self._initialized = True
def set_transforms(self):
if not self._initialized:
self._init()
self._transforms = []
ax = self.axes
fig = self.figure
if self._units in ('x', 'y'):
if self._units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
sc = dx1/dx0
else:
if self._units == 'inches':
sc = fig.dpi
elif self._units == 'points':
sc = fig.dpi / 72.0
elif self._units == 'width':
sc = ax.bbox.width
elif self._units == 'height':
sc = ax.bbox.height
elif self._units == 'dots':
sc = 1.0
else:
raise ValueError('unrecognized units: %s' % self._units)
_affine = transforms.Affine2D
for x, y, a in zip(self._widths, self._heights, self._angles):
trans = _affine().scale(x * sc, y * sc).rotate(a)
self._transforms.append(trans)
def draw(self, renderer):
        # The cached-transform check (``if not self._transforms:``) is disabled
        # here, so the per-ellipse transforms are recomputed on every draw.
        self.set_transforms()
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class PatchCollection(Collection):
"""
A generic collection of patches.
This makes it easier to assign a color map to a heterogeneous
collection of patches.
This also may improve plotting speed, since PatchCollection will
draw faster than a large number of patches.
"""
def __init__(self, patches, match_original=False, **kwargs):
"""
*patches*
a sequence of Patch objects. This list may include
a heterogeneous assortment of different patch types.
*match_original*
If True, use the colors and linewidths of the original
patches. If False, new colors may be assigned by
providing the standard collection arguments, facecolor,
edgecolor, linewidths, norm or cmap.
If any of *edgecolors*, *facecolors*, *linewidths*,
*antialiaseds* are None, they default to their
:data:`matplotlib.rcParams` patch setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
None (ie a call to set_array has been made), at draw time a
call to scalar mappable will be made to set the face colors.
"""
if match_original:
def determine_facecolor(patch):
if patch.fill:
return patch.get_facecolor()
return [0, 0, 0, 0]
facecolors = [determine_facecolor(p) for p in patches]
edgecolors = [p.get_edgecolor() for p in patches]
linewidths = [p.get_linewidths() for p in patches]
antialiaseds = [p.get_antialiased() for p in patches]
Collection.__init__(
self,
edgecolors=edgecolors,
facecolors=facecolors,
linewidths=linewidths,
linestyles='solid',
antialiaseds = antialiaseds)
else:
Collection.__init__(self, **kwargs)
paths = [p.get_transform().transform_path(p.get_path())
for p in patches]
self._paths = paths
def get_paths(self):
return self._paths
artist.kwdocd['Collection'] = patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'PolyCollection', 'BrokenBarHCollection', 'RegularPolyCollection',
'StarPolygonCollection', 'PatchCollection', 'CircleCollection'):
artist.kwdocd[k] = patchstr
artist.kwdocd['LineCollection'] = artist.kwdoc(LineCollection)
| agpl-3.0 |
kinoc/opencog | opencog/python/spatiotemporal/temporal_events/__init__.py | 33 | 9273 | from scipy.stats.distributions import rv_frozen
from spatiotemporal.temporal_events.relation_formulas import FormulaCreator, RelationFormulaGeometricMean, BaseRelationFormula, RelationFormulaConvolution
from spatiotemporal.temporal_events.util import calculate_bounds_of_probability_distribution
from spatiotemporal.time_intervals import check_is_time_interval, TimeInterval
from spatiotemporal.temporal_events.membership_function import MembershipFunction, ProbabilityDistributionPiecewiseLinear
from spatiotemporal.unix_time import UnixTime
from utility.generic import convert_dict_to_sorted_lists
from utility.functions import FunctionPiecewiseLinear, FUNCTION_ZERO
__author__ = 'keyvan'
class TemporalEvent(list, TimeInterval):
_distribution_beginning = None
_distribution_ending = None
_beginning = None
_ending = None
_dict = None
def __init__(self, distribution_beginning, distribution_ending,
bins=50, relation_formula=None):
if not isinstance(distribution_beginning, rv_frozen):
raise TypeError("'distribution_beginning' should be a scipy frozen distribution")
if not isinstance(distribution_ending, rv_frozen):
raise TypeError("'distribution_ending' should be a scipy frozen distribution")
self._distribution_beginning = distribution_beginning
self._distribution_ending = distribution_ending
a, beginning = calculate_bounds_of_probability_distribution(distribution_beginning)
ending, b = calculate_bounds_of_probability_distribution(distribution_ending)
self._beginning = UnixTime(beginning)
self._ending = UnixTime(ending)
self.membership_function = MembershipFunction(self)
bins_beginning = bins / 2
bins_ending = bins - bins_beginning
self.interval_beginning = TimeInterval(a, beginning, bins_beginning)
self.interval_ending = TimeInterval(ending, b, bins_ending)
list.__init__(self, self.interval_beginning + self.interval_ending)
TimeInterval.__init__(self, a, b, bins)
if relation_formula is None:
relation_formula = RelationFormulaConvolution()
elif not isinstance(relation_formula, BaseRelationFormula):
raise TypeError("'relation_formula' should be of type 'BaseRelationFormula'")
relation_formula.bounds[distribution_beginning] = self.a, self.beginning
relation_formula.bounds[distribution_ending] = self.ending, self.b
self._formula_creator = FormulaCreator(relation_formula)
def degree(self, time_step=None, a=None, b=None, interval=None):
"""
        usage: provide either 'time_step', or both 'a' and 'b', or 'interval'
"""
if time_step is not None:
return self.membership_function(time_step)
if interval is None:
if (a, b) == (None, None):
interval = self
else:
interval = TimeInterval(a, b)
else:
check_is_time_interval(interval)
return integral(self.membership_function, interval.a, interval.b)
def temporal_relations_with(self, other):
return self._formula_creator.temporal_relations_between(self, other)
def instance(self):
return TemporalInstance(self.distribution_beginning.rvs(), self.distribution_ending.rvs())
def to_dict(self):
if self._dict is None:
self._dict = {}
for time_step in self.to_list():
self._dict[time_step] = self.membership_function(time_step)
return self._dict
# def plot(self, show_distributions=False):
# import matplotlib.pyplot as plt
# plt.plot(self.to_datetime_list(), self.membership_function())
# if show_distributions:
# if hasattr(self.distribution_beginning, 'plot'):
# self.distribution_beginning.plot()
# else:
# plt.plot(self.interval_beginning.to_datetime_list(),
# self.distribution_beginning.pdf(self.interval_beginning))
# if hasattr(self.distribution_ending, 'plot'):
# self.distribution_ending.plot()
# else:
# plt.plot(self.interval_ending.to_datetime_list(),
# self.distribution_ending.pdf(self.interval_ending))
# return plt
def plot(self, plt=None, show_distributions=False):
if plt is None:
import matplotlib.pyplot as plt
plt.plot(self.to_float_list(), self.membership_function())
if show_distributions:
if hasattr(self.distribution_beginning, 'plot'):
self.distribution_beginning.plot()
else:
plt.plot(self.interval_beginning.to_float_list(),
self.distribution_beginning.pdf(self.interval_beginning))
if hasattr(self.distribution_ending, 'plot'):
self.distribution_ending.plot()
else:
plt.plot(self.interval_ending.to_float_list(),
self.distribution_ending.pdf(self.interval_ending))
return plt
@property
def distribution_beginning(self):
return self._distribution_beginning
@property
def distribution_ending(self):
return self._distribution_ending
@property
def beginning(self):
return self._beginning
@property
def ending(self):
return self._ending
def __getitem__(self, portion_index):
if portion_index not in [0, 1]:
raise IndexError("TemporalEvent object only accepts '0' or '1' as index")
if portion_index == 0:
return self.distribution_beginning
return self.distribution_ending
def __mul__(self, other):
return self.temporal_relations_with(other)
def __str__(self):
return repr(self)
# use TemporalEventTrapezium instead
class TemporalEventPiecewiseLinear(TemporalEvent):
def __init__(self, dictionary_beginning, dictionary_ending, bins=50):
input_list_beginning, output_list_beginning = convert_dict_to_sorted_lists(dictionary_beginning)
for i in xrange(1, len(input_list_beginning)):
if not dictionary_beginning[input_list_beginning[i]] > dictionary_beginning[input_list_beginning[i - 1]]:
raise TypeError("values of 'dictionary_beginning' should be increasing in time")
input_list_ending, output_list_ending = convert_dict_to_sorted_lists(dictionary_ending)
for i in xrange(1, len(input_list_ending)):
if not dictionary_ending[input_list_ending[i]] < dictionary_ending[input_list_ending[i - 1]]:
raise TypeError("values of 'dictionary_ending' should be decreasing in time")
dictionary_ending = {}
for i, time_step in enumerate(input_list_ending):
dictionary_ending[time_step] = output_list_ending[len(input_list_ending) - i - 1]
input_list_ending, output_list_ending = convert_dict_to_sorted_lists(dictionary_ending)
distribution_beginning = ProbabilityDistributionPiecewiseLinear(dictionary_beginning)
distribution_ending = ProbabilityDistributionPiecewiseLinear(dictionary_ending)
TemporalEvent.__init__(self, distribution_beginning, distribution_ending, bins=bins)
self._list = sorted(set(input_list_beginning + input_list_ending))
self.membership_function = FunctionPiecewiseLinear(self.to_dict(), FUNCTION_ZERO)
def __getitem__(self, index):
return self._list.__getitem__(index)
def __len__(self):
return len(self._list)
def __iter__(self):
return iter(self._list)
def __reversed__(self):
return reversed(self._list)
def __repr__(self):
pairs = ['{0}: {1}'.format(self[i], self.membership_function[i]) for i in xrange(len(self))]
return '{0}({1})'.format(self.__class__.__name__, ', '.join(pairs))
class TemporalInstance(TimeInterval):
def __init__(self, a, b):
TimeInterval.__init__(self, a, b, 1)
def plot(self):
import matplotlib.pyplot as plt
from spatiotemporal.unix_time import UnixTime
plt.plot([UnixTime(self.a).to_datetime(), UnixTime(self.b).to_datetime()], [1, 1])
return plt
if __name__ == '__main__':
from utility.functions import integral
from scipy.stats import norm
import matplotlib.pyplot as plt
#event = TemporalInstance(datetime(2010, 1, 1), datetime(2011, 2, 1))
#plt = event.plot()
#plt.show()
events = [
# TemporalEvent(norm(loc=10, scale=2), norm(loc=30, scale=2), 100),
# TemporalEvent(norm(loc=5, scale=2), norm(loc=15, scale=4), 100),
TemporalEventPiecewiseLinear({1: 0, 2: 0.1, 3: 0.3, 4: 0.7, 5: 1}, {6: 1, 7: 0.9, 8: 0.6, 9: 0.1, 10: 0}),
TemporalEventPiecewiseLinear({1: 0, 2: 0.1, 3: 0.3, 4: 0.7, 5: 1}, {3.5: 1, 4.5: 0.9, 8: 0.6, 9: 0.1, 10: 0})
]
print type(events[0])
print events[0] * events[1]
for event in events:
plt = event.plot()
print integral(event.distribution_beginning.pdf, event.a, event.beginning)
print event.distribution_beginning.rvs(10)
plt.ylim(ymax=1.1)
#plt.figure()
plt.show()
| agpl-3.0 |
xiaoxiamii/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
xuzhuoran0106/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
        # Maintain references to the two previously-returned data matrices
        # so they don't get garbage collected.
        self.data = [None, None]
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
        crop = slice(self.border_size, self.border_size + self.inner_size)
        self.data_mean_crop = self.data_mean.reshape(
            (self.num_colors, self.img_size, self.img_size)
        )[:, crop, crop].reshape((1, 3 * self.inner_size ** 2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
            # Passing a timeout of 0 to join() makes it non-blocking, which
            # re-enables re-using the previous batch when the loader is still running.
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
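    # --- Illustrative sketch (added; not part of the original file) ---
    # get_data_from_loader implements simple double buffering: while the
    # caller consumes one batch, a JPEGBatchLoaderThread fills the other
    # slot with the next one. The same pattern in miniature, with
    # hypothetical load_batch()/consume() helpers and batch count
    # standing in for the real loader and training step:
    #
    #   from threading import Thread
    #   out = []
    #   t = Thread(target=lambda: out.append(load_batch(0)))
    #   t.start()
    #   for k in range(num_batches):
    #       t.join()                      # wait for batch k to finish loading
    #       current = out.pop()
    #       if k + 1 < num_batches:       # start prefetching batch k + 1
    #           t = Thread(target=lambda k=k: out.append(load_batch(k + 1)))
    #           t.start()
    #       consume(current)              # train on batch k while k + 1 loads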
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
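    # --- Illustrative sketch (added; not part of the original file) ---
    # add_color_noise is PCA-based color augmentation: per image, a random
    # 3-vector is scaled by the color PCA standard deviations and projected
    # through the eigenvectors, giving one RGB offset that is added to every
    # pixel before the whole batch is rescaled by 1 / (1 + coeff). A
    # standalone NumPy version, assuming images of shape (ncases, 3, npix),
    # pca_stdevs of shape (3,) and pca_vecs of shape (3, 3):
    #
    #   import numpy as np
    #   def pca_color_noise(images, pca_stdevs, pca_vecs, coeff=0.1):
    #       alphas = np.random.randn(images.shape[0], 3) * pca_stdevs
    #       offsets = np.dot(alphas, pca_vecs.T)           # one RGB shift per image
    #       noisy = images + coeff * offsets[:, :, np.newaxis]
    #       return noisy / (1.0 + coeff)                   # keep the overall scale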
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
        mean = self.data_mean_crop.reshape((data.shape[0], 1))
        return n.require(
            (data + (mean if add_mean else 0)).T
            .reshape(data.shape[1], 3, self.inner_size, self.inner_size)
            .swapaxes(1, 3).swapaxes(1, 2) / 255.0,
            dtype=n.single)
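# --- Illustrative sketch (added; not part of the original file) ---
# get_plottable_data undoes the preprocessing done by get_next_batch: it adds
# the cropped mean back, transposes the (pixels, cases) matrix into
# (cases, inner_size, inner_size, 3), and rescales to [0, 1] for display, e.g.:
#
#   epoch, batchnum, (data, labvec, labmat) = dp.get_next_batch()
#   imgs = dp.get_plottable_data(data)     # (numCases, inner_size, inner_size, 3)
#   pl.imshow(imgs[0], interpolation='nearest'); pl.show()
#
# where dp is an ImageDataProvider and pl is matplotlib's pylab module.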
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
            self.data_dic[-1]["labels"] = n.require(
                n.tile(self.data_dic[-1]["labels"].reshape(
                    (1, n.prod(self.data_dic[-1]["labels"].shape))),
                    (1, self.data_mult)),
                requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
        crop = slice(self.border_size, self.border_size + self.inner_size)
        self.data_mean = self.batch_meta['data_mean'].reshape(
            (self.num_colors, self.img_size, self.img_size)
        )[:, crop, crop].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
                    (sy, sx), (ey, ex) = start_positions[i], end_positions[i]
                    target[:, i * x.shape[1]:(i + 1) * x.shape[1]] = \
                        y[:, sy:ey, sx:ex, :].reshape((self.get_data_dims(), x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
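# --- Illustrative sketch (added; not part of the original file) ---
# The training branch of __trim_borders above is random-crop-plus-flip
# augmentation. The same operation for a single (3, img_size, img_size)
# image, written standalone (not used by the providers above):
def _random_crop_flip_example(img, inner_size):
    border = img.shape[1] - inner_size
    start_y, start_x = nr.randint(0, border + 1), nr.randint(0, border + 1)
    crop = img[:, start_y:start_y + inner_size, start_x:start_x + inner_size]
    if nr.randint(2) == 0:  # horizontal flip with probability 0.5
        crop = crop[:, :, ::-1]
    return crop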
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
| apache-2.0 |