repo_name | path | copies | size | content | license
---|---|---|---|---|---
plotly/python-api | packages/python/plotly/plotly/graph_objs/isosurface/slices/_x.py | 2 | 6186 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class X(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "isosurface.slices"
_path_str = "isosurface.slices.x"
_valid_props = {"fill", "locations", "locationssrc", "show"}
# fill
# ----
@property
def fill(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
the `slices` is 1, meaning that they are entirely shaded. On the
other hand, applying a `fill` ratio less than one allows the
creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
# locations
# ---------
@property
def locations(self):
"""
Specifies the location(s) of slices on the axis. When not
specified, slices are created for all points of the x axis
except the start and end.
The 'locations' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["locations"]
@locations.setter
def locations(self, val):
self["locations"] = val
# locationssrc
# ------------
@property
def locationssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `locations`.
The 'locationssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["locationssrc"]
@locationssrc.setter
def locationssrc(self, val):
self["locationssrc"] = val
# show
# ----
@property
def show(self):
"""
Determines whether or not slice planes about the x dimension
are drawn.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `slices`. The default fill
value of the `slices` is 1, meaning that they are
entirely shaded. On the other hand, applying a `fill`
ratio less than one allows the creation of openings
parallel to the edges.
locations
Specifies the location(s) of slices on the axis. When
not specified, slices are created for all points of
the x axis except the start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
show
Determines whether or not slice planes about the x
dimension are drawn.
"""
def __init__(
self,
arg=None,
fill=None,
locations=None,
locationssrc=None,
show=None,
**kwargs
):
"""
Construct a new X object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.slices.X`
fill
Sets the fill ratio of the `slices`. The default fill
value of the `slices` is 1, meaning that they are
entirely shaded. On the other hand, applying a `fill`
ratio less than one allows the creation of openings
parallel to the edges.
locations
Specifies the location(s) of slices on the axis. When
not specified, slices are created for all points of
the x axis except the start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
show
Determines whether or not slice planes about the x
dimension are drawn.
Returns
-------
X
"""
super(X, self).__init__("x")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.slices.X
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.slices.X`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("fill", None)
_v = fill if fill is not None else _v
if _v is not None:
self["fill"] = _v
_v = arg.pop("locations", None)
_v = locations if locations is not None else _v
if _v is not None:
self["locations"] = _v
_v = arg.pop("locationssrc", None)
_v = locationssrc if locationssrc is not None else _v
if _v is not None:
self["locationssrc"] = _v
_v = arg.pop("show", None)
_v = show if show is not None else _v
if _v is not None:
self["show"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
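# --- Hedged usage sketch (not part of the generated module) -----------------
# A minimal illustration of how this class is typically reached through the
# public plotly API rather than instantiated directly; the trace data below is
# made up purely for illustration.
#
#     import plotly.graph_objects as go
#
#     fig = go.Figure(go.Isosurface(
#         x=[0, 0, 0, 0, 1, 1, 1, 1],
#         y=[0, 1, 0, 1, 0, 1, 0, 1],
#         z=[1, 1, 0, 0, 1, 1, 0, 0],
#         value=[1, 2, 3, 4, 5, 6, 7, 8],
#         # `slices.x` accepts the properties defined above: show, fill, locations
#         slices=dict(x=go.isosurface.slices.X(show=True, fill=0.7, locations=[0.3])),
#     ))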
| mit |
Cisco-Talos/fnc-1 | tree_model/CountFeatureGenerator.py | 1 | 6699 | from FeatureGenerator import *
import ngram
import cPickle
import pandas as pd
from nltk.tokenize import sent_tokenize
from helpers import *
import hashlib
class CountFeatureGenerator(FeatureGenerator):
def __init__(self, name='countFeatureGenerator'):
super(CountFeatureGenerator, self).__init__(name)
def process(self, df):
grams = ["unigram", "bigram", "trigram"]
feat_names = ["Headline", "articleBody"]
print "generate counting features"
for feat_name in feat_names:
for gram in grams:
df["count_of_%s_%s" % (feat_name, gram)] = list(df.apply(lambda x: len(x[feat_name + "_" + gram]), axis=1))
df["count_of_unique_%s_%s" % (feat_name, gram)] = \
list(df.apply(lambda x: len(set(x[feat_name + "_" + gram])), axis=1))
df["ratio_of_unique_%s_%s" % (feat_name, gram)] = \
map(try_divide, df["count_of_unique_%s_%s"%(feat_name,gram)], df["count_of_%s_%s"%(feat_name,gram)])
# overlapping n-grams count
for gram in grams:
df["count_of_Headline_%s_in_articleBody" % gram] = \
list(df.apply(lambda x: sum([1. for w in x["Headline_" + gram] if w in set(x["articleBody_" + gram])]), axis=1))
df["ratio_of_Headline_%s_in_articleBody" % gram] = \
map(try_divide, df["count_of_Headline_%s_in_articleBody" % gram], df["count_of_Headline_%s" % gram])
# number of sentences in headline and body
for feat_name in feat_names:
#df['len_sent_%s' % feat_name] = df[feat_name].apply(lambda x: len(sent_tokenize(x.decode('utf-8').encode('ascii', errors='ignore'))))
df['len_sent_%s' % feat_name] = df[feat_name].apply(lambda x: len(sent_tokenize(x)))
#print df['len_sent_%s' % feat_name]
# collect the names of the basic counting features (dumped to file further below)
feat_names = [ n for n in df.columns \
if "count" in n \
or "ratio" in n \
or "len_sent" in n]
# binary refuting features
_refuting_words = [
'fake',
'fraud',
'hoax',
'false',
'deny', 'denies',
# 'refute',
'not',
'despite',
'nope',
'doubt', 'doubts',
'bogus',
'debunk',
'pranks',
'retract'
]
_hedging_seed_words = [
'alleged', 'allegedly',
'apparently',
'appear', 'appears',
'claim', 'claims',
'could',
'evidently',
'largely',
'likely',
'mainly',
'may', 'maybe', 'might',
'mostly',
'perhaps',
'presumably',
'probably',
'purported', 'purportedly',
'reported', 'reportedly',
'rumor', 'rumour', 'rumors', 'rumours', 'rumored', 'rumoured',
'says',
'seem',
'somewhat',
# 'supposedly',
'unconfirmed'
]
#df['refuting_words_in_headline'] = df['Headline'].map(lambda x: 1 if w in x else 0 for w in _refuting_words)
#df['hedging_words_in_headline'] = df['Headline'].map(lambda x: 1 if w in x else 0 for w in _refuting_words)
#check_words = _refuting_words + _hedging_seed_words
check_words = _refuting_words
for rf in check_words:
fname = '%s_exist' % rf
feat_names.append(fname)
df[fname] = df['Headline'].map(lambda x: 1 if rf in x else 0)
# number of body texts paired up with the same headline
#df['headline_hash'] = df['Headline'].map(lambda x: hashlib.md5(x).hexdigest())
#nb_dict = df.groupby(['headline_hash'])['Body ID'].nunique().to_dict()
#df['n_bodies'] = df['headline_hash'].map(lambda x: nb_dict[x])
#feat_names.append('n_bodies')
# number of headlines paired up with the same body text
#nh_dict = df.groupby(['Body ID'])['headline_hash'].nunique().to_dict()
#df['n_headlines'] = df['Body ID'].map(lambda x: nh_dict[x])
#feat_names.append('n_headlines')
print 'BasicCountFeatures:'
print df
# split into train, test portion and save in separate files
train = df[~df['target'].isnull()]
print 'train:'
print train[['Headline_unigram','Body ID', 'count_of_Headline_unigram']]
xBasicCountsTrain = train[feat_names].values
outfilename_bcf_train = "train.basic.pkl"
with open(outfilename_bcf_train, "wb") as outfile:
cPickle.dump(feat_names, outfile, -1)
cPickle.dump(xBasicCountsTrain, outfile, -1)
print 'basic counting features for training saved in %s' % outfilename_bcf_train
test = df[df['target'].isnull()]
print 'test:'
print test[['Headline_unigram','Body ID', 'count_of_Headline_unigram']]
#return 1
if test.shape[0] > 0:
# test set exists
print 'saving test set'
xBasicCountsTest = test[feat_names].values
outfilename_bcf_test = "test.basic.pkl"
with open(outfilename_bcf_test, 'wb') as outfile:
cPickle.dump(feat_names, outfile, -1)
cPickle.dump(xBasicCountsTest, outfile, -1)
print 'basic counting features for test saved in %s' % outfilename_bcf_test
return 1
def read(self, header='train'):
filename_bcf = "%s.basic.pkl" % header
with open(filename_bcf, "rb") as infile:
feat_names = cPickle.load(infile)
xBasicCounts = cPickle.load(infile)
print 'feature names: '
print feat_names
print 'xBasicCounts.shape:'
print xBasicCounts.shape
#print type(xBasicCounts)
return [xBasicCounts]
if __name__ == '__main__':
cf = CountFeatureGenerator()
cf.read()
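# --- Hedged usage sketch ------------------------------------------------------
# Assumptions: a pandas DataFrame `df` produced upstream in this pipeline that
# already carries the columns this generator expects -- 'Headline',
# 'articleBody', 'target', 'Body ID' plus the precomputed
# Headline_/articleBody_ unigram, bigram and trigram token lists.
#
#     cf = CountFeatureGenerator()
#     cf.process(df)             # writes train.basic.pkl (and test.basic.pkl if a test split exists)
#     feats = cf.read('train')   # -> [xBasicCounts], the saved feature matrix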
# Copyright 2017 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| apache-2.0 |
Clyde-fare/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 8 | 12701 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
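# A hedged illustration of the matrix built above (every non-zero entry equals
# n_topics = 3; the values themselves are arbitrary word counts):
#
#     >>> _build_sparse_mtx()[1].toarray()
#     array([[3., 3., 3., 0., 0., 0., 0., 0., 0.],
#            [3., 3., 3., 0., 0., 0., 0., 0., 0.],
#            [3., 3., 3., 0., 0., 0., 0., 0., 0.],
#            [0., 0., 0., 3., 3., 3., 0., 0., 0.],
#            [0., 0., 0., 3., 3., 3., 0., 0., 0.],
#            [0., 0., 0., 3., 3., 3., 0., 0., 0.],
#            [0., 0., 0., 0., 0., 0., 3., 3., 3.],
#            [0., 0., 0., 0., 0., 0., 3., 3., 3.],
#            [0., 0., 0., 0., 0., 0., 3., 3., 3.]])
#
# so the top-3 words of each fitted component should recover the index groups
# (0, 1, 2), (3, 4, 5) and (6, 7, 8) asserted throughout the tests below.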
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method, random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method', LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test passing a dense matrix with negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=3,
learning_method=method, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=-1, learning_offset=5.,
total_samples=30, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X, invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X, invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# perplexity should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
assert_allclose(_dirichlet_expectation_1d(x),
np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
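# A hedged note on the identity checked above: for theta ~ Dirichlet(x), the
# expected log-parameters are
#
#     E[log theta_k] = psi(x_k) - psi(sum_j x_j)
#
# which is what the Cython helpers compute; per the assertions, the 1d variant
# returns exp(...) of this quantity while the 2d variant returns it directly.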
| bsd-3-clause |
Aasmi/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Invert the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
wazeerzulfikar/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 27 | 27335 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
# Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Using size_threshold argument should raise
# a deprecation warning
assert_warns(DeprecationWarning,
manhattan_distances, X, Y, size_threshold=10)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean scikit-learn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
def test_cosine_distances():
# Check the pairwise Cosine distances computation
rng = np.random.RandomState(1337)
x = np.abs(rng.rand(910))
XA = np.vstack([x, x])
D = cosine_distances(XA)
assert_array_almost_equal(D, [[0., 0.], [0., 0.]])
# check that all elements are in [0, 2]
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0., 0.])
XB = np.vstack([x, -x])
D2 = cosine_distances(XB)
# check that all elements are in [0, 2]
assert_true(np.all(D2 >= 0.))
assert_true(np.all(D2 <= 2.))
# check that diagonal elements are equal to 0 and non diagonal to 2
assert_array_almost_equal(D2, [[0., 2.], [2., 0.]])
# check large random matrix
X = np.abs(rng.rand(1000, 5000))
D = cosine_distances(X)
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0.] * D.shape[0])
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
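# A hedged note on the identity exercised above: cosine similarity reduces to
# the linear kernel once both inputs are L2-normalized, since
#
#     cos(x, y) = x.dot(y) / (||x|| * ||y||) = (x / ||x||).dot(y / ||y||)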
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
simon-pepin/scikit-learn | sklearn/linear_model/logistic.py | 105 | 56686 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
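# A hedged note on the convention used above: when `w` carries one extra entry,
# the trailing entry is the intercept. Illustrative example (made-up values):
#
#     w = np.array([0.5, -0.25, 2.0])        # n_features = 2, intercept = 2.0
#     w, c, yz = _intercept_dot(w, X, y)     # w -> [0.5, -0.25], c -> 2.0,
#                                            # yz = y * (X.dot(w) + c)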
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
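# A hedged summary of the quantities computed above, with z_i = y_i * (x_i . w + c)
# and per-sample weights s_i (standard L2-penalized logistic regression):
#
#     out  = sum_i s_i * log(1 + exp(-z_i)) + 0.5 * alpha * ||w||^2
#     grad = X.T.dot(s * (sigmoid(z) - 1) * y) + alpha * w
#
# plus a final gradient entry equal to sum_i s_i * (sigmoid(z_i) - 1) * y_i
# when the intercept is being fit.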
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
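# A hedged note on the Hessian represented by `Hs` above: for the binary
# logistic loss it is
#
#     H = X.T.dot(diag(d)).dot(X) + alpha * I,   d = sample_weight * z * (1 - z)
#
# (plus an extra row/column when the intercept is fit). `Hs(s)` returns the
# matrix-vector product H.dot(s) without ever forming H explicitly, which is
# what a Hessian-free Newton solver such as newton_cg consumes.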
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
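# A hedged summary of the loss computed above: with softmax probabilities
# p[i, k] = softmax(X.dot(W.T) + intercept)[i, k] and one-hot labels Y,
#
#     loss = -sum_i s_i * sum_k Y[i, k] * log(p[i, k]) + 0.5 * alpha * ||W||_F^2
#
# i.e. the weighted multinomial cross-entropy plus an L2 penalty on the
# coefficients (the intercept is not penalized).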
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
    # the class_weights are assigned after masking the labels with an OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
# To take care of object dtypes, i.e 1 and -1 are in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
        # it must work whether or not the bias term is given
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
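# A minimal, hypothetical example of calling logistic_regression_path on toy
# binary data (added comment, not part of the original module); the function
# returns one coefficient vector per value of C.
#
#     >>> rng = np.random.RandomState(0)
#     >>> X_toy = rng.randn(50, 4)
#     >>> y_toy = (X_toy[:, 0] > 0).astype(np.float64)
#     >>> coefs, Cs = logistic_regression_path(X_toy, y_toy, Cs=3,
#     ...                                      fit_intercept=True,
#     ...                                      solver='lbfgs')
#     >>> len(coefs), coefs[0].shape, Cs.shape
#     (3, (5,), (3,))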
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen on a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
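# Hypothetical sketch (added comment, not in the original source):
# _log_reg_scoring_path is normally driven by LogisticRegressionCV, which
# passes integer index arrays describing one train/test split per CV fold.
# Reusing the toy X_toy, y_toy from the sketch above:
#
#     >>> train_idx, test_idx = np.arange(0, 40), np.arange(40, 50)
#     >>> coefs, Cs, scores = _log_reg_scoring_path(
#     ...     X_toy, y_toy, train_idx, test_idx, pos_class=1, Cs=3,
#     ...     fit_intercept=True, solver='lbfgs')
#     >>> scores.shape                 # one accuracy score per value of C
#     (3,)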
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
        When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
# For the binary case, this get squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
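# Hedged usage sketch (added comment, not part of the original file): a basic
# fit/predict round-trip with this estimator on hypothetical toy data.
#
#     >>> from sklearn.datasets import make_classification
#     >>> X_toy, y_toy = make_classification(n_samples=100, n_features=5,
#     ...                                     random_state=0)
#     >>> clf = LogisticRegression(C=1.0, solver='lbfgs').fit(X_toy, y_toy)
#     >>> clf.predict(X_toy[:2]).shape
#     (2,)
#     >>> clf.predict_proba(X_toy[:2]).shape
#     (2, 2)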
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e. the initial coefficients of the
    present fit are taken to be the coefficients obtained after convergence
    in the previous fit, so it is typically faster for high-dimensional
    dense data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across
    all folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
        on a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
intercept_scaling : float, default 1.
        This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in ['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
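# Hedged usage sketch (added comment, not part of the original file):
# cross-validated selection of C on hypothetical toy data. After fitting,
# Cs_ holds the grid that was searched and C_ the selected value(s) -- one
# entry here because the toy problem is binary.
#
#     >>> from sklearn.datasets import make_classification
#     >>> X_toy, y_toy = make_classification(n_samples=100, n_features=5,
#     ...                                     random_state=0)
#     >>> clf_cv = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs')
#     >>> clf_cv = clf_cv.fit(X_toy, y_toy)
#     >>> clf_cv.Cs_.shape, clf_cv.C_.shape
#     ((5,), (1,))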
| bsd-3-clause |
hehongliang/tensorflow | tensorflow/contrib/timeseries/examples/predict_test.py | 80 | 2487 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the prediction example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from tensorflow.contrib.timeseries.examples import predict
from tensorflow.python.platform import test
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/period_trend.csv")
class PeriodTrendExampleTest(test.TestCase):
def test_shapes_and_variance_structural(self):
(times, observed, all_times, mean, upper_limit, lower_limit
) = predict.structural_ensemble_train_and_predict(_DATA_FILE)
# Just check that plotting will probably be OK. We can't actually run the
# plotting code since we don't want to pull in matplotlib as a dependency
# for this test.
self.assertAllEqual([500], times.shape)
self.assertAllEqual([500], observed.shape)
self.assertAllEqual([700], all_times.shape)
self.assertAllEqual([700], mean.shape)
self.assertAllEqual([700], upper_limit.shape)
self.assertAllEqual([700], lower_limit.shape)
# Check that variance hasn't blown up too much. This is a relatively good
# indication that training was successful.
self.assertLess(upper_limit[-1] - lower_limit[-1],
1.5 * (upper_limit[0] - lower_limit[0]))
def test_ar(self):
(times, observed, all_times, mean,
upper_limit, lower_limit) = predict.ar_train_and_predict(_DATA_FILE)
self.assertAllEqual(times.shape, observed.shape)
self.assertAllEqual(all_times.shape, mean.shape)
self.assertAllEqual(all_times.shape, upper_limit.shape)
self.assertAllEqual(all_times.shape, lower_limit.shape)
self.assertLess((upper_limit - lower_limit).mean(), 4.)
if __name__ == "__main__":
test.main()
| apache-2.0 |
herilalaina/scikit-learn | sklearn/feature_extraction/hashing.py | 29 | 6866 | # Author: Lars Buitinck
# License: BSD 3 clause
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
alternate_sign : boolean, optional, default True
When True, an alternating sign is added to the features as to
approximately conserve the inner product in the hashed space even for
small n_features. This approach is similar to sparse random projection.
non_negative : boolean, optional, default False
When True, an absolute value is applied to the features matrix prior to
returning it. When used in conjunction with alternate_sign=True, this
significantly reduces the inner product preservation property.
.. deprecated:: 0.19
This option will be removed in 0.21.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, alternate_sign=True, non_negative=False):
self._validate_params(n_features, input_type)
if non_negative:
warnings.warn("the option non_negative=True has been deprecated"
" in 0.19 and will be removed"
" in version 0.21.", DeprecationWarning)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.alternate_sign = alternate_sign
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Parameters
----------
X : array-like
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype,
self.alternate_sign)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
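# Illustrative sketch (added comment, not part of the original file): with
# input_type="string" each sample is an iterable of feature names and every
# occurrence implicitly carries the value 1.
#
#     >>> h = FeatureHasher(n_features=8, input_type="string")
#     >>> tokens = [["dog", "cat", "dog"], ["elephant"]]
#     >>> h.transform(tokens).shape
#     (2, 8)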
| bsd-3-clause |
mhue/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
    chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
Peratham/tensor-sc | scripts/create_mat_file.py | 2 | 1701 |
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.io as sio
import sys
'''
Create a .mat file of all the data so that we can plot things in Matlab.
'''
def get_best(num_cut_sweep, suffix):
best = {}
total = len(num_cut_sweep)
for i, num_cut in enumerate(num_cut_sweep):
min_size = min(i + 1, total - (i + 1))
if suffix == 'density' and num_cut > 1.0:
val = 0.0
else:
val = num_cut
if min_size in best:
if suffix == 'density':
best[min_size] = max(val, best[min_size])
else:
best[min_size] = min(val, best[min_size])
else:
best[min_size] = val
return zip(*sorted(best.items()))
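# Hypothetical illustration (added comment, not part of the original script):
# for a sweep such as [4.0, 2.0, 3.0, 1.0] with suffix 'num_cut', get_best
# keeps the minimum value seen for each cluster size min(i + 1, total - i - 1)
# and returns the transposed result of zip(*sorted(best.items())), here the
# sizes (0, 1, 2) paired with the values (1.0, 3.0, 2.0).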
all_data = {}
def gather_data(data, suffix):
for cut_type in ['msc', 'dl', 'lap', 'alap', 'cocluster_u', 'cocluster_v', 'random']:
key = '%s-filter_%s_%s' % (data, cut_type, suffix)
with open(key + '.txt') as f:
num_cut_sweep = [float(line) for line in f]
# Format for Matlab
key = key.split('/')[-1]
key = key.replace('-', '_')
all_data[key] = get_best(num_cut_sweep, suffix)
for data_set in ['soc-Slashdot0811', 'wiki-Vote', 'as-caida20071105',
'email-Enron', 'soc-Epinions1', 'amazon0312',
'twitter_combined', 'email-EuAll', 'cit-HepPh',
'web-Stanford', 'wiki-RfA-net', 'wiki-Talk']:
gather_data('d3c_cond_results/' + data_set, 'd3c_cond')
gather_data('num_cut_results/' + data_set, 'num_cut')
gather_data('density_results/' + data_set, 'density')
sio.savemat('cut_data.mat', all_data)
| bsd-2-clause |
mojoboss/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Because there are only a few points in each dimension and linear
regression follows these points with a straight line as closely as
it can, noise in the observations causes large variance in the fit,
as shown in the first plot. The slope of each fitted line can vary
considerably from one prediction to the next because of the noise
in the observations.
Ridge regression minimizes a penalised version of the least-squares
loss function. The penalty `shrinks` the value of the regression
coefficients.
Despite the few data points in each dimension, the slope of the
prediction is much more stable, and the variance in the line itself
is greatly reduced compared to that of standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
theoryno3/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
annahs/atmos_research | WHI_long_term_coating_histos_by_cluster-from_raw.py | 1 | 8287 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
from datetime import timedelta
import math
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#coat_thickness_from_actual_scat_amp FLOAT
#UNIQUE (sp2b_file, file_index, instr)
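#For reference only (a reconstruction from the column list above, not taken
#from the original database-creation code), the table queried below was
#presumably created along these lines:
#  CREATE TABLE SP2_coating_analysis (
#      id INTEGER PRIMARY KEY AUTOINCREMENT,
#      sp2b_file TEXT, file_index INT, instr TEXT, instr_locn TEXT,
#      particle_type TEXT, particle_dia FLOAT, unix_ts_utc FLOAT,
#      ... remaining FLOAT columns as listed above ...,
#      UNIQUE (sp2b_file, file_index, instr))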
timezone = -8
######get spike times these are in local time
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/')
file = open('WHI_rBC_record_2009to2013-spike_times.rbcpckl', 'r')
spike_times_full = pickle.load(file)
file.close()
spike_times = []
for spike in spike_times_full:
if spike.year >= 2012:
if spike < datetime(2012,06,01):
spike_times.append(spike)
#fire times
fire_time1 = [datetime.strptime('2009/07/27 00:00', '%Y/%m/%d %H:%M'), datetime.strptime('2009/08/08 00:00', '%Y/%m/%d %H:%M')] #row_datetimes following Takahama et al (2011) doi:10.5194/acp-11-6367-2011 #PST in LT
fire_time2 = [datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M'), datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')] #jason's BC clear report #PST in LT
#open cluslist and read into a python list
cluslist = []
CLUSLIST_file = 'C:/hysplit4/working/WHI/2hrly_HYSPLIT_files/all_with_sep_GBPS/CLUSLIST_6-mod-precip_amount_added'
with open(CLUSLIST_file,'r') as f:
for line in f:
newline = line.split()
cluster_no = int(newline[0])
traj_time = datetime(int(newline[2])+2000,int(newline[3]),int(newline[4]),int(newline[5]))+timedelta(hours = timezone) #convert UTC->LT here
significant_rainfall = float(newline[8])
if traj_time.year >=2012:
cluslist.append([traj_time,cluster_no,significant_rainfall])
# sort cluslist by row_datetime in place
cluslist.sort(key=lambda clus_info: clus_info[0])
print len(cluslist)
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
instrument_locn = 'WHI'
type_particle = 'incand'
start_date = datetime.strptime('20120101','%Y%m%d')
end_date = datetime.strptime('20120601','%Y%m%d')
rBC_density = 1.8
incand_sat = 3750
LF_max = 45000 #above this is unreasonable
min_rBC_mass = 2.6#100-#0.94#1.63-#120 2.6-#140 3.86-#160nm 0.25-#65
min_BC_VED = (((min_rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7
max_rBC_mass = 5.6#140 3.86-160 5.5-#180nm 7.55-#200 10.05-#220
max_BC_VED = (((max_rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7
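#Illustrative helper (a sketch, not used below): the repeated expression above
#converts an rBC mass in fg to a volume-equivalent diameter (VED) in nm,
#assuming spherical particles of density rBC_density in g/cm^3,
#with 10^15 fg per g and 10^7 nm per cm.
def mass_fg_to_VED_nm(mass_fg, density=rBC_density):
    volume_cm3 = mass_fg/(10**15*density)
    return ((volume_cm3*6/3.14159)**(1/3.0))*10**7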
begin_data = calendar.timegm(start_date.timetuple())
end_data = calendar.timegm(end_date.timetuple())
cluster_1 = []
cluster_2= []
cluster_3= []
cluster_4 = []
cluster_5= []
cluster_6= []
cluster_GBPS= []
spikes = []
particles=0
no_scat=0
fit_failure=0
early_evap=0
flat_fit=0
LF_high=0
count_155_180 = 0
no_scat_155_180 = 0
early_evap_155_180 = 0
LOG_EVERY_N = 10000
i=0
for row in c.execute('''SELECT rBC_mass_fg, coat_thickness_nm, unix_ts_utc, LF_scat_amp, LF_baseline_pct_diff, sp2b_file, file_index, instr,actual_scat_amp
FROM SP2_coating_analysis
WHERE instr_locn=? and particle_type=? and rBC_mass_fg>=? and rBC_mass_fg<? and unix_ts_utc>=? and unix_ts_utc<?
ORDER BY unix_ts_utc''',
(instrument_locn,type_particle, min_rBC_mass, max_rBC_mass, begin_data,end_data)):
particles+=1
i+=1
rBC_mass = row[0]
coat_thickness = row[1]
event_time = datetime.utcfromtimestamp(row[2])+timedelta(hours = timezone) #db is UTC, convert to LT here
LEO_amp = row[3]
LF_baseline_pctdiff = row[4]
file = row[5]
index = row[6]
instrt = row[7]
meas_scat_amp = row[8]
rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
if rBC_VED >=155 and rBC_VED <=180:
count_155_180 += 1
#skip if an undesired record
if meas_scat_amp < 6 :
no_scat +=1
if rBC_VED >=155 and rBC_VED <=180:
no_scat_155_180 += 1
if meas_scat_amp >= 6 and meas_scat_amp <= 20 and LEO_amp == 0.0 and LF_baseline_pctdiff == None:
early_evap +=1
if rBC_VED >=155 and rBC_VED <=180:
early_evap_155_180 +=1
continue
if LEO_amp == -2:
early_evap +=1
if rBC_VED >=155 and rBC_VED <=180:
early_evap_155_180 +=1
continue
if LEO_amp == -1:
fit_failure +=1
continue
if LEO_amp == 0.0 and LF_baseline_pctdiff != None:
flat_fit +=1
continue
if LEO_amp > LF_max:
LF_high +=1
continue
if meas_scat_amp < 6:
coat_thickness = (91-rBC_VED)/2
#spike times(local time)
event_in_spike = False
spike_half_interval = 2
if len(spike_times):
earliest_spike_time = spike_times[0]
while event_time > earliest_spike_time+timedelta(minutes=spike_half_interval) and len(spike_times):
spike_times.pop(0)
if len(spike_times):
earliest_spike_time = spike_times[0]
print 'spikes left', len(spike_times)
if len(spike_times):
current_spike = spike_times[0]
spike_start = current_spike-timedelta(minutes=spike_half_interval)
spike_end = current_spike+timedelta(minutes=spike_half_interval)
if (spike_start <= event_time < spike_end):
event_in_spike = True
if meas_scat_amp < 6 or LEO_amp > 0:
spikes.append([rBC_VED,coat_thickness,event_time, False])
if event_in_spike == True:
continue
##if in a BB time, put this data in BB dict and continue
#if (fire_time1[0] <= event_time <= fire_time1[1]) or (fire_time2[0] <= event_time <= fire_time2[1]):
# #BB.append([rBC_VED,coat_thickness,event_time])
# continue
#trajectory clusters
earliest_traj_time = cluslist[0][0]
while event_time > (earliest_traj_time+timedelta(hours=1)):
cluslist.pop(0)
earliest_traj_time = cluslist[0][0]
print 'clusters left', len(cluslist)
#data for current trajectory
traj_time_PST = cluslist[0][0]
cluster_no = cluslist[0][1]
rain = cluslist[0][2]
#if rainy == 'True':
# rain = True
#else:
# rain = False
if ((traj_time_PST-timedelta(hours=1)) <= event_time < (traj_time_PST+timedelta(hours=1))):
if meas_scat_amp < 6 or LEO_amp > 0:
if cluster_no == 1:
cluster_1.append([rBC_VED,coat_thickness,event_time,rain])
if cluster_no == 2:
cluster_2.append([rBC_VED,coat_thickness,event_time,rain])
if cluster_no == 3:
cluster_3.append([rBC_VED,coat_thickness,event_time,rain])
if cluster_no == 4:
cluster_4.append([rBC_VED,coat_thickness,event_time,rain])
if cluster_no == 5:
cluster_5.append([rBC_VED,coat_thickness,event_time,rain])
if cluster_no == 6:
cluster_6.append([rBC_VED,coat_thickness,event_time,rain])
if cluster_no == 7:
cluster_GBPS.append([rBC_VED,coat_thickness,event_time,rain])
if (i % LOG_EVERY_N) == 0:
print 'record: ', i
conn.close()
print '# of particles', particles
print 'no_scat', no_scat
print 'fit_failure', fit_failure
print 'early_evap', early_evap
print 'flat_fit', flat_fit
print 'LF_high', LF_high
print '155-180', count_155_180, no_scat_155_180, early_evap_155_180
evap_pct = (early_evap)*100.0/particles
no_scat_pct = (no_scat)*100.0/particles
print evap_pct, no_scat_pct,
lists = [['cluster_1',cluster_1],['cluster_2',cluster_2],['cluster_3', cluster_3],['cluster_4',cluster_4],['cluster_5', cluster_5],['cluster_6',cluster_6],['cluster_GBPS',cluster_GBPS],['fresh',spikes]]
data_to_pickle = {}
for list in lists:
air_mass_name = list[0]
air_mass_info = list[1]
#print air_mass_name, np.median(air_mass_info), len(list[1])
data_to_pickle[air_mass_name] = air_mass_info
#save data
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/')
file = open('coating thicknesses by air mass for '+str(round(min_BC_VED,2)) +'nm to ' + str(round(max_BC_VED,2))+'nm-spikes_fixed-2hr_clusters-precip_amt.binpickl', 'w')
pickle.dump(data_to_pickle, file)
file.close()
| mit |
andaag/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative formula from ESLII
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
adammenges/statsmodels | statsmodels/tsa/vector_ar/var_model.py | 25 | 50516 | """
Vector Autoregression (VAR) processes
References
----------
Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
from __future__ import division, print_function
from statsmodels.compat.python import (range, lrange, string_types, StringIO, iteritems,
cStringIO)
from collections import defaultdict
import numpy as np
import numpy.linalg as npl
from numpy.linalg import cholesky as chol, solve
import scipy.stats as stats
import scipy.linalg as L
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.tools import chain_dot
from statsmodels.tools.linalg import logdet_symm
from statsmodels.tsa.tsatools import vec, unvec
from statsmodels.tsa.vector_ar.irf import IRAnalysis
from statsmodels.tsa.vector_ar.output import VARSummary
import statsmodels.tsa.tsatools as tsa
import statsmodels.tsa.vector_ar.output as output
import statsmodels.tsa.vector_ar.plotting as plotting
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
mat = np.array
#-------------------------------------------------------------------------------
# VAR process routines
def ma_rep(coefs, maxn=10):
r"""
MA(\infty) representation of VAR(p) process
Parameters
----------
coefs : ndarray (p x k x k)
maxn : int
Number of MA matrices to compute
Notes
-----
VAR(p) process as
.. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
can be equivalently represented as
.. math:: y_t = \mu + \sum_{i=0}^\infty \Phi_i u_{t-i}
e.g. can recursively compute the \Phi_i matrices with \Phi_0 = I_k
Returns
-------
phis : ndarray (maxn + 1 x k x k)
"""
p, k, k = coefs.shape
phis = np.zeros((maxn+1, k, k))
phis[0] = np.eye(k)
# recursively compute Phi matrices
for i in range(1, maxn + 1):
for j in range(1, i+1):
if j > p:
break
phis[i] += np.dot(phis[i-j], coefs[j-1])
return phis
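# Hedged illustration (not part of the library API): for a VAR(1) the recursion
# above collapses to Phi_i = A_1**i, which is easy to verify numerically with a
# small hypothetical coefficient matrix.
def _ma_rep_var1_check(maxn=3):
    A = np.array([[[0.5, 0.1], [0.0, 0.4]]])  # hypothetical (p=1, k=2, k=2) system
    phis = ma_rep(A, maxn=maxn)
    # each phis[i] should equal np.linalg.matrix_power(A[0], i)
    return phis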
def is_stable(coefs, verbose=False):
"""
Determine stability of VAR(p) system by examining the eigenvalues of the
VAR(1) representation
Parameters
----------
coefs : ndarray (p x k x k)
Returns
-------
is_stable : bool
"""
A_var1 = util.comp_matrix(coefs)
eigs = np.linalg.eigvals(A_var1)
if verbose:
print('Eigenvalues of VAR(1) rep')
for val in np.abs(eigs):
print(val)
return (np.abs(eigs) <= 1).all()
def var_acf(coefs, sig_u, nlags=None):
"""
Compute autocovariance function ACF_y(h) up to nlags of stable VAR(p)
process
Parameters
----------
coefs : ndarray (p x k x k)
Coefficient matrices A_i
sig_u : ndarray (k x k)
Covariance of white noise process u_t
nlags : int, optional
Defaults to order p of system
Notes
-----
Ref: Lutkepohl p.28-29
Returns
-------
acf : ndarray, (p, k, k)
"""
p, k, _ = coefs.shape
if nlags is None:
nlags = p
# p x k x k, ACF for lags 0, ..., p-1
result = np.zeros((nlags + 1, k, k))
result[:p] = _var_acf(coefs, sig_u)
# yule-walker equations
for h in range(p, nlags + 1):
# compute ACF for lag=h
# G(h) = A_1 G(h-1) + ... + A_p G(h-p)
for j in range(p):
result[h] += np.dot(coefs[j], result[h-j-1])
return result
def _var_acf(coefs, sig_u):
"""
Compute autocovariance function ACF_y(h) for h=1,...,p
Notes
-----
Lutkepohl (2005) p.29
"""
p, k, k2 = coefs.shape
assert(k == k2)
A = util.comp_matrix(coefs)
# construct VAR(1) noise covariance
SigU = np.zeros((k*p, k*p))
SigU[:k,:k] = sig_u
# vec(ACF) = (I_(kp)^2 - kron(A, A))^-1 vec(Sigma_U)
vecACF = L.solve(np.eye((k*p)**2) - np.kron(A, A), vec(SigU))
acf = unvec(vecACF)
acf = acf[:k].T.reshape((p, k, k))
return acf
def forecast(y, coefs, intercept, steps):
"""
Produce linear MSE forecast
Parameters
----------
y :
coefs :
intercept :
steps :
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lutkepohl p. 37
Also used by DynamicVAR class
"""
p = len(coefs)
k = len(coefs[0])
# initial value
forcs = np.zeros((steps, k)) + intercept
# h=0 forecast should be latest observation
# forcs[0] = y[-1]
# make indices easier to think about
for h in range(1, steps + 1):
# y_t(h) = intercept + sum_1^p A_i y_t_(h-i)
f = forcs[h - 1]
for i in range(1, p + 1):
# slightly hackish
if h - i <= 0:
# e.g. when h=1, h-1 = 0, which is y[-1]
prior_y = y[h - i - 1]
else:
# e.g. when h=2, h-1=1, which is forcs[0]
prior_y = forcs[h - i - 1]
# i=1 is coefs[0]
f = f + np.dot(coefs[i - 1], prior_y)
forcs[h - 1] = f
return forcs
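# Minimal usage sketch with hypothetical numbers (not called anywhere): a
# two-step forecast for a bivariate VAR(1) with zero intercept, driven by the
# recursion documented above.
def _forecast_var1_sketch():
    coefs = np.array([[[0.5, 0.1], [0.0, 0.4]]])  # (p, k, k) = (1, 2, 2)
    prior_y = np.array([[1.0, 2.0]])              # the last p observations
    return forecast(prior_y, coefs, np.zeros(2), 2)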
def forecast_cov(ma_coefs, sig_u, steps):
"""
Compute theoretical forecast error variance matrices
Parameters
----------
Returns
-------
forc_covs : ndarray (steps x neqs x neqs)
"""
k = len(sig_u)
forc_covs = np.zeros((steps, k, k))
prior = np.zeros((k, k))
for h in range(steps):
# Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
phi = ma_coefs[h]
var = chain_dot(phi, sig_u, phi.T)
forc_covs[h] = prior = prior + var
return forc_covs
def var_loglike(resid, omega, nobs):
r"""
Returns the value of the VAR(p) log-likelihood.
Parameters
----------
resid : ndarray (T x K)
omega : ndarray
Sigma hat matrix. Each element i,j is the average product of the
OLS residual for variable i and the OLS residual for variable j or
np.dot(resid.T,resid)/nobs. There should be no correction for the
degrees of freedom.
nobs : int
Returns
-------
llf : float
The value of the loglikelihood function for a VAR(p) model
Notes
-----
The loglikelihood function for the VAR(p) is
.. math::
-\left(\frac{T}{2}\right)
        \left(\ln\left|\Omega\right|+K\ln\left(2\pi\right)+K\right)
"""
logdet = logdet_symm(np.asarray(omega))
neqs = len(omega)
part1 = - (nobs * neqs / 2) * np.log(2 * np.pi)
part2 = - (nobs / 2) * (logdet + neqs)
return part1 + part2
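# Quick numeric reading of the formula above (a sketch, not a test): resid is
# not used in the computation, and with omega = I_K the value reduces to
# -(T/2) * (K*log(2*pi) + K); e.g. K=2, T=100 gives roughly -283.8.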
def _reordered(self, order):
#Create new arrays to hold rearranged results from .fit()
endog = self.endog
endog_lagged = self.endog_lagged
params = self.params
sigma_u = self.sigma_u
names = self.names
k_ar = self.k_ar
endog_new = np.zeros([np.size(endog,0),np.size(endog,1)])
endog_lagged_new = np.zeros([np.size(endog_lagged,0), np.size(endog_lagged,1)])
params_new_inc, params_new = [np.zeros([np.size(params,0), np.size(params,1)])
for i in range(2)]
sigma_u_new_inc, sigma_u_new = [np.zeros([np.size(sigma_u,0), np.size(sigma_u,1)])
for i in range(2)]
num_end = len(self.params[0])
names_new = []
#Rearrange elements and fill in new arrays
k = self.k_trend
for i, c in enumerate(order):
endog_new[:,i] = self.endog[:,c]
if k > 0:
params_new_inc[0,i] = params[0,i]
endog_lagged_new[:,0] = endog_lagged[:,0]
for j in range(k_ar):
params_new_inc[i+j*num_end+k,:] = self.params[c+j*num_end+k,:]
endog_lagged_new[:,i+j*num_end+k] = endog_lagged[:,c+j*num_end+k]
sigma_u_new_inc[i,:] = sigma_u[c,:]
names_new.append(names[c])
for i, c in enumerate(order):
params_new[:,i] = params_new_inc[:,c]
sigma_u_new[:,i] = sigma_u_new_inc[:,c]
return VARResults(endog=endog_new, endog_lagged=endog_lagged_new,
params=params_new, sigma_u=sigma_u_new,
lag_order=self.k_ar, model=self.model,
trend='c', names=names_new, dates=self.dates)
#-------------------------------------------------------------------------------
# VARProcess class: for known or unknown VAR process
class VAR(tsbase.TimeSeriesModel):
r"""
Fit VAR(p) process and do lag order selection
.. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
Parameters
----------
endog : array-like
2-d endogenous response variable. The independent variable.
dates : array-like
must match number of rows of endog
References
----------
Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
def __init__(self, endog, dates=None, freq=None, missing='none'):
super(VAR, self).__init__(endog, None, dates, freq, missing=missing)
if self.endog.ndim == 1:
raise ValueError("Only gave one variable to VAR")
self.y = self.endog #keep alias for now
self.neqs = self.endog.shape[1]
def _get_predict_start(self, start, k_ar):
if start is None:
start = k_ar
return super(VAR, self)._get_predict_start(start)
def predict(self, params, start=None, end=None, lags=1, trend='c'):
"""
Returns in-sample predictions or forecasts
"""
start = self._get_predict_start(start, lags)
end, out_of_sample = self._get_predict_end(end)
if end < start:
raise ValueError("end is before start")
if end == start + out_of_sample:
return np.array([])
k_trend = util.get_trendorder(trend)
k = self.neqs
k_ar = lags
predictedvalues = np.zeros((end + 1 - start + out_of_sample, k))
if k_trend != 0:
intercept = params[:k_trend]
predictedvalues += intercept
y = self.y
X = util.get_var_endog(y, lags, trend=trend, has_constant='raise')
fittedvalues = np.dot(X, params)
fv_start = start - k_ar
pv_end = min(len(predictedvalues), len(fittedvalues) - fv_start)
fv_end = min(len(fittedvalues), end-k_ar+1)
predictedvalues[:pv_end] = fittedvalues[fv_start:fv_end]
if not out_of_sample:
return predictedvalues
# fit out of sample
y = y[-k_ar:]
coefs = params[k_trend:].reshape((k_ar, k, k)).swapaxes(1,2)
predictedvalues[pv_end:] = forecast(y, coefs, intercept, out_of_sample)
return predictedvalues
def fit(self, maxlags=None, method='ols', ic=None, trend='c',
verbose=False):
"""
Fit the VAR model
Parameters
----------
maxlags : int
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
        trend : str {"c", "ct", "ctt", "nc"}
"c" - add constant
"ct" - constant and trend
"ctt" - constant, linear and quadratic trend
"nc" - co constant, no trend
Note that these are prepended to the columns of the dataset.
Notes
-----
Lutkepohl pp. 146-153
Returns
-------
est : VARResults
"""
lags = maxlags
if trend not in ['c', 'ct', 'ctt', 'nc']:
raise ValueError("trend '{}' not supported for VAR".format(trend))
if ic is not None:
selections = self.select_order(maxlags=maxlags, verbose=verbose)
if ic not in selections:
raise Exception("%s not recognized, must be among %s"
% (ic, sorted(selections)))
lags = selections[ic]
if verbose:
print('Using %d based on %s criterion' % (lags, ic))
else:
if lags is None:
lags = 1
k_trend = util.get_trendorder(trend)
self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
self.nobs = len(self.endog) - lags
return self._estimate_var(lags, trend=trend)
def _estimate_var(self, lags, offset=0, trend='c'):
"""
lags : int
offset : int
Periods to drop from beginning-- for order selection so it's an
apples-to-apples comparison
trend : string or None
As per above
"""
# have to do this again because select_order doesn't call fit
self.k_trend = k_trend = util.get_trendorder(trend)
if offset < 0: # pragma: no cover
raise ValueError('offset must be >= 0')
y = self.y[offset:]
z = util.get_var_endog(y, lags, trend=trend, has_constant='raise')
y_sample = y[lags:]
# Lutkepohl p75, about 5x faster than stated formula
params = np.linalg.lstsq(z, y_sample)[0]
resid = y_sample - np.dot(z, params)
# Unbiased estimate of covariance matrix $\Sigma_u$ of the white noise
# process $u$
# equivalent definition
# .. math:: \frac{1}{T - Kp - 1} Y^\prime (I_T - Z (Z^\prime Z)^{-1}
# Z^\prime) Y
# Ref: Lutkepohl p.75
# df_resid right now is T - Kp - 1, which is a suggested correction
avobs = len(y_sample)
df_resid = avobs - (self.neqs * lags + k_trend)
sse = np.dot(resid.T, resid)
omega = sse / df_resid
varfit = VARResults(y, z, params, omega, lags, names=self.endog_names,
trend=trend, dates=self.data.dates, model=self)
return VARResultsWrapper(varfit)
def select_order(self, maxlags=None, verbose=True):
"""
Compute lag order selections based on each of the available information
criteria
Parameters
----------
maxlags : int
if None, defaults to 12 * (nobs/100.)**(1./4)
verbose : bool, default True
If True, print table of info criteria and selected orders
Returns
-------
selections : dict {info_crit -> selected_order}
"""
if maxlags is None:
maxlags = int(round(12*(len(self.endog)/100.)**(1/4.)))
ics = defaultdict(list)
for p in range(maxlags + 1):
# exclude some periods to same amount of data used for each lag
# order
result = self._estimate_var(p, offset=maxlags-p)
for k, v in iteritems(result.info_criteria):
ics[k].append(v)
selected_orders = dict((k, mat(v).argmin())
for k, v in iteritems(ics))
if verbose:
output.print_ic_table(ics, selected_orders)
return selected_orders
class VARProcess(object):
"""
Class represents a known VAR(p) process
Parameters
----------
coefs : ndarray (p x k x k)
intercept : ndarray (length k)
sigma_u : ndarray (k x k)
names : sequence (length k)
Returns
-------
**Attributes**:
"""
def __init__(self, coefs, intercept, sigma_u, names=None):
self.k_ar = len(coefs)
self.neqs = coefs.shape[1]
self.coefs = coefs
self.intercept = intercept
self.sigma_u = sigma_u
self.names = names
def get_eq_index(self, name):
"Return integer position of requested equation name"
return util.get_index(self.names, name)
def __str__(self):
output = ('VAR(%d) process for %d-dimensional response y_t'
% (self.k_ar, self.neqs))
output += '\nstable: %s' % self.is_stable()
output += '\nmean: %s' % self.mean()
return output
def is_stable(self, verbose=False):
"""Determine stability based on model coefficients
Parameters
----------
verbose : bool
Print eigenvalues of the VAR(1) companion
Notes
-----
        Checks whether det(I - Az) = 0 for some mod(z) <= 1; equivalently, the
        process is stable when all eigenvalues of the companion matrix lie
        inside the unit circle
"""
return is_stable(self.coefs, verbose=verbose)
def plotsim(self, steps=1000):
"""
Plot a simulation from the VAR(p) process for the desired number of
steps
"""
Y = util.varsim(self.coefs, self.intercept, self.sigma_u, steps=steps)
plotting.plot_mts(Y)
def mean(self):
r"""Mean of stable process
Lutkepohl eq. 2.1.23
.. math:: \mu = (I - A_1 - \dots - A_p)^{-1} \alpha
"""
return solve(self._char_mat, self.intercept)
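    # Worked example of the formula above (illustrative only): for a scalar
    # AR(1) process y_t = alpha + a*y_{t-1} + u_t the mean is alpha / (1 - a),
    # e.g. alpha = 1.0 and a = 0.5 give a process mean of 2.0.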
def ma_rep(self, maxn=10):
r"""Compute MA(:math:`\infty`) coefficient matrices
Parameters
----------
maxn : int
Number of coefficient matrices to compute
Returns
-------
coefs : ndarray (maxn x k x k)
"""
return ma_rep(self.coefs, maxn=maxn)
def orth_ma_rep(self, maxn=10, P=None):
r"""Compute Orthogonalized MA coefficient matrices using P matrix such
that :math:`\Sigma_u = PP^\prime`. P defaults to the Cholesky
decomposition of :math:`\Sigma_u`
Parameters
----------
maxn : int
Number of coefficient matrices to compute
P : ndarray (k x k), optional
            Matrix such that Sigma_u = PP', defaults to the Cholesky decomposition
Returns
-------
coefs : ndarray (maxn x k x k)
"""
if P is None:
P = self._chol_sigma_u
ma_mats = self.ma_rep(maxn=maxn)
return mat([np.dot(coefs, P) for coefs in ma_mats])
def long_run_effects(self):
"""Compute long-run effect of unit impulse
.. math::
\Psi_\infty = \sum_{i=0}^\infty \Phi_i
"""
return L.inv(self._char_mat)
@cache_readonly
def _chol_sigma_u(self):
return chol(self.sigma_u)
@cache_readonly
def _char_mat(self):
return np.eye(self.neqs) - self.coefs.sum(0)
def acf(self, nlags=None):
"""Compute theoretical autocovariance function
Returns
-------
acf : ndarray (p x k x k)
"""
return var_acf(self.coefs, self.sigma_u, nlags=nlags)
def acorr(self, nlags=None):
"""Compute theoretical autocorrelation function
Returns
-------
acorr : ndarray (p x k x k)
"""
return util.acf_to_acorr(self.acf(nlags=nlags))
def plot_acorr(self, nlags=10, linewidth=8):
"Plot theoretical autocorrelation function"
plotting.plot_full_acorr(self.acorr(nlags=nlags), linewidth=linewidth)
def forecast(self, y, steps):
"""Produce linear minimum MSE forecasts for desired number of steps
ahead, using prior values y
Parameters
----------
y : ndarray (p x k)
steps : int
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lutkepohl pp 37-38
"""
return forecast(y, self.coefs, self.intercept, steps)
def mse(self, steps):
"""
Compute theoretical forecast error variance matrices
Parameters
----------
steps : int
Number of steps ahead
Notes
-----
        .. math:: \mathrm{MSE}(h) = \sum_{i=0}^{h-1} \Phi_i \Sigma_u \Phi_i^\prime
Returns
-------
forc_covs : ndarray (steps x neqs x neqs)
"""
ma_coefs = self.ma_rep(steps)
k = len(self.sigma_u)
forc_covs = np.zeros((steps, k, k))
prior = np.zeros((k, k))
for h in range(steps):
# Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
phi = ma_coefs[h]
var = chain_dot(phi, self.sigma_u, phi.T)
forc_covs[h] = prior = prior + var
return forc_covs
forecast_cov = mse
def _forecast_vars(self, steps):
covs = self.forecast_cov(steps)
# Take diagonal for each cov
inds = np.arange(self.neqs)
return covs[:, inds, inds]
def forecast_interval(self, y, steps, alpha=0.05):
"""Construct forecast interval estimates assuming the y are Gaussian
Parameters
----------
Notes
-----
Lutkepohl pp. 39-40
Returns
-------
(lower, mid, upper) : (ndarray, ndarray, ndarray)
"""
assert(0 < alpha < 1)
q = util.norm_signif_level(alpha)
point_forecast = self.forecast(y, steps)
sigma = np.sqrt(self._forecast_vars(steps))
forc_lower = point_forecast - q * sigma
forc_upper = point_forecast + q * sigma
return point_forecast, forc_lower, forc_upper
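    # Concrete reading of the interval above (illustrative): with the default
    # alpha = 0.05, util.norm_signif_level returns q of about 1.96, so each
    # bound is the point forecast plus/minus 1.96 forecast standard deviations.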
#-------------------------------------------------------------------------------
# VARResults class
class VARResults(VARProcess):
"""Estimate VAR(p) process with fixed number of lags
Parameters
----------
endog : array
endog_lagged : array
params : array
sigma_u : array
lag_order : int
model : VAR model instance
trend : str {'nc', 'c', 'ct'}
names : array-like
List of names of the endogenous variables in order of appearance in `endog`.
dates
Returns
-------
**Attributes**
aic
bic
bse
coefs : ndarray (p x K x K)
Estimated A_i matrices, A_i = coefs[i-1]
cov_params
dates
detomega
df_model : int
df_resid : int
endog
endog_lagged
fittedvalues
fpe
intercept
info_criteria
k_ar : int
k_trend : int
llf
model
names
neqs : int
Number of variables (equations)
nobs : int
n_totobs : int
params
k_ar : int
Order of VAR process
params : ndarray (Kp + 1) x K
A_i matrices and intercept in stacked form [int A_1 ... A_p]
pvalues
names : list
variables names
resid
roots : array
The roots of the VAR process are the solution to
(I - coefs[0]*z - coefs[1]*z**2 ... - coefs[p-1]*z**k_ar) = 0.
        The roots are computed as the inverse eigenvalues of the companion
        matrix, and stability requires that they lie outside the unit circle.
sigma_u : ndarray (K x K)
Estimate of white noise process variance Var[u_t]
sigma_u_mle
stderr
    trendorder
tvalues
y :
ys_lagged
"""
_model_type = 'VAR'
def __init__(self, endog, endog_lagged, params, sigma_u, lag_order,
model=None, trend='c', names=None, dates=None):
self.model = model
self.y = self.endog = endog #keep alias for now
self.ys_lagged = self.endog_lagged = endog_lagged #keep alias for now
self.dates = dates
self.n_totobs, neqs = self.y.shape
self.nobs = self.n_totobs - lag_order
k_trend = util.get_trendorder(trend)
if k_trend > 0: # make this the polynomial trend order
trendorder = k_trend - 1
else:
trendorder = None
self.k_trend = k_trend
self.trendorder = trendorder
self.exog_names = util.make_lag_names(names, lag_order, k_trend)
self.params = params
# Initialize VARProcess parent class
# construct coefficient matrices
# Each matrix needs to be transposed
reshaped = self.params[self.k_trend:]
reshaped = reshaped.reshape((lag_order, neqs, neqs))
# Need to transpose each coefficient matrix
intercept = self.params[0]
coefs = reshaped.swapaxes(1, 2).copy()
super(VARResults, self).__init__(coefs, intercept, sigma_u, names=names)
def plot(self):
"""Plot input time series
"""
plotting.plot_mts(self.y, names=self.names, index=self.dates)
@property
def df_model(self):
"""Number of estimated parameters, including the intercept / trends
"""
return self.neqs * self.k_ar + self.k_trend
@property
def df_resid(self):
"Number of observations minus number of estimated parameters"
return self.nobs - self.df_model
@cache_readonly
def fittedvalues(self):
"""The predicted insample values of the response variables of the model.
"""
return np.dot(self.ys_lagged, self.params)
@cache_readonly
def resid(self):
"""Residuals of response variable resulting from estimated coefficients
"""
return self.y[self.k_ar:] - self.fittedvalues
def sample_acov(self, nlags=1):
return _compute_acov(self.y[self.k_ar:], nlags=nlags)
def sample_acorr(self, nlags=1):
acovs = self.sample_acov(nlags=nlags)
return _acovs_to_acorrs(acovs)
def plot_sample_acorr(self, nlags=10, linewidth=8):
"Plot theoretical autocorrelation function"
plotting.plot_full_acorr(self.sample_acorr(nlags=nlags),
linewidth=linewidth)
def resid_acov(self, nlags=1):
"""
Compute centered sample autocovariance (including lag 0)
Parameters
----------
nlags : int
Returns
-------
"""
return _compute_acov(self.resid, nlags=nlags)
def resid_acorr(self, nlags=1):
"""
Compute sample autocorrelation (including lag 0)
Parameters
----------
nlags : int
Returns
-------
"""
acovs = self.resid_acov(nlags=nlags)
return _acovs_to_acorrs(acovs)
@cache_readonly
def resid_corr(self):
"Centered residual correlation matrix"
return self.resid_acorr(0)[0]
@cache_readonly
def sigma_u_mle(self):
"""(Biased) maximum likelihood estimate of noise process covariance
"""
return self.sigma_u * self.df_resid / self.nobs
@cache_readonly
def cov_params(self):
"""Estimated variance-covariance of model coefficients
Notes
-----
Covariance of vec(B), where B is the matrix
[intercept, A_1, ..., A_p] (K x (Kp + 1))
Adjusted to be an unbiased estimator
Ref: Lutkepohl p.74-75
"""
z = self.ys_lagged
return np.kron(L.inv(np.dot(z.T, z)), self.sigma_u)
def cov_ybar(self):
r"""Asymptotically consistent estimate of covariance of the sample mean
.. math::
\sqrt(T) (\bar{y} - \mu) \rightarrow {\cal N}(0, \Sigma_{\bar{y}})\\
\Sigma_{\bar{y}} = B \Sigma_u B^\prime, \text{where } B = (I_K - A_1
- \cdots - A_p)^{-1}
Notes
-----
Lutkepohl Proposition 3.3
"""
Ainv = L.inv(np.eye(self.neqs) - self.coefs.sum(0))
return chain_dot(Ainv, self.sigma_u, Ainv.T)
#------------------------------------------------------------
# Estimation-related things
@cache_readonly
def _zz(self):
# Z'Z
return np.dot(self.ys_lagged.T, self.ys_lagged)
@property
def _cov_alpha(self):
"""
Estimated covariance matrix of model coefficients ex intercept
"""
# drop intercept and trend
return self.cov_params[self.k_trend*self.neqs:, self.k_trend*self.neqs:]
@cache_readonly
def _cov_sigma(self):
"""
Estimated covariance matrix of vech(sigma_u)
"""
D_K = tsa.duplication_matrix(self.neqs)
D_Kinv = npl.pinv(D_K)
sigxsig = np.kron(self.sigma_u, self.sigma_u)
return 2 * chain_dot(D_Kinv, sigxsig, D_Kinv.T)
@cache_readonly
def llf(self):
"Compute VAR(p) loglikelihood"
return var_loglike(self.resid, self.sigma_u_mle, self.nobs)
@cache_readonly
def stderr(self):
"""Standard errors of coefficients, reshaped to match in size
"""
stderr = np.sqrt(np.diag(self.cov_params))
return stderr.reshape((self.df_model, self.neqs), order='C')
bse = stderr # statsmodels interface?
@cache_readonly
def tvalues(self):
"""Compute t-statistics. Use Student-t(T - Kp - 1) = t(df_resid) to test
significance.
"""
return self.params / self.stderr
@cache_readonly
def pvalues(self):
"""Two-sided p-values for model coefficients from Student t-distribution
"""
return stats.t.sf(np.abs(self.tvalues), self.df_resid)*2
def plot_forecast(self, steps, alpha=0.05, plot_stderr=True):
"""
Plot forecast
"""
mid, lower, upper = self.forecast_interval(self.y[-self.k_ar:], steps,
alpha=alpha)
plotting.plot_var_forc(self.y, mid, lower, upper, names=self.names,
plot_stderr=plot_stderr)
# Forecast error covariance functions
def forecast_cov(self, steps=1):
r"""Compute forecast covariance matrices for desired number of steps
Parameters
----------
steps : int
Notes
-----
.. math:: \Sigma_{\hat y}(h) = \Sigma_y(h) + \Omega(h) / T
Ref: Lutkepohl pp. 96-97
Returns
-------
covs : ndarray (steps x k x k)
"""
mse = self.mse(steps)
omegas = self._omega_forc_cov(steps)
return mse + omegas / self.nobs
#Monte Carlo irf standard errors
def irf_errband_mc(self, orth=False, repl=1000, T=10,
signif=0.05, seed=None, burn=100, cum=False):
"""
Compute Monte Carlo integrated error bands assuming normally
distributed for impulse response functions
Parameters
----------
orth: bool, default False
            Compute orthogonalized impulse response error bands
repl: int
number of Monte Carlo replications to perform
T: int, default 10
number of impulse response periods
signif: float (0 < signif <1)
Significance level for error bars, defaults to 95% CI
seed: int
np.random.seed for replications
burn: int
number of initial observations to discard for simulation
cum: bool, default False
produce cumulative irf error bands
Notes
-----
Lutkepohl (2005) Appendix D
Returns
-------
Tuple of lower and upper arrays of ma_rep monte carlo standard errors
"""
neqs = self.neqs
mean = self.mean()
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
df_model = self.df_model
        nobs = self.nobs
        if seed is not None:
            np.random.seed(seed=seed)
ma_coll = np.zeros((repl, T+1, neqs, neqs))
if (orth == True and cum == True):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
orth_ma_rep(maxn=T).cumsum(axis=0)
elif (orth == True and cum == False):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
orth_ma_rep(maxn=T)
elif (orth == False and cum == True):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
ma_rep(maxn=T).cumsum(axis=0)
elif (orth == False and cum == False):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
ma_rep(maxn=T)
for i in range(repl):
            # discard the first `burn` observations to correct for starting-value bias
sim = util.varsim(coefs, intercept, sigma_u, steps=nobs+burn)
sim = sim[burn:]
ma_coll[i,:,:,:] = fill_coll(sim)
ma_sort = np.sort(ma_coll, axis=0) #sort to get quantiles
        index = (int(round(signif / 2 * repl)) - 1,
                 int(round((1 - signif / 2) * repl)) - 1)
lower = ma_sort[index[0],:, :, :]
upper = ma_sort[index[1],:, :, :]
return lower, upper
def irf_resim(self, orth=False, repl=1000, T=10,
seed=None, burn=100, cum=False):
"""
Simulates impulse response function, returning an array of simulations.
Used for Sims-Zha error band calculation.
Parameters
----------
orth: bool, default False
            Compute orthogonalized impulse response error bands
repl: int
number of Monte Carlo replications to perform
T: int, default 10
number of impulse response periods
seed: int
np.random.seed for replications
burn: int
number of initial observations to discard for simulation
cum: bool, default False
produce cumulative irf error bands
Notes
-----
        Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse Response." Econometrica 67: 1113-1155.
Returns
-------
Array of simulated impulse response functions
"""
neqs = self.neqs
mean = self.mean()
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
df_model = self.df_model
nobs = self.nobs
if seed is not None:
np.random.seed(seed=seed)
ma_coll = np.zeros((repl, T+1, neqs, neqs))
if (orth == True and cum == True):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
orth_ma_rep(maxn=T).cumsum(axis=0)
elif (orth == True and cum == False):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
orth_ma_rep(maxn=T)
elif (orth == False and cum == True):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
ma_rep(maxn=T).cumsum(axis=0)
elif (orth == False and cum == False):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
ma_rep(maxn=T)
for i in range(repl):
            # discard the first `burn` observations to correct for starting-value bias
sim = util.varsim(coefs, intercept, sigma_u, steps=nobs+burn)
sim = sim[burn:]
ma_coll[i,:,:,:] = fill_coll(sim)
return ma_coll
def _omega_forc_cov(self, steps):
# Approximate MSE matrix \Omega(h) as defined in Lut p97
G = self._zz
Ginv = L.inv(G)
# memoize powers of B for speedup
# TODO: see if can memoize better
B = self._bmat_forc_cov()
_B = {}
def bpow(i):
if i not in _B:
_B[i] = np.linalg.matrix_power(B, i)
return _B[i]
phis = self.ma_rep(steps)
sig_u = self.sigma_u
omegas = np.zeros((steps, self.neqs, self.neqs))
for h in range(1, steps + 1):
if h == 1:
omegas[h-1] = self.df_model * self.sigma_u
continue
om = omegas[h-1]
for i in range(h):
for j in range(h):
Bi = bpow(h - 1 - i)
Bj = bpow(h - 1 - j)
mult = np.trace(chain_dot(Bi.T, Ginv, Bj, G))
om += mult * chain_dot(phis[i], sig_u, phis[j].T)
omegas[h-1] = om
return omegas
def _bmat_forc_cov(self):
# B as defined on p. 96 of Lut
upper = np.zeros((1, self.df_model))
upper[0,0] = 1
lower_dim = self.neqs * (self.k_ar - 1)
I = np.eye(lower_dim)
lower = np.column_stack((np.zeros((lower_dim, 1)), I,
np.zeros((lower_dim, self.neqs))))
return np.vstack((upper, self.params.T, lower))
def summary(self):
"""Compute console output summary of estimates
Returns
-------
summary : VARSummary
"""
return VARSummary(self)
def irf(self, periods=10, var_decomp=None, var_order=None):
"""Analyze impulse responses to shocks in system
Parameters
----------
periods : int
var_decomp : ndarray (k x k), lower triangular
Must satisfy Omega = P P', where P is the passed matrix. Defaults to
Cholesky decomposition of Omega
var_order : sequence
Alternate variable order for Cholesky decomposition
Returns
-------
irf : IRAnalysis
"""
if var_order is not None:
raise NotImplementedError('alternate variable order not implemented'
' (yet)')
return IRAnalysis(self, P=var_decomp, periods=periods)
def fevd(self, periods=10, var_decomp=None):
"""
Compute forecast error variance decomposition ("fevd")
Returns
-------
fevd : FEVD instance
"""
return FEVD(self, P=var_decomp, periods=periods)
def reorder(self, order):
"""Reorder variables for structural specification
"""
if len(order) != len(self.params[0,:]):
raise ValueError("Reorder specification length should match number of endogenous variables")
#This convert order to list of integers if given as strings
if isinstance(order[0], string_types):
order_new = []
for i, nam in enumerate(order):
order_new.append(self.names.index(order[i]))
order = order_new
return _reordered(self, order)
#-------------------------------------------------------------------------------
# VAR Diagnostics: Granger-causality, whiteness of residuals, normality, etc.
def test_causality(self, equation, variables, kind='f', signif=0.05,
verbose=True):
"""Compute test statistic for null hypothesis of Granger-noncausality,
general function to test joint Granger-causality of multiple variables
Parameters
----------
equation : string or int
Equation to test for causality
variables : sequence (of strings or ints)
List, tuple, etc. of variables to test for Granger-causality
kind : {'f', 'wald'}
Perform F-test or Wald (chi-sq) test
signif : float, default 5%
Significance level for computing critical values for test,
defaulting to standard 0.95 level
Notes
-----
Null hypothesis is that there is no Granger-causality for the indicated
variables. The degrees of freedom in the F-test are based on the
number of variables in the VAR system, that is, degrees of freedom
are equal to the number of equations in the VAR times degree of freedom
of a single equation.
Returns
-------
results : dict
"""
if isinstance(variables, (string_types, int, np.integer)):
variables = [variables]
k, p = self.neqs, self.k_ar
# number of restrictions
N = len(variables) * self.k_ar
# Make restriction matrix
C = np.zeros((N, k ** 2 * p + k), dtype=float)
eq_index = self.get_eq_index(equation)
vinds = mat([self.get_eq_index(v) for v in variables])
# remember, vec is column order!
offsets = np.concatenate([k + k ** 2 * j + k * vinds + eq_index
for j in range(p)])
C[np.arange(N), offsets] = 1
# Lutkepohl 3.6.5
Cb = np.dot(C, vec(self.params.T))
middle = L.inv(chain_dot(C, self.cov_params, C.T))
# wald statistic
lam_wald = statistic = chain_dot(Cb, middle, Cb)
if kind.lower() == 'wald':
df = N
dist = stats.chi2(df)
elif kind.lower() == 'f':
statistic = lam_wald / N
df = (N, k * self.df_resid)
dist = stats.f(*df)
else:
raise Exception('kind %s not recognized' % kind)
pvalue = dist.sf(statistic)
crit_value = dist.ppf(1 - signif)
conclusion = 'fail to reject' if statistic < crit_value else 'reject'
results = {
'statistic' : statistic,
'crit_value' : crit_value,
'pvalue' : pvalue,
'df' : df,
'conclusion' : conclusion,
'signif' : signif
}
if verbose:
summ = output.causality_summary(results, variables, equation, kind)
print(summ)
return results
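    # Usage sketch with hypothetical variable names (not from this module):
    #   res = results.test_causality('y1', ['y2', 'y3'], kind='f')
    # rejects Granger-noncausality of y2 and y3 for y1 when res['statistic']
    # exceeds res['crit_value'], i.e. when res['conclusion'] == 'reject'.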
def test_whiteness(self, nlags=10, plot=True, linewidth=8):
"""
Test white noise assumption. Sample (Y) autocorrelations are compared
        with the standard :math:`2 / \sqrt{T}` bounds.
Parameters
----------
plot : boolean, default True
Plot autocorrelations with 2 / sqrt(T) bounds
"""
acorrs = self.sample_acorr(nlags)
bound = 2 / np.sqrt(self.nobs)
# TODO: this probably needs some UI work
if (np.abs(acorrs) > bound).any():
print('FAIL: Some autocorrelations exceed %.4f bound. '
'See plot' % bound)
else:
print('PASS: No autocorrelations exceed %.4f bound' % bound)
if plot:
fig = plotting.plot_full_acorr(acorrs[1:],
xlabel=np.arange(1, nlags+1),
err_bound=bound,
linewidth=linewidth)
fig.suptitle(r"ACF plots with $2 / \sqrt{T}$ bounds "
"for testing whiteness assumption")
def test_normality(self, signif=0.05, verbose=True):
"""
Test assumption of normal-distributed errors using Jarque-Bera-style
omnibus Chi^2 test
Parameters
----------
signif : float
Test significance threshold
Notes
-----
H0 (null) : data are generated by a Gaussian-distributed process
"""
Pinv = npl.inv(self._chol_sigma_u)
w = np.array([np.dot(Pinv, u) for u in self.resid])
b1 = (w ** 3).sum(0) / self.nobs
lam_skew = self.nobs * np.dot(b1, b1) / 6
b2 = (w ** 4).sum(0) / self.nobs - 3
lam_kurt = self.nobs * np.dot(b2, b2) / 24
lam_omni = lam_skew + lam_kurt
omni_dist = stats.chi2(self.neqs * 2)
omni_pvalue = omni_dist.sf(lam_omni)
crit_omni = omni_dist.ppf(1 - signif)
conclusion = 'fail to reject' if lam_omni < crit_omni else 'reject'
results = {
'statistic' : lam_omni,
'crit_value' : crit_omni,
'pvalue' : omni_pvalue,
'df' : self.neqs * 2,
'conclusion' : conclusion,
'signif' : signif
}
if verbose:
summ = output.normality_summary(results)
print(summ)
return results
@cache_readonly
def detomega(self):
r"""
Return determinant of white noise covariance with degrees of freedom
correction:
.. math::
\hat \Omega = \frac{T}{T - Kp - 1} \hat \Omega_{\mathrm{MLE}}
"""
return L.det(self.sigma_u)
@cache_readonly
def info_criteria(self):
"information criteria for lagorder selection"
nobs = self.nobs
neqs = self.neqs
lag_order = self.k_ar
free_params = lag_order * neqs ** 2 + neqs * self.k_trend
ld = logdet_symm(self.sigma_u_mle)
# See Lutkepohl pp. 146-150
aic = ld + (2. / nobs) * free_params
bic = ld + (np.log(nobs) / nobs) * free_params
hqic = ld + (2. * np.log(np.log(nobs)) / nobs) * free_params
fpe = ((nobs + self.df_model) / self.df_resid) ** neqs * np.exp(ld)
return {
'aic' : aic,
'bic' : bic,
'hqic' : hqic,
'fpe' : fpe
}
@property
def aic(self):
"Akaike information criterion"
return self.info_criteria['aic']
@property
def fpe(self):
"""Final Prediction Error (FPE)
Lutkepohl p. 147, see info_criteria
"""
return self.info_criteria['fpe']
@property
def hqic(self):
"Hannan-Quinn criterion"
return self.info_criteria['hqic']
@property
def bic(self):
"Bayesian a.k.a. Schwarz info criterion"
return self.info_criteria['bic']
@cache_readonly
def roots(self):
neqs = self.neqs
k_ar = self.k_ar
p = neqs * k_ar
arr = np.zeros((p,p))
arr[:neqs,:] = np.column_stack(self.coefs)
arr[neqs:,:-neqs] = np.eye(p-neqs)
roots = np.linalg.eig(arr)[0]**-1
idx = np.argsort(np.abs(roots))[::-1] # sort by reverse modulus
return roots[idx]
class VARResultsWrapper(wrap.ResultsWrapper):
_attrs = {'bse' : 'columns_eq', 'cov_params' : 'cov',
'params' : 'columns_eq', 'pvalues' : 'columns_eq',
'tvalues' : 'columns_eq', 'sigma_u' : 'cov_eq',
'sigma_u_mle' : 'cov_eq',
'stderr' : 'columns_eq'}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
_methods)
_wrap_methods.pop('cov_params') # not yet a method in VARResults
wrap.populate_wrapper(VARResultsWrapper, VARResults)
class FEVD(object):
"""
Compute and plot Forecast error variance decomposition and asymptotic
standard errors
"""
def __init__(self, model, P=None, periods=None):
self.periods = periods
self.model = model
self.neqs = model.neqs
self.names = model.model.endog_names
self.irfobj = model.irf(var_decomp=P, periods=periods)
self.orth_irfs = self.irfobj.orth_irfs
# cumulative impulse responses
irfs = (self.orth_irfs[:periods] ** 2).cumsum(axis=0)
rng = lrange(self.neqs)
mse = self.model.mse(periods)[:, rng, rng]
# lag x equation x component
fevd = np.empty_like(irfs)
for i in range(periods):
fevd[i] = (irfs[i].T / mse[i]).T
# switch to equation x lag x component
self.decomp = fevd.swapaxes(0, 1)
def summary(self):
buf = StringIO()
rng = lrange(self.periods)
for i in range(self.neqs):
ppm = output.pprint_matrix(self.decomp[i], rng, self.names)
buf.write('FEVD for %s\n' % self.names[i])
buf.write(ppm + '\n')
print(buf.getvalue())
def cov(self):
"""Compute asymptotic standard errors
Returns
-------
"""
raise NotImplementedError
def plot(self, periods=None, figsize=(10,10), **plot_kwds):
"""Plot graphical display of FEVD
Parameters
----------
periods : int, default None
Defaults to number originally specified. Can be at most that number
"""
import matplotlib.pyplot as plt
k = self.neqs
periods = periods or self.periods
fig, axes = plt.subplots(nrows=k, figsize=figsize)
fig.suptitle('Forecast error variance decomposition (FEVD)')
colors = [str(c) for c in np.arange(k, dtype=float) / k]
ticks = np.arange(periods)
limits = self.decomp.cumsum(2)
for i in range(k):
ax = axes[i]
this_limits = limits[i].T
handles = []
for j in range(k):
lower = this_limits[j - 1] if j > 0 else 0
upper = this_limits[j]
handle = ax.bar(ticks, upper - lower, bottom=lower,
color=colors[j], label=self.names[j],
**plot_kwds)
handles.append(handle)
ax.set_title(self.names[i])
# just use the last axis to get handles for plotting
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper right')
plotting.adjust_subplots(right=0.85)
#-------------------------------------------------------------------------------
def _compute_acov(x, nlags=1):
x = x - x.mean(0)
result = []
for lag in range(nlags + 1):
if lag > 0:
r = np.dot(x[lag:].T, x[:-lag])
else:
r = np.dot(x.T, x)
result.append(r)
return np.array(result) / len(x)
def _acovs_to_acorrs(acovs):
sd = np.sqrt(np.diag(acovs[0]))
return acovs / np.outer(sd, sd)
if __name__ == '__main__':
import statsmodels.api as sm
from statsmodels.tsa.vector_ar.util import parse_lutkepohl_data
import statsmodels.tools.data as data_util
np.set_printoptions(linewidth=140, precision=5)
sdata, dates = parse_lutkepohl_data('data/%s.dat' % 'e1')
names = sdata.dtype.names
data = data_util.struct_to_ndarray(sdata)
adj_data = np.diff(np.log(data), axis=0)
# est = VAR(adj_data, p=2, dates=dates[1:], names=names)
model = VAR(adj_data[:-16], dates=dates[1:-16], names=names)
# model = VAR(adj_data[:-16], dates=dates[1:-16], names=names)
est = model.fit(maxlags=2)
irf = est.irf()
y = est.y[-2:]
"""
# irf.plot_irf()
# i = 2; j = 1
# cv = irf.cum_effect_cov(orth=True)
# print np.sqrt(cv[:, j * 3 + i, j * 3 + i]) / 1e-2
# data = np.genfromtxt('Canada.csv', delimiter=',', names=True)
# data = data.view((float, 4))
"""
'''
mdata = sm.datasets.macrodata.load().data
mdata2 = mdata[['realgdp','realcons','realinv']]
names = mdata2.dtype.names
data = mdata2.view((float,3))
data = np.diff(np.log(data), axis=0)
import pandas as pn
df = pn.DataFrame.fromRecords(mdata)
df = np.log(df.reindex(columns=names))
df = (df - df.shift(1)).dropna()
model = VAR(df)
est = model.fit(maxlags=2)
irf = est.irf()
'''
| bsd-3-clause |
saiwing-yeung/scikit-learn | examples/plot_kernel_ridge_regression.py | 39 | 6259 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors',
zorder=2)
plt.scatter(X[:100], y[:100], c='k', label='data', zorder=1)
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7).astype(int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
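# Illustrative aside (not part of the original example): the docstring above
# notes that SVR's sparsity -- and therefore its prediction time -- depends on
# epsilon and C. A quick check is to refit with a widening epsilon-tube and
# count how many support vectors remain.
for eps in (0.1, 0.5, 1.0):
    n_sv = SVR(kernel='rbf', C=1e1, gamma=0.1,
               epsilon=eps).fit(X[:train_size], y[:train_size]).support_.shape[0]
    print("epsilon=%.1f -> %d of %d training samples kept as support vectors"
          % (eps, n_sv, train_size))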
| bsd-3-clause |
466152112/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 105 | 26588 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined folds so that the same folds are used for the different y targets
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test that liblinear fails when a class_weight dict is provided for a
# multiclass problem. It can, however, handle binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path give almost the same results. However since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/users/plotting/examples/custom_boxstyle02.py | 6 | 1983 | from matplotlib.path import Path
from matplotlib.patches import BoxStyle
import matplotlib.pyplot as plt
# We may derive from the matplotlib.patches.BoxStyle._Base class.
# You need to override the transmute method in this case.
class MyStyle(BoxStyle._Base):
"""
A simple box.
"""
def __init__(self, pad=0.3):
"""
The arguments need to be floating-point numbers and need to have
default values.
*pad*
amount of padding
"""
self.pad = pad
super(MyStyle, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
"""
Given the location and size of the box, return the path of
the box around it.
- *x0*, *y0*, *width*, *height* : location and size of the box
- *mutation_size* : a reference scale for the mutation.
Often, the *mutation_size* is the font size of the text.
You don't need to worry about the rotation as it is
automatically taken care of.
"""
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2.*pad, \
height + 2.*pad,
# boundary of the padded box
x0, y0 = x0-pad, y0-pad,
x1, y1 = x0+width, y0 + height
cp = [(x0, y0),
(x1, y0), (x1, y1), (x0, y1),
(x0-pad, (y0+y1)/2.), (x0, y0),
(x0, y0)]
com = [Path.MOVETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
# register the custom style
BoxStyle._style_list["angled"] = MyStyle
plt.figure(1, figsize=(3,3))
ax = plt.subplot(111)
ax.text(0.5, 0.5, "Test", size=30, va="center", ha="center", rotation=30,
bbox=dict(boxstyle="angled,pad=0.5", alpha=0.2))
del BoxStyle._style_list["angled"]
plt.show()
| gpl-2.0 |
omartinsky/PYBOR | yc_convention.py | 1 | 2249 | # Copyright © 2017 Ondrej Martinsky, All rights reserved
# http://github.com/omartinsky/pybor
from dataclasses import dataclass
from os.path import join, dirname
from yc_date import *
from pandas import *
import enum, os
import pandas as pd
from yc_helpers import enum_from_string, assert_type
class DCC(enum.Enum):
ACT365 = 0
ACT360 = 1
def get_denominator(self):
if self == DCC.ACT360:
return 360.
elif self == DCC.ACT365:
return 365.
assert False
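# Illustrative sketch (not part of the original module): a small helper showing
# how the DCC denominators above are typically consumed.
def year_fraction(days: float, dcc: DCC) -> float:
    """Accrual factor for `days` calendar days under day-count convention `dcc`."""
    return days / dcc.get_denominator()  # e.g. 90 days under ACT360 -> 0.25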
class CalculationType(enum.Enum):
PLAIN = 0
AVERAGING = 1
NONE = 2
class CouponFreq(enum.Enum):
CONTINUOUS = 0
DAILY = 1
QUARTERLY = 2
ZEROFREQ = 3
CONTINUOUS = CouponFreq.CONTINUOUS
DAILY = CouponFreq.DAILY
QUARTERLY = CouponFreq.QUARTERLY
ZEROFREQ = CouponFreq.ZEROFREQ
@dataclass
class Convention:
reset_frequency: Tenor
calculation_frequency: Tenor
payment_frequency: Tenor
dcc: DCC
class Conventions:
def __init__(self):
self.map = dict()
def get(self, convention_name):
if convention_name not in self.map:
raise BaseException("Unable to get convention %s" % convention_name)
return self.map[convention_name]
def conventions_from_file(file: str) -> Conventions:
"""
Notes:
Reset Frequency < Calculation Period Frequency indicates an averaging / OIS leg
Calculation Period Frequency < Payment Frequency indicates a compounding leg
"""
conventions = Conventions()
conventions.map = dict()
assert os.path.exists(file)
dataframe = pd.read_csv(file, delimiter='\t')
for index, row in dataframe.iterrows():
conv = Convention(
reset_frequency=Tenor(row['Reset Frequency']),
calculation_frequency=Tenor(row['Calculation Period Frequency']),
payment_frequency=Tenor(row['Payment Frequency']),
dcc=enum_from_string(DCC, row['Day Count Convention']),
)
assert index not in conventions.map
conventions.map[row['Index']] = conv
return conventions
global_conventions = conventions_from_file(join(dirname(__file__), 'conventions.txt'))
| mit |
q1ang/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# F-scores of the features, used below to check that the best features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
vermouthmjl/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
HeraclesHX/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
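# Illustrative aside (not part of the original example): for a few 1d points,
# PolynomialFeatures builds exactly the Vandermonde-style matrix of pseudo
# features described in the docstring -- columns [1, x, x**2, x**3] for degree 3.
_demo_x = np.array([[1.0], [2.0], [3.0]])
_demo_vandermonde = PolynomialFeatures(degree=3).fit_transform(_demo_x)
# array([[ 1.,  1.,  1.,  1.],
#        [ 1.,  2.,  4.,  8.],
#        [ 1.,  3.,  9., 27.]])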
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
bsipocz/scikit-image | doc/examples/plot_radon_transform.py | 17 | 8432 | """
===============
Radon transform
===============
In computed tomography, the tomography reconstruction problem is to obtain
a tomographic slice image from a set of projections [1]_. A projection is
formed by drawing a set of parallel rays through the 2D object of interest,
assigning the integral of the object's contrast along each ray to a single
pixel in the projection. A single projection of a 2D object is one dimensional.
To enable computed tomography reconstruction of the object, several projections
must be acquired, each of them corresponding to a different angle between the
rays with respect to the object. A collection of projections at several angles
is called a sinogram, which is a linear transform of the original image.
The inverse Radon transform is used in computed tomography to reconstruct
a 2D image from the measured projections (the sinogram). A practical, exact
implementation of the inverse Radon transform does not exist, but there are
several good approximate algorithms available.
As the inverse Radon transform reconstructs the object from a set of
projections, the (forward) Radon transform can be used to simulate a
tomography experiment.
This script performs the Radon transform to simulate a tomography experiment
and reconstructs the input image based on the resulting sinogram formed by
the simulation. Two methods for performing the inverse Radon transform
and reconstructing the original image are compared: The Filtered Back
Projection (FBP) and the Simultaneous Algebraic Reconstruction
Technique (SART).
For further information on tomographic reconstruction, see
- AC Kak, M Slaney, "Principles of Computerized Tomographic Imaging",
http://www.slaney.org/pct/pct-toc.html
- http://en.wikipedia.org/wiki/Radon_transform
The forward transform
=====================
As our original image, we will use the Shepp-Logan phantom. When calculating
the Radon transform, we need to decide how many projection angles we wish
to use. As a rule of thumb, the number of projections should be about the
same as the number of pixels there are across the object (to see why this
is so, consider how many unknown pixel values must be determined in the
reconstruction process and compare this to the number of measurements
provided by the projections), and we follow that rule here. Below is the
original image and its Radon transform, often known as its _sinogram_:
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage import data_dir
from skimage.transform import radon, rescale
image = imread(data_dir + "/phantom.png", as_grey=True)
image = rescale(image, scale=0.4)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Original")
ax1.imshow(image, cmap=plt.cm.Greys_r)
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=True)
ax2.set_title("Radon transform\n(Sinogram)")
ax2.set_xlabel("Projection angle (deg)")
ax2.set_ylabel("Projection position (pixels)")
ax2.imshow(sinogram, cmap=plt.cm.Greys_r,
extent=(0, 180, 0, sinogram.shape[0]), aspect='auto')
fig.subplots_adjust(hspace=0.4, wspace=0.5)
plt.show()
"""
.. image:: PLOT2RST.current_figure
Reconstruction with the Filtered Back Projection (FBP)
======================================================
The mathematical foundation of the filtered back projection is the Fourier
slice theorem [2]_. It uses Fourier transform of the projection and
interpolation in Fourier space to obtain the 2D Fourier transform of the image,
which is then inverted to form the reconstructed image. The filtered back
projection is among the fastest methods of performing the inverse Radon
transform. The only tunable parameter for the FBP is the filter, which is
applied to the Fourier transformed projections. It may be used to suppress
high frequency noise in the reconstruction. ``skimage`` provides a few
different options for the filter.
"""
from skimage.transform import iradon
reconstruction_fbp = iradon(sinogram, theta=theta, circle=True)
error = reconstruction_fbp - image
print('FBP rms reconstruction error: %.3g' % np.sqrt(np.mean(error**2)))
imkwargs = dict(vmin=-0.2, vmax=0.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Reconstruction\nFiltered back projection")
ax1.imshow(reconstruction_fbp, cmap=plt.cm.Greys_r)
ax2.set_title("Reconstruction error\nFiltered back projection")
ax2.imshow(reconstruction_fbp - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
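# Illustrative aside (not part of the original example): the FBP filter is
# selected through `iradon`'s `filter` keyword (name as in this version of
# scikit-image; later releases renamed it). A smoother window such as 'hann'
# damps high-frequency noise at the cost of some resolution.
reconstruction_hann = iradon(sinogram, theta=theta, circle=True, filter='hann')
error_hann = reconstruction_hann - image
print('FBP (hann filter) rms reconstruction error: %.3g'
      % np.sqrt(np.mean(error_hann**2)))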
"""
.. image:: PLOT2RST.current_figure
Reconstruction with the Simultaneous Algebraic Reconstruction Technique
=======================================================================
Algebraic reconstruction techniques for tomography are based on a
straightforward idea: for a pixelated image the value of a single ray in a
particular projection is simply a sum of all the pixels the ray passes through
on its way through the object. This is a way of expressing the forward Radon
transform. The inverse Radon transform can then be formulated as a (large) set
of linear equations. As each ray passes through a small fraction of the pixels
in the image, this set of equations is sparse, allowing iterative solvers for
sparse linear systems to tackle the system of equations. One iterative method
has been particularly popular, namely Kaczmarz' method [3]_, which has the
property that the solution will approach a least-squares solution of the
equation set.
The combination of the formulation of the reconstruction problem as a set
of linear equations and an iterative solver makes algebraic techniques
relatively flexible, hence some forms of prior knowledge can be incorporated
with relative ease.
``skimage`` provides one of the more popular variations of the algebraic
reconstruction techniques: the Simultaneous Algebraic Reconstruction Technique
(SART) [1]_ [4]_. It uses Kaczmarz' method [3]_ as the iterative solver. A good
reconstruction is normally obtained in a single iteration, making the method
computationally effective. Running one or more extra iterations will normally
improve the reconstruction of sharp, high frequency features and reduce the
mean squared error at the expense of increased high frequency noise (the user
will need to decide on what number of iterations is best suited to the problem
at hand). The implementation in ``skimage`` allows prior information in the
form of a lower and upper threshold on the reconstructed values to be supplied
to the reconstruction.
"""
from skimage.transform import iradon_sart
reconstruction_sart = iradon_sart(sinogram, theta=theta)
error = reconstruction_sart - image
print('SART (1 iteration) rms reconstruction error: %.3g'
% np.sqrt(np.mean(error**2)))
fig, ax = plt.subplots(2, 2, figsize=(8, 8.5))
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.set_title("Reconstruction\nSART")
ax1.imshow(reconstruction_sart, cmap=plt.cm.Greys_r)
ax2.set_title("Reconstruction error\nSART")
ax2.imshow(reconstruction_sart - image, cmap=plt.cm.Greys_r, **imkwargs)
# Run a second iteration of SART by supplying the reconstruction
# from the first iteration as an initial estimate
reconstruction_sart2 = iradon_sart(sinogram, theta=theta,
image=reconstruction_sart)
error = reconstruction_sart2 - image
print('SART (2 iterations) rms reconstruction error: %.3g'
% np.sqrt(np.mean(error**2)))
ax3.set_title("Reconstruction\nSART, 2 iterations")
ax3.imshow(reconstruction_sart2, cmap=plt.cm.Greys_r)
ax4.set_title("Reconstruction error\nSART, 2 iterations")
ax4.imshow(reconstruction_sart2 - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
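# Illustrative aside (not part of the original example): the prior-knowledge
# thresholds mentioned above are passed through `iradon_sart`'s `clip`
# argument (assuming this version's signature), e.g. constraining the
# reconstruction to the phantom's physical value range:
reconstruction_sart_clip = iradon_sart(sinogram, theta=theta, clip=(0, 1))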
"""
.. image:: PLOT2RST.current_figure
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic Imaging",
IEEE Press 1988. http://www.slaney.org/pct/pct-toc.html
.. [2] Wikipedia, Radon transform,
http://en.wikipedia.org/wiki/Radon_transform#Relationship_with_the_Fourier_transform
.. [3] S Kaczmarz, "Angenaeherte Aufloesung von Systemen linearer
Gleichungen", Bulletin International de l'Academie Polonaise des
Sciences et des Lettres 35 pp 355--357 (1937)
.. [4] AH Andersen, AC Kak, "Simultaneous algebraic reconstruction technique
(SART): a superior implementation of the ART algorithm", Ultrasonic
Imaging 6 pp 81--94 (1984)
"""
| bsd-3-clause |
dwnguyen/Titanic | titanictflearn.py | 1 | 8536 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 16 11:16:15 2017
@author: davidnguyen
Capable of achieving a .79904 accuracy on the test set of the Titanic Kaggle competition
"""
import tensorflow as tf
import math
import tflearn as tfl
import numpy as np
import pandas as pd
'''
#Get SigOP experiment for optimization purposes
from sigopt import Connection
conn = Connection(client_token= INSERT CLIENT TOKEN HERE )
experiment = conn.experiments(22981).fetch()
'''
tf.reset_default_graph()
# Load Data
df_data = pd.read_csv('train.csv', sep=',', usecols = [0, 2, 3, 4, 5, 6, 7, 8 ,9 ,10,11])
df_test = pd.read_csv('test.csv', sep=',', header = 0)
#Loads labels (Survived data)
_, labelsT = tfl.data_utils.load_csv('train.csv', target_column=1, columns_to_ignore = range(1), has_header = True,
categorical_labels=True, n_classes=2 )
labels = labelsT[0:591][:]
labelsCV = labelsT[591:][:]
#Fills any remaining unknown ages and replaces ages with bins of age ranges
def simplify_ages(df):
df.Age = df.Age.fillna(-0.5)
bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)
group_names = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
categories = pd.cut(df.Age, bins, labels=group_names)
df.Age = categories
return df
#Fills in unknown cabin values
def simplify_cabins(df):
df.Cabin = df.Cabin.fillna('N')
df.Cabin = df.Cabin.apply(lambda x: x[0])
return df
#Replaces fare floats with quartile ranges
def simplify_fares(df):
df.Fare = df.Fare.fillna(-0.5)
bins = (-1, 0, 8, 15, 31, 1000)
group_names = ['Unknown', '1_quartile', '2_quartile', '3_quartile', '4_quartile']
categories = pd.cut(df.Fare, bins, labels=group_names)
df.Fare = categories
return df
#Creates new column for number of family members
def simplify_fam(df):
df['numFam'] = df['SibSp'] +df['Parch']
return df
#Uses name prefixes to guess unknown ages
def format_name(df):
df['NamePrefix'] = df.Name.apply(lambda x: x.split(' ')[1])
for i in range(df['Pclass'].size):
if math.isnan(df.iloc[i]['Age']):
if df.iloc[i]['NamePrefix'] == 'Miss.':
df = df.set_value(i, 'Age', 21.784417475728155)
elif df.iloc[i]['NamePrefix'] == 'Mr.':
df = df.set_value(i, 'Age', 32.282918149466191)
elif df.iloc[i]['NamePrefix'] == 'Mrs.':
df = df.set_value(i, 'Age', 37.231707317073173)
elif df.iloc[i]['NamePrefix'] == 'Master.':
df = df.set_value(i, 'Age', 5.3669230769230776)
elif df.iloc[i]['NamePrefix'] == 'Rev.':
df = df.set_value(i, 'Age', 41.25)
elif df.iloc[i]['NamePrefix'] == 'Dr.':
df = df.set_value(i, 'Age', 43.571)
elif df.iloc[i]['NamePrefix'] == 'Major.':
df = df.set_value(i, 'Age', 48.5)
elif df.iloc[i]['NamePrefix'] == 'Col.':
df = df.set_value(i, 'Age', 54.0)
elif df.iloc[i]['NamePrefix'] == 'Mlle.':
df = df.set_value(i, 'Age', 24.0)
return df
#Removes unneccessary features
def drop_features(df):
return df.drop(['Ticket', 'Name', 'Embarked', 'PassengerId', 'NamePrefix', 'SibSp', 'Parch'], axis=1)
#Feature engineers in preparation for encoding
def transform_features(df):
df = format_name(df)
simplify_fam(df)
df = simplify_ages(df)
df = simplify_cabins(df)
df = simplify_fares(df)
df = drop_features(df)
return df
from sklearn import preprocessing
#Encodes all features
def encode_features(df_train, df_test):
features = ['Fare', 'Cabin', 'Age', 'Sex']
df_combined = pd.concat([df_train[features], df_test[features]])
for feature in features:
le = preprocessing.LabelEncoder()
le = le.fit(df_combined[feature])
df_train[feature] = le.transform(df_train[feature])
df_test[feature] = le.transform(df_test[feature])
return df_train, df_test
#Feature engineers and encodes data
df_data = transform_features(df_data)
df_test = transform_features(df_test)
df_data, df_test = encode_features(df_data,df_test)
#Extracts numpy arrays from dataFrames to feed into neural network
data = df_data.iloc[:591,:].as_matrix()
dataCV = df_data.iloc[591:,:].as_matrix()
dataTest = df_test.as_matrix()
#Trains and evaluates neural network with sigOpt recommended values
def evaluate_model(assignments):
layer_size = 50
net = tfl.input_data(shape=[None, 6])
net = tfl.fully_connected(net, layer_size,activation = 'relu')
net = tfl.dropout(net,assignments['dropout'])
net = tfl.fully_connected(net, layer_size,activation = 'relu')
net = tfl.dropout(net, assignments['dropout'])
net = tfl.fully_connected(net, layer_size,activation = 'relu')
net = tfl.dropout(net, assignments['dropout'])
net = tfl.fully_connected(net, 2, activation='softmax')
net = tfl.regression(net, optimizer = tfl.optimizers.Adam (learning_rate=0.001, beta1=0.9, beta2=.999, epsilon=1e-08, use_locking=False, name='Adam'))
model = tfl.DNN(net)
model.fit(data, labels, n_epoch=assignments['epochs'], batch_size=16, show_metric=True)
results = model.evaluate(data, labels)
print('Training data accuracy: ' + str(results[0]))
resultsCV = model.evaluate(dataCV, labelsCV)
print('CV accuracy: ' + str(resultsCV[0]))
print(resultsCV)
tf.reset_default_graph()
return resultsCV[0]
#Rounds neural network output to 1 or 0
def makePrediction(prediction, threshold):
a = [0]*len(prediction)
for i in range(len(prediction)):
if prediction[i]< threshold:
a[i] = 0
else:
a[i] = 1
return a
'''
#Optimizes neural network with sigOpt
def optimize():
conn.experiments(experiment.id).suggestions().delete()
for _ in range(100):
tf.reset_default_graph()
suggestion = conn.experiments(experiment.id).suggestions().create()
value = evaluate_model(suggestion.assignments)
conn.experiments(experiment.id).observations().create(
suggestion=suggestion.id,
value=value,
)
#Clears observations from sigOpt experiment in preparation for new optimization session
def clearObservations():
conn.experiments(experiment.id).observations().delete()
'''
'''
Optimal values
.79904
exportPredictions(50,0.001,.9,.999,0.7,1000)
.78469
exportPredictions(52, 0.0014885321593006061, .9, .999, 0.40, 249)
exportPredictions(52, 0.001, .9, .999, 0.40, 249)
Beta1 ?
Beta2 .85-.9
Layer Size 20-70
Learning Rate .006-.03
Dropout .9-1
Epochs 100-300
exportPredictions(52, 0.0014885321593006061, .15, .7, 0.9, 249)
(layer_size, learning_rate, beta1, beta2)
exportPredictions(52, 0.014885321593006061, .15, .7, 0.962923647182921, 249) Epochs = 249, Dropout = 0.962923647182921
exportPredictions(20, 0.012949984921935807,0.17574611159754028,0.8,1, 200 )
exportPredictions(41, 0.0016451543576259664,0.1389314441775895,0.6233181098845707,1, 200)
'''
#Trains and evaluates neural network with given values. Also outputs predictions on test data
def exportPredictions(layer_size, learning_rate, beta1, beta2,dropout,epochs):
tf.reset_default_graph()
net = tfl.input_data(shape=[None, 6])
net = tfl.fully_connected(net, layer_size,activation = 'relu')
net = tfl.dropout(net, dropout)
net = tfl.fully_connected(net, layer_size,activation = 'relu')
net = tfl.dropout(net, dropout)
net = tfl.fully_connected(net, layer_size,activation = 'relu')
net = tfl.dropout(net, dropout)
net = tfl.fully_connected(net, 2, activation='softmax')
net = tfl.regression(net, optimizer = tfl.optimizers.Adam (learning_rate=learning_rate, beta1=beta1, beta2=beta2, epsilon=1e-08, use_locking=False, name='Adam'))
model = tfl.DNN(net)
model.fit(data, labels, n_epoch=epochs, batch_size=16, show_metric=True)
results = model.evaluate(data, labels)
print('Training data accuracy: ' + str(results[0]))
resultsCV = model.evaluate(dataCV, labelsCV)
print('CV accuracy: ' + str(resultsCV[0]))
testPredictPerc = model.predict(dataTest)
testPredictPerc = np.delete(testPredictPerc, 0,1)
testPredict = makePrediction(testPredictPerc, 0.5)
df = pd.DataFrame(testPredict)
df.index = range(892,len(df)+892)
df.columns = ['Survived']
df.index.names = ['PassengerId']
df.to_csv(path_or_buf = 'predictions.csv', sep=',')
| mit |
asreimer/davitpy_asr | models/raydarn/rt.py | 3 | 41400 | # Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
"""
*********************
**Module**: models.raydarn.rt
*********************
This module runs the raytracing code
**Classes**:
* :class:`models.raydarn.rt.RtRun`: run the code
* :class:`models.raydarn.rt.Scatter`: store and process modeled backscatter
* :class:`models.raydarn.rt.Edens`: store and process electron density profiles
* :class:`models.raydarn.rt.Rays`: store and process individual rays
.. note:: The ray tracing requires MPI to run. You can adjust the number of processes, but be wise about it and do not assign more than you have available.
"""
#########################################################################
# Main object
#########################################################################
class RtRun(object):
"""This class runs the raytracing code and processes the output
**Args**:
* [**sTime**] (datetime.datetime): start time UT
* [**eTime**] (datetime.datetime): end time UT (if not provided run for a single time sTime)
* [**rCode**] (str): radar 3-letter code
* [**radarObj**] (:class:`pydarn.radar.radar`): radar object (overrides rCode)
* [**dTime**] (float): time step in Hours
* [**freq**] (float): operating frequency [MHz]
* [**beam**] (int): beam number (if None run all beams)
* [**nhops**] (int): number of hops
* [**elev**] (tuple): (start elevation, end elevation, step elevation) [degrees]
* [**azim**] (tuple): (start azimuth, end azimuth, step azimuth) [degrees East] (overrides beam specification)
* [**hmf2**] (float): F2 peak altitude [km] (default: use IRI)
* [**nmf2**] (float): F2 peak electron density [log10(m^-3)] (default: use IRI)
* [**debug**] (bool): print some diagnostics of the fortran run and output processing
* [**fext**] (str): output file id, at most 10 characters long (mostly used in multi-user environments, such as a website)
* [**loadFrom**] (str): file name where a pickled instance of RtRun was saved (supersedes all other args)
* [**nprocs**] (int): number of processes to use with MPI
**Methods**:
* :func:`RtRun.readRays`
* :func:`RtRun.readEdens`
* :func:`RtRun.readScatter`
* :func:`RtRun.save`
* :func:`RtRun.load`
**Example**:
::
# Run a 2-hour ray trace from Blackstone on a random day
sTime = dt.datetime(2012, 11, 18, 5)
eTime = sTime + dt.timedelta(hours=2)
radar = 'bks'
# Save the results to your /tmp directory
rto = raydarn.RtRun(sTime, eTime, rCode=radar, outDir='/tmp')
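# (Illustrative addition) Once the run completes, the modeled rays and
# backscatter can be read back into memory with the reader methods listed above
rto.readRays()
rto.readScatter()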
"""
def __init__(self, sTime=None, eTime=None,
rCode=None, radarObj=None,
dTime=.5,
freq=11, beam=None, nhops=1,
elev=(5, 60, .1), azim=None,
hmf2=None, nmf2=None,
outDir=None,
debug=False,
fext=None,
loadFrom=None,
nprocs=4):
import datetime as dt
from os import path
from pydarn import radar
# Load pickled instance...
if loadFrom:
self.load(loadFrom)
# ...or get to work!
else:
# Load radar info
if radarObj:
self.radar = radarObj
elif rCode:
self.radar = radar.radar(code=rCode)
# Set azimuth
self.site = self.radar.getSiteByDate(sTime)
if beam and not azim:
az = self.site.beamToAzim(beam)
azim = (az, az, 1)
else:
az1 = self.site.beamToAzim(0)
az2 = self.site.beamToAzim(self.site.maxbeam-1)
azim = (az1, az2, self.site.bmsep)
self.azim = azim
self.beam = beam
# Set elevation
self.elev = elev
# Set time interval
if not sTime:
print 'No start time. Using now.'
sTime = dt.datetime.utcnow()
if not eTime:
eTime = sTime + dt.timedelta(minutes=1)
if eTime > sTime + dt.timedelta(days=1):
print 'The time interval requested is too large. Reducing to 1 day.'
eTime = sTime + dt.timedelta(days=1)
self.time = [sTime, eTime]
self.dTime = dTime
# Set frequency
self.freq = freq
# Set number of hops
self.nhops = nhops
# Set ionosphere
self.hmf2 = hmf2 if hmf2 else 0
self.nmf2 = nmf2 if nmf2 else 0
# Set output directory and file extension
if not outDir:
outDir = path.abspath( path.curdir )
self.outDir = path.join( outDir, '' )
self.fExt = '0' if not fext else fext
# Write input file
inputFile = self._genInput()
# Run the ray tracing
success = self._execute(nprocs, inputFile, debug=debug)
def _genInput(self):
"""Generate input file
"""
from os import path
fname = path.join(self.outDir, 'rtrun.{}.inp'.format(self.fExt))
with open(fname, 'w') as f:
f.write( "{:8.2f} Transmitter latitude (degrees N)\n".format( self.site.geolat ) )
f.write( "{:8.2f} Transmitter Longitude (degrees E\n".format( self.site.geolon ) )
f.write( "{:8.2f} Azimuth (degrees E) (begin)\n".format( self.azim[0] ) )
f.write( "{:8.2f} Azimuth (degrees E) (end)\n".format( self.azim[1] ) )
f.write( "{:8.2f} Azimuth (degrees E) (step)\n".format( self.azim[2] ) )
f.write( "{:8.2f} Elevation angle (begin)\n".format( self.elev[0] ) )
f.write( "{:8.2f} Elevation angle (end)\n".format( self.elev[1] ) )
f.write( "{:8.2f} Elevation angle (step)\n".format( self.elev[2] ) )
f.write( "{:8.2f} Frequency (Mhz)\n".format( self.freq ) )
f.write( "{:8d} nubmer of hops (minimum 1)\n".format( self.nhops) )
f.write( "{:8d} Year (yyyy)\n".format( self.time[0].year ) )
f.write( "{:8d} Month and day (mmdd)\n".format( self.time[0].month*100 + self.time[0].day ) )
tt = self.time[0].hour + self.time[0].minute/60.
tt += 25.
f.write( "{:8.2f} hour (add 25 for UT) (begin)\n".format( tt ) )
tt = self.time[1].hour + self.time[1].minute/60.
tt += (self.time[1].day - self.time[0].day) * 24.
tt += 25.
f.write( "{:8.2f} hour (add 25 for UT) (end)\n".format( tt ) )
f.write( "{:8.2f} hour (step)\n".format( self.dTime ) )
f.write( "{:8.2f} hmf2 (km, if 0 then ignored)\n".format( self.hmf2 ) )
f.write( "{:8.2f} nmf2 (log10, if 0 then ignored)\n".format( self.nmf2 ) )
return fname
def _execute(self, nprocs, inputFileName, debug=False):
"""Execute raytracing command
"""
import subprocess as subp
from os import path
command = ['mpiexec', '-n', '{}'.format(nprocs),
path.join(path.abspath( __file__.split('rt.py')[0] ), 'rtFort'),
inputFileName,
self.outDir,
self.fExt]
process = subp.Popen(command, shell=False, stdout=subp.PIPE, stderr=subp.STDOUT)
output = process.communicate()[0]
exitCode = process.returncode
if debug or (exitCode != 0):
print 'In:: {}'.format( command )
print 'Exit code:: {}'.format( exitCode )
print 'Returned:: \n', output
if (exitCode != 0):
raise Exception('Fortran execution error.')
else:
subp.call(['rm',inputFileName])
return True
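# Illustrative note (paths are hypothetical, not part of the run): with the
# defaults above, the assembled command is roughly equivalent to running
#   mpiexec -n 4 <module-dir>/rtFort <outDir>/rtrun.0.inp <outDir>/ 0
# from a shell, where rtrun.0.inp is the file written by _genInput and '0'
# is the default file extension id (fExt).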
def readRays(self, saveToAscii=None, debug=False):
"""Read rays.dat fortran output into dictionnary
**Args**:
* [**saveToAscii**] (str): output content to text file
* [**debug**] (bool): print some i/o diagnostics
**Returns**:
* Add a new member to :class:`rt.RtRun`: **rays**, of type :class:`rt.Rays`
"""
import subprocess as subp
from os import path
# File name and path
fName = path.join(self.outDir, 'rays.{}.dat'.format(self.fExt))
if hasattr(self, 'rays') and not path.exists(fName):
print 'The file is gone, and it seems you may already have read it into memory...?'
return
# Initialize rays output
self.rays = Rays(fName,
site=self.site, radar=self.radar,
saveToAscii=saveToAscii, debug=debug)
# Remove data file once it has been read
subp.call(['rm',fName])
def readEdens(self, debug=False):
"""Read edens.dat fortran output
**Args**:
* [**debug**] (bool): print some i/o diagnostics
**Returns**:
* Add a new member to :class:`rt.RtRun`: **ionos**, of type :class:`rt.Edens`
"""
import subprocess as subp
from os import path
# File name and path
fName = path.join(self.outDir, 'edens.{}.dat'.format(self.fExt))
if hasattr(self, 'ionos') and not path.exists(fName):
print 'The file is gone, and it seems you may already have read it into memory...?'
return
# Initialize rays output
self.ionos = Edens(fName,
site=self.site, radar=self.radar,
debug=debug)
# Remove data file once it has been read
subp.call(['rm',fName])
def readScatter(self, debug=False):
"""Read iscat.dat and gscat.dat fortran output
**Args**:
* [**debug**] (bool): print some i/o diagnostics
**Returns**:
* Add a new member to :class:`rt.RtRun`: **scatter**, of type :class:`rt.Scatter`
"""
import subprocess as subp
from os import path
# File name and path
isName = path.join(self.outDir, 'iscat.{}.dat'.format(self.fExt))
gsName = path.join(self.outDir, 'gscat.{}.dat'.format(self.fExt))
if hasattr(self, 'scatter') \
and (not path.exists(isName) \
or not path.exists(gsName)):
print 'The files are gone, and it seems you may already have read them into memory...?'
return
# Initialize rays output
self.scatter = Scatter(gsName, isName,
site=self.site, radar=self.radar,
debug=debug)
# Remove data files once they have been read (currently disabled)
# subp.call(['rm',isName])
# subp.call(['rm',gsName])
def save(self, filename):
"""Save :class:`rt.RtRun` to a file
"""
import cPickle as pickle
with open( filename, "wb" ) as f:
pickle.dump(self, f)
def load(self, filename):
"""Load :class:`rt.RtRun` from a file
"""
import cPickle as pickle
with open( filename, "rb" ) as f:
obj = pickle.load(f)
for k, v in obj.__dict__.items():
self.__dict__[k] = v
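# Usage sketch (hypothetical file path): persist a finished run and restore it
# later without re-running the fortran code; loadFrom supersedes all other
# constructor arguments.
#   rto.save('/tmp/rt_bks.pkl')
#   restored = RtRun(loadFrom='/tmp/rt_bks.pkl')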
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.clean()
def clean(self):
'''Clean-up files
'''
import subprocess as subp
from os import path
files = ['rays', 'edens', 'gscat', 'iscat']
for f in files:
fName = path.join(self.outDir, '{}.{}.dat'.format(f, self.fExt))
subp.call(['rm', fName])
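# Context-manager sketch: because __enter__/__exit__ are defined above, an RtRun
# can be used in a with-block so that clean() removes the generated .dat files on
# exit (sTime as in the class docstring example):
#   with RtRun(sTime, rCode='bks', outDir='/tmp') as rto:
#       rto.readRays()
#       ax, aax, cbax = rto.rays.plot(sTime)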
#########################################################################
# Electron densities
#########################################################################
class Edens(object):
"""Store and process electron density profiles after ray tracing
**Args**:
* **readFrom** (str): edens.dat file to read the electron density profiles from
* [**site**] (:class:`pydarn.radar.site`): radar site object
* [**radar**] (:class:`pydarn.radar.radar`): radar object
* [**debug**] (bool): verbose mode
**Methods**:
* :func:`Edens.readEdens`
* :func:`Edens.plot`
"""
def __init__(self, readFrom,
site=None, radar=None,
debug=False):
self.readFrom = readFrom
self.edens = {}
self.name = ''
if radar:
self.name = radar.code[0].upper()
# Read rays
self.readEdens(site=site, debug=debug)
def readEdens(self, site=None, debug=False):
"""Read edens.dat fortran output
**Args**:
* [**site**] (pydarn.radar.radStruct.site): site object of current radar
* [**debug**] (bool): print some i/o diagnostics
**Returns**:
* Populate member edens :class:`rt.Edens`
"""
from struct import unpack
import datetime as dt
from numpy import array
# Read binary file
with open(self.readFrom, 'rb') as f:
if debug:
print self.readFrom+' header: '
self.header = _readHeader(f, debug=debug)
self.edens = {}
while True:
bytes = f.read(2*4)
# Check for eof
if not bytes: break
# read hour and azimuth
hour, azim = unpack('2f', bytes)
# format time index
hour = hour - 25.
mm = self.header['mmdd']/100
dd = self.header['mmdd'] - mm*100
rtime = dt.datetime(self.header['year'], mm, dd) + dt.timedelta(hours=hour)
# format azimuth index (beam)
raz = site.azimToBeam(azim) if site else round(azim, 2)
# Initialize dicts
if rtime not in self.edens.keys(): self.edens[rtime] = {}
self.edens[rtime][raz] = {}
# Read edens dict
# self.edens[rtime][raz]['pos'] = array( unpack('{}f'.format(250*2),
# f.read(250*2*4)) )
self.edens[rtime][raz]['th'] = array( unpack('{}f'.format(250),
f.read(250*4)) )
self.edens[rtime][raz]['nel'] = array( unpack('{}f'.format(250*250),
f.read(250*250*4)) ).reshape((250,250), order='F')
self.edens[rtime][raz]['dip'] = array( unpack('{}f'.format(250*2),
f.read(250*2*4)) ).reshape((250,2), order='F')
def plot(self, time, beam=None, maxground=2000, maxalt=500,
nel_cmap='jet', nel_lim=[10, 12], title=False,
fig=None, rect=111, ax=None, aax=None):
"""Plot electron density profile
**Args**:
* **time** (datetime.datetime): time of profile
* [**beam**]: beam number
* [**maxground**]: maximum ground range [km]
* [**maxalt**]: highest altitude limit [km]
* [**nel_cmap**]: color map name for electron density index coloring
* [**nel_lim**]: electron density index plotting limits
* [**rect**]: subplot specification
* [**fig**]: A pylab.figure object (default to gcf)
* [**ax**]: Existing main axes
* [**aax**]: Existing auxiliary axes
* [**title**]: Show default title
**Returns**:
* **ax**: matplotlib.axes object containing formatting
* **aax**: matplotlib.axes object containing data
* **cbax**: matplotlib.axes object containing colorbar
**Example**:
::
# Show electron density profile
import datetime as dt
from models import raydarn
sTime = dt.datetime(2012, 11, 18, 5)
rto = raydarn.RtRun(sTime, rCode='bks', beam=12)
rto.readEdens() # read electron density into memory
ax, aax, cbax = rto.ionos.plot(sTime, title=True)
ax.grid()
written by Sebastien, 2013-04
"""
from utils import plotUtils
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
# Set up axes
if not ax and not aax:
ax, aax = plotUtils.curvedEarthAxes(fig=fig, rect=rect,
maxground=maxground, maxalt=maxalt)
else:
ax = ax
aax = aax
if hasattr(ax, 'time'):
time = ax.time
if hasattr(ax, 'beam'):
beam = ax.beam
# make sure that the required time and beam are present
assert (time in self.edens.keys()), 'Unknown time %s' % time
if beam:
assert (beam in self.edens[time].keys()), 'Unknown beam %s' % beam
else:
beam = self.edens[time].keys()[0]
X, Y = np.meshgrid(self.edens[time][beam]['th'], ax.Re + np.linspace(60,560,250))
im = aax.pcolormesh(X, Y, np.log10( self.edens[time][beam]['nel'] ),
vmin=nel_lim[0], vmax=nel_lim[1], cmap=nel_cmap)
# Plot title with date ut time and local time
if title:
stitle = _getTitle(time, beam, self.header, None)
ax.set_title( stitle )
# Add a colorbar
cbax = plotUtils.addColorbar(im, ax)
_ = cbax.set_ylabel(r"N$_{el}$ [$\log_{10}(m^{-3})$]")
ax.beam = beam
return ax, aax, cbax
#########################################################################
# Scatter
#########################################################################
class Scatter(object):
"""Stores and process ground and ionospheric scatter
**Args**:
* **readISFrom** (str): iscat.dat file to read the ionospheric scatter from
* **readGSFrom** (str): gscat.dat file to read the ground scatter from
* [**site**] (:class:`pydarn.radar.site`): radar site object
* [**debug**] (bool): verbose mode
**Methods**:
* :func:`Scatter.readGS`
* :func:`Scatter.readIS`
* :func:`Scatter.plot`
"""
def __init__(self, readGSFrom=None, readISFrom=None,
site=None, radar=None,
debug=False):
self.readISFrom = readISFrom
self.readGSFrom = readGSFrom
# Read ground scatter
if self.readGSFrom:
self.gsc = {}
self.readGS(site=site, debug=debug)
# Read ionospheric scatter
if self.readISFrom:
self.isc = {}
self.readIS(site=site, debug=debug)
def readGS(self, site=None, debug=False):
"""Read gscat.dat fortran output
**Args**:
* [**site**] (pydarn.radar.radStruct.site): site object of current radar
* [**debug**] (bool): print some i/o diagnostics
**Returns**:
* Populate member gsc :class:`rt.Scatter`
"""
from struct import unpack
import datetime as dt
import numpy as np
with open(self.readGSFrom, 'rb') as f:
# read header
if debug:
print self.readGSFrom+' header: '
self.header = _readHeader(f, debug=debug)
# Then read ray data, one ray at a time
while True:
bytes = f.read(3*4)
# Check for eof
if not bytes: break
# read time, azimuth and elevation
rhr, raz, rel = unpack('3f', bytes)
# Read remainder of the record
rr, tht, gran, lat, lon = unpack('5f', f.read(5*4))
# Convert azimuth to beam number
raz = site.azimToBeam(raz) if site else np.round(raz, 2)
# Adjust rel to 2 decimal
rel = np.around(rel, 2)
# convert time to python datetime
rhr = rhr - 25.
mm = self.header['mmdd']/100
dd = self.header['mmdd'] - mm*100
rtime = dt.datetime(self.header['year'], mm, dd) + dt.timedelta(hours=rhr)
# Create new entries in rays dict
if rtime not in self.gsc.keys(): self.gsc[rtime] = {}
if raz not in self.gsc[rtime].keys(): self.gsc[rtime][raz] = {}
if rel not in self.gsc[rtime][raz].keys():
self.gsc[rtime][raz][rel] = {
'r': np.empty(0),
'th': np.empty(0),
'gran': np.empty(0),
'lat': np.empty(0),
'lon': np.empty(0) }
self.gsc[rtime][raz][rel]['r'] = np.append( self.gsc[rtime][raz][rel]['r'], rr )
self.gsc[rtime][raz][rel]['th'] = np.append( self.gsc[rtime][raz][rel]['th'], tht )
self.gsc[rtime][raz][rel]['gran'] = np.append( self.gsc[rtime][raz][rel]['gran'], gran )
self.gsc[rtime][raz][rel]['lat'] = np.append( self.gsc[rtime][raz][rel]['lat'], lat )
self.gsc[rtime][raz][rel]['lon'] = np.append( self.gsc[rtime][raz][rel]['lon'], lon )
def readIS(self, site=None, debug=False):
"""Read iscat.dat fortran output
**Args**:
* [**site**] (pydarn.radar.radStruct.site): site object of current radar
* [**debug**] (bool): print some i/o diagnostics
**Returns**:
* Populate member isc :class:`rt.Scatter`
"""
from struct import unpack
import datetime as dt
from numpy import around, array
with open(self.readISFrom, 'rb') as f:
# read header
if debug:
print self.readISFrom+' header: '
self.header = _readHeader(f, debug=debug)
# Then read ray data, one ray at a time
while True:
bytes = f.read(4*4)
# Check for eof
if not bytes: break
# read number of ray steps, time, azimuth and elevation
nstp, rhr, raz, rel = unpack('4f', bytes)
nstp = int(nstp)
# Convert azimuth to beam number
raz = site.azimToBeam(raz) if site else around(raz, 2)
# Adjust rel to 2 decimal
rel = around(rel, 2)
# convert time to python datetime
rhr = rhr - 25.
mm = self.header['mmdd']/100
dd = self.header['mmdd'] - mm*100
rtime = dt.datetime(self.header['year'], mm, dd) + dt.timedelta(hours=rhr)
# Create new entries in rays dict
if rtime not in self.isc.keys(): self.isc[rtime] = {}
if raz not in self.isc[rtime].keys(): self.isc[rtime][raz] = {}
self.isc[rtime][raz][rel] = {}
# Read to paths dict
self.isc[rtime][raz][rel]['nstp'] = nstp
self.isc[rtime][raz][rel]['r'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['th'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['gran'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['rel'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['w'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['nr'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['lat'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['lon'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['h'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
def plot(self, time, beam=None, maxground=2000, maxalt=500,
iscat=True, gscat=True, title=False, weighted=False, cmap='hot_r',
fig=None, rect=111, ax=None, aax=None, zorder=4):
"""Plot scatter on ground/altitude profile
**Args**:
* **time** (datetime.datetime): time of profile
* [**beam**]: beam number
* [**iscat**] (bool): show ionospheric scatter
* [**gscat**] (bool): show ground scatter
* [**maxground**]: maximum ground range [km]
* [**maxalt**]: highest altitude limit [km]
* [**rect**]: subplot specification
* [**fig**]: A pylab.figure object (default to gcf)
* [**ax**]: Existing main axes
* [**aax**]: Existing auxiliary axes
* [**title**]: Show default title
* [**weighted**] (bool): plot ionospheric scatter relative strength (based on background density and range)
* [**cmap**]: colormap used for weighted ionospheric scatter
**Returns**:
* **ax**: matplotlib.axes object containing formatting
* **aax**: matplotlib.axes object containing data
* **cbax**: matplotlib.axes object containing colorbar
**Example**:
::
# Show ionospheric scatter
import datetime as dt
from models import raydarn
sTime = dt.datetime(2012, 11, 18, 5)
rto = raydarn.RtRun(sTime, rCode='bks', beam=12)
rto.readRays() # read rays into memory
ax, aax, cbax = rto.rays.plot(sTime, title=True)
rto.readScatter() # read scatter into memory
rto.scatter.plot(sTime, ax=ax, aax=aax)
ax.grid()
written by Sebastien, 2013-04
"""
from utils import plotUtils
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
# Set up axes
if not ax and not aax:
ax, aax = plotUtils.curvedEarthAxes(fig=fig, rect=rect,
maxground=maxground, maxalt=maxalt)
else:
ax = ax
aax = aax
if hasattr(ax, 'beam'):
beam = ax.beam
# make sure that the required time and beam are present
assert (time in self.isc.keys() or time in self.gsc.keys()), 'Unknown time %s' % time
if beam:
assert (beam in self.isc[time].keys()), 'Unknown beam %s' % beam
else:
beam = self.isc[time].keys()[0]
if gscat and time in self.gsc.keys():
for ir, (el, rays) in enumerate( sorted(self.gsc[time][beam].items()) ):
if len(rays['r']) == 0: continue
_ = aax.scatter(rays['th'], ax.Re*np.ones(rays['th'].shape),
color='0', zorder=zorder)
if iscat and time in self.isc.keys():
if weighted:
wmin = np.min( [ r['w'].min() for r in self.isc[time][beam].values() if r['nstp'] > 0] )
wmax = np.max( [ r['w'].max() for r in self.isc[time][beam].values() if r['nstp'] > 0] )
for ir, (el, rays) in enumerate( sorted(self.isc[time][beam].items()) ):
if rays['nstp'] == 0: continue
t = rays['th']
r = rays['r']*1e-3
spts = np.array([t, r]).T.reshape(-1, 1, 2)
h = rays['h']*1e-3
rel = np.radians( rays['rel'] )
r = np.sqrt( r**2 + h**2 + 2*r*h*np.sin( rel ) )
t = t + np.arcsin( h/r * np.cos( rel ) )
epts = np.array([t, r]).T.reshape(-1, 1, 2)
segments = np.concatenate([spts, epts], axis=1)
lcol = LineCollection( segments, zorder=zorder )
if weighted:
_ = lcol.set_cmap( cmap )
_ = lcol.set_norm( plt.Normalize(0, 1) )
_ = lcol.set_array( ( rays['w'] - wmin ) / wmax )
else:
_ = lcol.set_color('0')
_ = aax.add_collection( lcol )
# Plot title with date ut time and local time
if title:
stitle = _getTitle(time, beam, self.header, None)
ax.set_title( stitle )
# If weighted, plot ionospheric scatter with colormap
if weighted:
# Add a colorbar
cbax = plotUtils.addColorbar(lcol, ax)
_ = cbax.set_ylabel("Ionospheric Scatter")
else: cbax = None
ax.beam = beam
return ax, aax, cbax
#########################################################################
# Rays
#########################################################################
class Rays(object):
"""Store and process individual rays after ray tracing
**Args**:
* **readFrom** (str): rays.dat file to read the rays from
* [**site**] (:class:`pydarn.radar.site`): radar site object
* [**radar**] (:class:`pydarn.radar.radar`): radar object
* [**saveToAscii**] (str): file name where to output ray positions
* [**debug**] (bool): verbose mode
**Methods**:
* :func:`Rays.readRays`
* :func:`Rays.writeToAscii`
* :func:`Rays.plot`
"""
def __init__(self, readFrom,
site=None, radar=None,
saveToAscii=None, debug=False):
self.readFrom = readFrom
self.paths = {}
self.name = ''
if radar:
self.name = radar.code[0].upper()
# Read rays
self.readRays(site=site, debug=debug)
# If required, save to ascii
if saveToAscii:
self.writeToAscii(saveToAscii)
def readRays(self, site=None, debug=False):
"""Read rays.dat fortran output
**Args**:
* [**site**] (pydarn.radar.radStruct.site): site object of current radar
* [**debug**] (bool): print some i/o diagnostics
**Returns**:
* Populate member paths :class:`rt.Rays`
"""
from struct import unpack
import datetime as dt
from numpy import round, array
# Read binary file
with open(self.readFrom, 'rb') as f:
# read header
if debug:
print self.readFrom+' header: '
self.header = _readHeader(f, debug=debug)
# Then read ray data, one ray at a time
while True:
bytes = f.read(4*4)
# Check for eof
if not bytes: break
# read number of ray steps, time, azimuth and elevation
nrstep, rhr, raz, rel = unpack('4f', bytes)
nrstep = int(nrstep)
# Convert azimuth to beam number
raz = site.azimToBeam(raz) if site else round(raz, 2)
# convert time to python datetime
rhr = rhr - 25.
mm = self.header['mmdd']/100
dd = self.header['mmdd'] - mm*100
rtime = dt.datetime(self.header['year'], mm, dd) + dt.timedelta(hours=rhr)
# Create new entries in rays dict
if rtime not in self.paths.keys(): self.paths[rtime] = {}
if raz not in self.paths[rtime].keys(): self.paths[rtime][raz] = {}
self.paths[rtime][raz][rel] = {}
# Read to paths dict
self.paths[rtime][raz][rel]['nrstep'] = nrstep
self.paths[rtime][raz][rel]['r'] = array( unpack('{}f'.format(nrstep),
f.read(nrstep*4)) )
self.paths[rtime][raz][rel]['th'] = array( unpack('{}f'.format(nrstep),
f.read(nrstep*4)) )
self.paths[rtime][raz][rel]['gran'] = array( unpack('{}f'.format(nrstep),
f.read(nrstep*4)) )
# self.paths[rtime][raz][rel]['pran'] = array( unpack('{}f'.format(nrstep),
# f.read(nrstep*4)) )
self.paths[rtime][raz][rel]['nr'] = array( unpack('{}f'.format(nrstep),
f.read(nrstep*4)) )
def writeToAscii(self, fname):
"""Save rays to ASCII file (limited use)
"""
with open(fname, 'w') as f:
f.write('## HEADER ##\n')
[f.write('{:>10s}'.format(k)) for k in self.header.keys()]
f.write('\n')
for v in self.header.values():
if isinstance(v, float): strFmt = '{:10.2f}'
elif isinstance(v, int): strFmt = '{:10d}'
elif isinstance(v, str): strFmt = '{:10s}'
f.write(strFmt.format(v))
f.write('\n')
f.write('## RAYS ##\n')
for kt in sorted(self.paths.keys()):
f.write('Time: {:%Y %m %d %H %M}\n'.format(kt))
for kb in sorted(self.paths[kt].keys()):
f.write('--Beam/Azimuth: {}\n'.format(kb))
for ke in sorted(self.paths[kt][kb].keys()):
f.write('----Elevation: {:4.2f}\n'.format(ke))
f.write('------r\n')
[f.write('{:10.3f}\t'.format(r*1e-3)) for r in self.paths[kt][kb][ke]['r']]
f.write('\n')
f.write('------theta\n')
[f.write('{:10.5f}\t'.format(th)) for th in self.paths[kt][kb][ke]['th']]
f.write('\n')
def plot(self, time, beam=None,
maxground=2000, maxalt=500, step=1,
showrefract=False, nr_cmap='jet_r', nr_lim=[0.8, 1.],
raycolor='0.3', title=False, zorder=2, alpha=1,
fig=None, rect=111, ax=None, aax=None):
"""Plot ray paths
**Args**:
* **time** (datetime.datetime): time of rays
* [**beam**]: beam number
* [**maxground**]: maximum ground range [km]
* [**maxalt**]: highest altitude limit [km]
* [**step**]: step between each plotted ray (in number of ray steps)
* [**showrefract**]: show refractive index along ray paths (supersedes raycolor)
* [**nr_cmap**]: color map name for refractive index coloring
* [**nr_lim**]: refractive index plotting limits
* [**raycolor**]: color of ray paths
* [**rect**]: subplot specification
* [**fig**]: A pylab.figure object (default to gcf)
* [**title**]: Show default title
* [**ax**]: Existing main axes
* [**aax**]: Existing auxiliary axes
**Returns**:
* **ax**: matplotlib.axes object containing formatting
* **aax**: matplotlib.axes object containing data
* **cbax**: matplotlib.axes object containing colorbar
**Example**:
::
# Show ray paths with colored refractive index along path
import datetime as dt
from models import raydarn
sTime = dt.datetime(2012, 11, 18, 5)
rto = raydarn.RtRun(sTime, rCode='bks', beam=12)
rto.readRays() # read rays into memory
ax, aax, cbax = rto.rays.plot(sTime, step=10, showrefract=True, nr_lim=[.85,1])
ax.grid()
written by Sebastien, 2013-04
"""
from utils import plotUtils
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from types import MethodType
# Set up axes
if not ax and not aax:
ax, aax = plotUtils.curvedEarthAxes(fig=fig, rect=rect,
maxground=maxground, maxalt=maxalt)
else:
ax = ax
aax = aax
if hasattr(ax, 'time'):
time = ax.time
if hasattr(ax, 'beam'):
beam = ax.beam
# make sure that the required time and beam are present
assert (time in self.paths.keys()), 'Unknown time %s' % time
if beam:
assert (beam in self.paths[time].keys()), 'Unknown beam %s' % beam
else:
beam = self.paths[time].keys()[0]
for ir, (el, rays) in enumerate( sorted(self.paths[time][beam].items()) ):
if not ir % step:
if not showrefract:
aax.plot(rays['th'], rays['r']*1e-3, c=raycolor,
zorder=zorder, alpha=alpha)
else:
points = np.array([rays['th'], rays['r']*1e-3]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lcol = LineCollection( segments, zorder=zorder, alpha=alpha)
_ = lcol.set_cmap( nr_cmap )
_ = lcol.set_norm( plt.Normalize(*nr_lim) )
_ = lcol.set_array( rays['nr'] )
_ = aax.add_collection( lcol )
# Plot title with date ut time and local time
if title:
stitle = _getTitle(time, beam, self.header, self.name)
ax.set_title( stitle )
# Add a colorbar when plotting refractive index
if showrefract:
cbax = plotUtils.addColorbar(lcol, ax)
_ = cbax.set_ylabel("refractive index")
else: cbax = None
# Declare a new method to show range markers
# This method is only available after rays have been plotted
# This ensures that the markers match the plotted rays
def showRange(self, markers=None,
color='.8', s=2, zorder=3,
**kwargs):
"""Plot ray paths
**Args**:
* [**markers**]: range markers. Defaults to every 250 km
* All other keywords are borrowed from :func:`matplotlib.pyplot.scatter`
**Returns**:
* **coll**: a collection of range markers
**Example**:
::
# Add range markers to an existing ray plot
ax, aax, cbax = rto.rays.plot(sTime, step=10)
rto.rays.showRange()
written by Sebastien, 2013-04
"""
if not markers:
markers = np.arange(0, 5000, 250)
x, y = [], []
for el, rays in self.paths[time][beam].items():
for rm in markers:
inds = (rays['gran']*1e-3 >= rm)
if inds.any():
x.append( rays['th'][inds][0] )
y.append( rays['r'][inds][0]*1e-3 )
coll = aax.scatter(x, y,
color=color, s=s, zorder=zorder, **kwargs)
return coll
# End of new method
# Assign new method
self.showRange = MethodType(showRange, self)
ax.beam = beam
return ax, aax, cbax
#########################################################################
# Misc.
#########################################################################
def _readHeader(fObj, debug=False):
"""Read the header part of ray-tracing *.dat files
**Args**:
* **fObj**: file object
* [**debug**] (bool): print some i/o diagnostics
**Returns**:
* **header**: a dictionary of header values
"""
from struct import unpack
import datetime as dt
from collections import OrderedDict
import os
# Declare header parameters
params = ('nhour', 'nazim', 'nelev',
'tlat', 'tlon',
'saz', 'eaz', 'daz',
'sel', 'eel', 'del',
'freq', 'nhop', 'year', 'mmdd',
'shour', 'ehour', 'dhour',
'hmf2', 'nmf2')
# Read header
header = OrderedDict( zip( params, unpack('3i9f3i5f', fObj.read(3*4 + 9*4 + 3*4 + 5*4)) ) )
header['fext'] = unpack('10s', fObj.read(10))[0].strip()
header['outdir'] = unpack('100s', fObj.read(100))[0].strip()
# Only print header if in debug mode
if debug:
for k, v in header.items(): print '{:10s} :: {}'.format(k,v)
header.pop('fext'); header.pop('outdir')
return header
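# Note on the header layout unpacked above: the format '3i9f3i5f' maps one-to-one
# onto the 20 names in params -- 3 ints (nhour, nazim, nelev), 9 floats
# (tlat through freq), 3 ints (nhop, year, mmdd) and 5 floats (shour through nmf2)
# -- followed by two fixed-width strings (fext, outdir) that are read and then
# dropped from the returned dictionary.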
def _getTitle(time, beam, header, name):
"""Create a title for ground/altitude plots
**Args**:
* **time** (datetime.datetime): time shown in plot
* **beam**: beam shown in plot
* **header** (dict): header of fortran output file
* **name** (str): radar name
**Returns**:
* **title** (str): a title string
"""
from numpy import floor, round
utdec = time.hour + time.minute/60.
tlon = (header['tlon'] % 360.)
ctlon = tlon if tlon <=180. else tlon - 360.
ltdec = ( utdec + ( ctlon/360.*24.) ) % 24.
lthr = floor(ltdec)
ltmn = round( (ltdec - lthr)*60 )
title = '{:%Y-%b-%d at %H:%M} UT (~{:02.0f}:{:02.0f} LT)'.format(
time, lthr, ltmn)
title += '\n(IRI-2012) {} beam {}; freq {:.1f}MHz'.format(name, beam, header['freq'])
return title | gpl-3.0 |
pianomania/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 85 | 5600 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
eigen_solvers = ['dense', 'arpack']
# ----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
# ----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
# Test the error raised when parameter passed to lle is invalid
def test_lle_init_parameters():
X = np.random.rand(5, 3)
clf = manifold.LocallyLinearEmbedding(eigen_solver="error")
msg = "unrecognized eigen_solver 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
clf = manifold.LocallyLinearEmbedding(method="error")
msg = "unrecognized method 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
# regression test for #6033
def test_integer_input():
rand = np.random.RandomState(0)
X = rand.randint(0, 100, size=(20, 3))
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(method=method, n_neighbors=10)
clf.fit(X) # this previously raised a TypeError
| bsd-3-clause |
c22n/ion-channel-ABC | docs/examples/hl1/experiments/whole_cell.py | 1 | 3187 | import numpy as np
import pandas as pd
import myokit
from ionchannelABC.experiment import Experiment
ap_desc = """Action potential and calcium transient characteristics
from paced whole cell simulation. 80pA/pF for 0.5ms at 1Hz for 100s.
"""
# AP measurements
mdp, mdp_sem, mdp_n = -67, 2, 25 # maximum diastolic potential
mdp_sd = np.sqrt(mdp_n)*mdp_sem
dvdt_max, dvdt_max_sem, dvdt_max_n = 107, 7, 11 # maximum upstroke
dvdt_max_sd = np.sqrt(dvdt_max_n)*dvdt_max_sem
amp, amp_sem, amp_n = 105, 2, 11 # maximum amplitude of AP
amp_sd = np.sqrt(amp_n)*amp_sem
apd90, apd90_sem, apd90_n = 42, 9, 7 # 90% repolarisation of AP
apd90_sd = np.sqrt(apd90_n)*apd90_sem
# CaT measurements
t2p, t2p_sem, t2p_n = 59, 2, 6 # CaT time to peak
t2p_sd = np.sqrt(t2p_n)*t2p_sem
CaTR50, CaTR50_sem, CaTR50_n = 157, 6, 6 # CaT time to 50% repolarisation
CaTR50_sd = np.sqrt(CaTR50_n)*CaTR50_sem
CaTR90, CaTR90_sem, CaTR90_n = 397, 14, 6 # CaT time to 90% repolarisation
CaTR90_sd = np.sqrt(CaTR90_n)*CaTR90_sem
ap_dataset = [np.asarray([[0], [mdp], [mdp_sd**2]]),
np.asarray([[0], [dvdt_max], [dvdt_max_sd**2]]),
np.asarray([[0], [amp], [amp_sd**2]]),
np.asarray([[0], [apd90], [apd90_sd**2]]),
np.asarray([[0], [t2p], [t2p_sd**2]]),
np.asarray([[0], [CaTR50], [CaTR50_sd**2]]),
np.asarray([[0], [CaTR90], [CaTR90_sd**2]])]
ap_protocol = myokit.pacing.blocktrain(
period=1000, duration=2, limit=101, offset=2
)
ap_conditions = {'extra.Ca_o': 1.8e3,
'extra.K_o' : 4.0e3,
'extra.Na_o': 130e3,
'phys.T' : 295}
def ap_sum_stats(data):
output = []
d = data.trim_left(1000*100, adjust=True)
t = d['engine.time']
v = d['membrane.V']
CaT = d['calcium.Ca_i']
# maximum diastolic potential (most negative voltage)
mdp = np.min(v)
# maximum upstroke gradient
dvdt_max_idx = np.argmax(np.gradient(v, t))
dvdt_max = np.max(np.gradient(v, t))
# amplitude
peak_idx = np.argmax(v)
amp = np.max(v)-mdp
# action potential duration (90% repolarisation)
try:
decay = d.trim_left(t[peak_idx])['membrane.V']
apd90_idx = np.argwhere(decay < np.max(v)-0.9*amp)[0][0]
apd90 = t[peak_idx+apd90_idx] - t[dvdt_max_idx]
except:
apd90 = float('inf')
# CaT time to peak
peak_cat_idx = np.argmax(CaT)
cat_t2p = t[peak_cat_idx] - 2 # offset 2ms
if cat_t2p < 0:
cat_t2p = float('inf')
# CaT time to repolarisation 50% and 90%
peak_cat = np.max(CaT)
try:
decay = d.trim_left(t[peak_cat_idx])['calcium.Ca_i']
cat_r50_idx = np.argwhere(decay < peak_cat - 0.5*CaT[0])[0][0]
cat_r50 = t[peak_cat_idx+cat_r50_idx] - 2
cat_r90_idx = np.argwhere(decay < peak_cat - 0.9*CaT[0])[0][0]
cat_r90 = t[peak_cat_idx+cat_r90_idx] - 2
except:
cat_r50 = float('inf')
cat_r90 = float('inf')
return [mdp, dvdt_max, amp, apd90, cat_t2p, cat_r50, cat_r90]
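# Note: the list returned above is ordered to match ap_dataset, i.e.
# [mdp, dvdt_max, amp, apd90, CaT time-to-peak, CaT R50, CaT R90].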
ap = Experiment(
dataset=ap_dataset,
protocol=ap_protocol,
conditions=ap_conditions,
sum_stats=ap_sum_stats,
description=ap_desc,
Q10=None,
Q10_factor=0.
)
| gpl-3.0 |
harisbal/pandas | pandas/tests/io/formats/test_to_excel.py | 4 | 10954 | """Tests formatting as writer-agnostic ExcelCells
ExcelFormatter is tested implicitly in pandas/tests/io/test_excel.py
"""
import pytest
import pandas.util.testing as tm
from pandas.io.formats.excel import CSSToExcelConverter
from pandas.io.formats.css import CSSWarning
@pytest.mark.parametrize('css,expected', [
# FONT
# - name
('font-family: foo,bar', {'font': {'name': 'foo'}}),
('font-family: "foo bar",baz', {'font': {'name': 'foo bar'}}),
('font-family: foo,\nbar', {'font': {'name': 'foo'}}),
('font-family: foo, bar, baz', {'font': {'name': 'foo'}}),
('font-family: bar, foo', {'font': {'name': 'bar'}}),
('font-family: \'foo bar\', baz', {'font': {'name': 'foo bar'}}),
('font-family: \'foo \\\'bar\', baz', {'font': {'name': 'foo \'bar'}}),
('font-family: "foo \\"bar", baz', {'font': {'name': 'foo "bar'}}),
('font-family: "foo ,bar", baz', {'font': {'name': 'foo ,bar'}}),
# - family
('font-family: serif', {'font': {'name': 'serif', 'family': 1}}),
('font-family: Serif', {'font': {'name': 'serif', 'family': 1}}),
('font-family: roman, serif', {'font': {'name': 'roman', 'family': 1}}),
('font-family: roman, sans-serif', {'font': {'name': 'roman',
'family': 2}}),
('font-family: roman, sans serif', {'font': {'name': 'roman'}}),
('font-family: roman, sansserif', {'font': {'name': 'roman'}}),
('font-family: roman, cursive', {'font': {'name': 'roman', 'family': 4}}),
('font-family: roman, fantasy', {'font': {'name': 'roman', 'family': 5}}),
# - size
('font-size: 1em', {'font': {'size': 12}}),
('font-size: xx-small', {'font': {'size': 6}}),
('font-size: x-small', {'font': {'size': 7.5}}),
('font-size: small', {'font': {'size': 9.6}}),
('font-size: medium', {'font': {'size': 12}}),
('font-size: large', {'font': {'size': 13.5}}),
('font-size: x-large', {'font': {'size': 18}}),
('font-size: xx-large', {'font': {'size': 24}}),
('font-size: 50%', {'font': {'size': 6}}),
# - bold
('font-weight: 100', {'font': {'bold': False}}),
('font-weight: 200', {'font': {'bold': False}}),
('font-weight: 300', {'font': {'bold': False}}),
('font-weight: 400', {'font': {'bold': False}}),
('font-weight: normal', {'font': {'bold': False}}),
('font-weight: lighter', {'font': {'bold': False}}),
('font-weight: bold', {'font': {'bold': True}}),
('font-weight: bolder', {'font': {'bold': True}}),
('font-weight: 700', {'font': {'bold': True}}),
('font-weight: 800', {'font': {'bold': True}}),
('font-weight: 900', {'font': {'bold': True}}),
# - italic
('font-style: italic', {'font': {'italic': True}}),
('font-style: oblique', {'font': {'italic': True}}),
# - underline
('text-decoration: underline',
{'font': {'underline': 'single'}}),
('text-decoration: overline',
{}),
('text-decoration: none',
{}),
# - strike
('text-decoration: line-through',
{'font': {'strike': True}}),
('text-decoration: underline line-through',
{'font': {'strike': True, 'underline': 'single'}}),
('text-decoration: underline; text-decoration: line-through',
{'font': {'strike': True}}),
# - color
('color: red', {'font': {'color': 'FF0000'}}),
('color: #ff0000', {'font': {'color': 'FF0000'}}),
('color: #f0a', {'font': {'color': 'FF00AA'}}),
# - shadow
('text-shadow: none', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px #CCC', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px #999', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px', {'font': {'shadow': False}}),
('text-shadow: 2px -0em 0px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -2em 0px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -0em 2px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -0em 2px', {'font': {'shadow': True}}),
('text-shadow: 0px -2em', {'font': {'shadow': True}}),
# FILL
# - color, fillType
('background-color: red', {'fill': {'fgColor': 'FF0000',
'patternType': 'solid'}}),
('background-color: #ff0000', {'fill': {'fgColor': 'FF0000',
'patternType': 'solid'}}),
('background-color: #f0a', {'fill': {'fgColor': 'FF00AA',
'patternType': 'solid'}}),
# BORDER
# - style
('border-style: solid',
{'border': {'top': {'style': 'medium'},
'bottom': {'style': 'medium'},
'left': {'style': 'medium'},
'right': {'style': 'medium'}}}),
('border-style: solid; border-width: thin',
{'border': {'top': {'style': 'thin'},
'bottom': {'style': 'thin'},
'left': {'style': 'thin'},
'right': {'style': 'thin'}}}),
('border-top-style: solid; border-top-width: thin',
{'border': {'top': {'style': 'thin'}}}),
('border-top-style: solid; border-top-width: 1pt',
{'border': {'top': {'style': 'thin'}}}),
('border-top-style: solid',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: medium',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: 2pt',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: thick',
{'border': {'top': {'style': 'thick'}}}),
('border-top-style: solid; border-top-width: 4pt',
{'border': {'top': {'style': 'thick'}}}),
('border-top-style: dotted',
{'border': {'top': {'style': 'mediumDashDotDot'}}}),
('border-top-style: dotted; border-top-width: thin',
{'border': {'top': {'style': 'dotted'}}}),
('border-top-style: dashed',
{'border': {'top': {'style': 'mediumDashed'}}}),
('border-top-style: dashed; border-top-width: thin',
{'border': {'top': {'style': 'dashed'}}}),
('border-top-style: double',
{'border': {'top': {'style': 'double'}}}),
# - color
('border-style: solid; border-color: #0000ff',
{'border': {'top': {'style': 'medium', 'color': '0000FF'},
'right': {'style': 'medium', 'color': '0000FF'},
'bottom': {'style': 'medium', 'color': '0000FF'},
'left': {'style': 'medium', 'color': '0000FF'}}}),
('border-top-style: double; border-top-color: blue',
{'border': {'top': {'style': 'double', 'color': '0000FF'}}}),
('border-top-style: solid; border-top-color: #06c',
{'border': {'top': {'style': 'medium', 'color': '0066CC'}}}),
# ALIGNMENT
# - horizontal
('text-align: center',
{'alignment': {'horizontal': 'center'}}),
('text-align: left',
{'alignment': {'horizontal': 'left'}}),
('text-align: right',
{'alignment': {'horizontal': 'right'}}),
('text-align: justify',
{'alignment': {'horizontal': 'justify'}}),
# - vertical
('vertical-align: top',
{'alignment': {'vertical': 'top'}}),
('vertical-align: text-top',
{'alignment': {'vertical': 'top'}}),
('vertical-align: middle',
{'alignment': {'vertical': 'center'}}),
('vertical-align: bottom',
{'alignment': {'vertical': 'bottom'}}),
('vertical-align: text-bottom',
{'alignment': {'vertical': 'bottom'}}),
# - wrap_text
('white-space: nowrap',
{'alignment': {'wrap_text': False}}),
('white-space: pre',
{'alignment': {'wrap_text': False}}),
('white-space: pre-line',
{'alignment': {'wrap_text': False}}),
('white-space: normal',
{'alignment': {'wrap_text': True}}),
# NUMBER FORMAT
('number-format: 0%',
{'number_format': {'format_code': '0%'}}),
])
def test_css_to_excel(css, expected):
convert = CSSToExcelConverter()
assert expected == convert(css)
def test_css_to_excel_multiple():
convert = CSSToExcelConverter()
actual = convert('''
font-weight: bold;
text-decoration: underline;
color: red;
border-width: thin;
text-align: center;
vertical-align: top;
unused: something;
''')
assert {"font": {"bold": True, "underline": "single", "color": "FF0000"},
"border": {"top": {"style": "thin"},
"right": {"style": "thin"},
"bottom": {"style": "thin"},
"left": {"style": "thin"}},
"alignment": {"horizontal": "center",
"vertical": "top"}} == actual
@pytest.mark.parametrize('css,inherited,expected', [
('font-weight: bold', '',
{'font': {'bold': True}}),
('', 'font-weight: bold',
{'font': {'bold': True}}),
('font-weight: bold', 'font-style: italic',
{'font': {'bold': True, 'italic': True}}),
('font-style: normal', 'font-style: italic',
{'font': {'italic': False}}),
('font-style: inherit', '', {}),
('font-style: normal; font-style: inherit', 'font-style: italic',
{'font': {'italic': True}}),
])
def test_css_to_excel_inherited(css, inherited, expected):
convert = CSSToExcelConverter(inherited)
assert expected == convert(css)
@pytest.mark.parametrize("input_color,output_color", (
[(name, rgb) for name, rgb in CSSToExcelConverter.NAMED_COLORS.items()] +
[("#" + rgb, rgb) for rgb in CSSToExcelConverter.NAMED_COLORS.values()] +
[("#F0F", "FF00FF"), ("#ABC", "AABBCC")])
)
def test_css_to_excel_good_colors(input_color, output_color):
# see gh-18392
css = ("border-top-color: {color}; "
"border-right-color: {color}; "
"border-bottom-color: {color}; "
"border-left-color: {color}; "
"background-color: {color}; "
"color: {color}").format(color=input_color)
expected = dict()
expected["fill"] = {
"patternType": "solid",
"fgColor": output_color
}
expected["font"] = {
"color": output_color
}
expected["border"] = {
k: {
"color": output_color,
} for k in ("top", "right", "bottom", "left")
}
with tm.assert_produces_warning(None):
convert = CSSToExcelConverter()
assert expected == convert(css)
@pytest.mark.parametrize("input_color", [None, "not-a-color"])
def test_css_to_excel_bad_colors(input_color):
# see gh-18392
css = ("border-top-color: {color}; "
"border-right-color: {color}; "
"border-bottom-color: {color}; "
"border-left-color: {color}; "
"background-color: {color}; "
"color: {color}").format(color=input_color)
expected = dict()
if input_color is not None:
expected["fill"] = {
"patternType": "solid"
}
with tm.assert_produces_warning(CSSWarning):
convert = CSSToExcelConverter()
assert expected == convert(css)
| bsd-3-clause |
wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/sympy/external/tests/test_importtools.py | 91 | 1215 | from sympy.external import import_module
# fixes issue that arose in addressing issue 6533
def test_no_stdlib_collections():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections2():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections3():
'''make sure we get the right collections with no catch'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0')
if matplotlib:
assert collections != matplotlib.collections
| mit |
dsjoerg/esdb | scripts/army_correspondence_2.py | 1 | 2002 | #!/Users/david/local/bin/python
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from scipy.stats import norm
import numpy.lib.recfunctions
import math
leaguenames = ['Bronze', 'Silver', 'Gold', 'Platinum', 'Diamond', 'Master']
snaps = np.load("/Users/David/Dropbox/Daves_Docs/ggtracker/research/s2gs_20130215/snaps.npy")
# snaps = np.loadtxt('/Users/David/Dropbox/Daves_Docs/ggtracker/research/s2gs_20130215/s2gs_stats_2.txt.fixed.interp', dtype=[('match_id','i8'), ('gateway','S3'), ('winner','i1'), ('average_league','i1'), ('summary_league','i1'), ('race1','S1'), ('race2','S1'), ('seconds','i4'), ('army1','i4'), ('army2','i4'), ('income1','i4'), ('income2','i4'), ('duration_seconds','i4')])
print "Input file loaded"
# trueleague = np.where(snaps['summary_league'] > -1, snaps['summary_league'], snaps['average_league'])
# snaps = np.lib.recfunctions.append_fields(snaps, 'trueleague', trueleague, 'i1')
subsnap = dict()
for second in np.arange(5,21) * 60:
subsnap[second] = snaps[snaps['seconds'] == second]
print "Subsnaps arranged"
plt.rc('font', size=5)
for second in np.arange(5,21) * 60:
print "Doing {}".format(second)
plt.figure(figsize=(8,6), dpi=100)
fignum=1
for league in range(0,6):
plt.subplot(2,3,fignum)
fignum=fignum+1
leaguesnap = subsnap[second]['trueleague'] == league
xdesc = sp.stats.describe(subsnap[second][leaguesnap]['army1'])
ydesc = sp.stats.describe(subsnap[second][leaguesnap]['army2'])
xmin = 0
ymin = 0
xmax = xdesc[2] + 3.0 * math.sqrt(xdesc[3])
ymax = ydesc[2] + 3.0 * math.sqrt(ydesc[3])
plt.hexbin(subsnap[second][leaguesnap]['army1'], subsnap[second][leaguesnap]['army2'], cmap=plt.cm.YlOrRd_r, gridsize=30, extent=(0,xmax,0,ymax))
plt.title(leaguenames[league])
plt.savefig("army_correspondence_{}.pdf".format(second))
| gpl-3.0 |
OAkyildiz/dataset_tools | tile_maker.py | 1 | 2041 | #!/usr/bin/env python3
from os import listdir, makedirs, getcwd
from os.path import join, splitext
from sklearn.feature_extraction.image import extract_patches_2d
import cv2
import numpy as np
def main():
fldrs=[]
images=[]
classes={}
data_dir = None
data_fldr = None
# def imgs_from_fldr():
while not data_dir:
try:
data_dir = join(getcwd(), input('path for dataset folders: '))
fldrs=listdir(data_dir)
# keep only folder-like entries (drop any name containing a '.')
fldrs=[ elm for elm in fldrs if "." not in elm ]
except (FileNotFoundError, NotADirectoryError) as edir:
print("Invalid path")
data_dir=None
if not fldrs:
print("Empty directory")
data_dir=None
print('In ',data_dir)
while not data_fldr:
try:
idx=int(input('Choose from (0-%d): \n'%(len(fldrs)-1) + str(fldrs)+'\n'))
data_fldr=fldrs[idx]
except (IndexError, NotADirectoryError, TypeError, ValueError) as ei:
print('Invalid entry')
data_fldr = None
full_path=join(data_dir, data_fldr +'/originals')
# return full_path
#def main():
hw=int(input('patch size: '))
N=int(input('number of pathces: '))
#images=listdir(imgs_from_fldr())
images=listdir(full_path)
size = (hw, hw) #patch_size
sizetxt = str(size[0])+str(size[1])
save_path=join(full_path, '../'+input('Please name your patching attempt: '))
print(save_path)
makedirs(save_path, exist_ok=True)
rand = np.random.RandomState(0)
for img_name in images:
img=cv2.imread(join(full_path, img_name))
print(img_name)
###### ACTUAL STUFF #######
#Alternatively: tf.extract_image_patches
data = extract_patches_2d(img, size, max_patches=N, random_state=rand)
###########################
name_split=splitext(img_name)
count=0
for patch in data:
target=save_path+'/'+name_split[0]+'_'+ sizetxt +'_'+str(count)+'.jpg'
print(target)
cv2.imwrite(target, patch)
count+=1
print(len(data))
if __name__ == "__main__":
exit(main())
| mit |
Achuth17/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
liulohua/ml_tutorial | abalone_example/svm_classification.py | 1 | 1543 | import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
import os
# init parameters
root_data_dir = 'abalone_data'
train_file = os.path.join(root_data_dir, 'abalone_train.csv')
test_file = os.path.join(root_data_dir, 'abalone_test.csv')
# test_file = os.path.join(root_data_dir, 'abalone_predict.csv')
col_names = ['length', 'diameter', 'height',
'whole_weight', 'shucked_weight',
'viscera_weight', 'shell_weight', 'age']
# load data
df_train = pd.read_csv(train_file, names=col_names)
df_test = pd.read_csv(test_file, names=col_names)
train_data = df_train.as_matrix()
test_data = df_test.as_matrix()
features_train, targets_train = train_data[:, :-1], train_data[:, -1]
features_test, targets_test = test_data[:, :-1], test_data[:, -1]
# train ExtraTreesRegressor
# classifier = ExtraTreesRegressor(n_estimators=500,
# max_depth=20,
# min_samples_split=5,
# verbose=2)
# train NaiveGaussianBayes
# classifier = GaussianNB()
# train svm
classifier = svm.SVR()
classifier.fit(features_train , targets_train)
preds = classifier.predict(features_test).astype(np.int32)
print([(pred, target) for pred, target in zip(preds, targets_test.astype(np.int32))])
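# Optional follow-up (sketch, not in the original script): a crude exact-match
# accuracy on the rounded age predictions.
# acc = np.mean(preds == targets_test.astype(np.int32))
# print('exact-age accuracy: {:.3f}'.format(acc))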
| apache-2.0 |
Ambrosys/glyph | glyph/observer.py | 1 | 2678 | import logging
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
logger = logging.getLogger(__name__)
EMPTY = np.array(1)
def get_limits(x, factor=1.1):
"""Calculates the plot range given an array x."""
avg = np.nanmean(x)
range_ = np.nanmax(x) - np.nanmin(x)
if range_ == 0:
range_ = 0.5
return avg - range_ / 2 * factor, avg + range_ / 2 * factor
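# Worked example (sketch): for x = np.array([1.0, 2.0, 3.0]) and the default
# factor=1.1, avg is 2.0 and range_ is 2.0, so get_limits returns (0.9, 3.1).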
class ProgressObserver(object): # pragma: no cover
def __init__(self):
"""Animates the progress of the evolutionary optimization.
Note:
Uses matplotlib's interactive mode.
"""
logger.debug("The ProgressObserver needs an interactive matplotlib backend.")
logger.debug(f"Using {matplotlib.rcParams['backend']} as backend in matplotlib.")
logger.debug("Try export MPLBACKEND='TkAgg'")
plt.ion()
self.fig = None
self.axis = None
self.lines = []
self.t_key = "gen"
@staticmethod
def _update_plt(ax, line, *data):
x, y = data
x = np.array(x)
y = np.array(y)
ax.relim()
ax.autoscale_view()
line.set_xdata(x)
line.set_ydata(y)
def _blank_canvas(self, chapters):
self.fig, self.axes = plt.subplots(nrows=len(chapters) + 1)
for c, ax in zip(chapters, self.axes):
ax.set_xlabel(self.t_key)
ax.set_ylabel(c)
ax.set_title("Best " + c)
(line,) = ax.plot(EMPTY, EMPTY)
self.lines.append(line)
(line,) = self.axes[-1].plot(EMPTY, EMPTY, "o-")
self.axes[-1].set_xlabel(chapters[0])
self.axes[-1].set_ylabel(chapters[1])
self.axes[-1].set_title("Pareto Front")
self.lines.append(line)
plt.tight_layout()
def __call__(self, app):
"""
Note:
To be used as a callback in :class:`glyph.application.Application`.
Needs an interactive mpl backend.
Args:
app (glyph.application.Application)
"""
# see also https://github.com/matplotlib/matplotlib/issues/7886
if not matplotlib.is_interactive():
return
chapters = sorted(app.logbook.chapters.keys())
if self.fig is None:
self._blank_canvas(chapters)
t = app.logbook.select(self.t_key)
for c, ax, line in zip(chapters, self.axes, self.lines):
self._update_plt(ax, line, t, app.logbook.chapters[c].select("min"))
x, y = zip(*sorted([i.fitness.values for i in app.gp_runner.pareto_front]))
self._update_plt(self.axes[-1], self.lines[-1], x, y)
self.fig.canvas.draw()
| lgpl-3.0 |
kazemakase/scikit-learn | benchmarks/bench_covertype.py | 154 | 7296 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
ngoix/OCRF | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(X,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
stober/gridworld | src/boyan.py | 1 | 1624 | #! /usr/bin/env python
"""
Author: Jeremy M. Stober
Program: BOYAN_EXAMPLE.PY
Date: Tuesday, January 12 2010
Description: Simple MDP from Boyan 2002.
"""
import os, sys, getopt, pdb, string
import numpy as np
import numpy.random as npr
import random as pr
import numpy.linalg as la
import matplotlib as mpl
from markovdp import MP
class Boyan(MP):
def __init__(self):
self.nfeatures = 4
self.statefeatures = np.array([[0,0,0,1],[0,0,0.25,0.75], [0,0,0.5,0.5],[0,0,0.75,0.25],
[0,0,1,0],[0,0.25,0.75,0], [0,0.5,0.5,0],[0,0.75,0.25,0],
[0,1,0,0],[0.25,0.75,0,0], [0.5,0.5,0,0],[0.75,0.25,0,0],
[1,0,0,0]], dtype=float)
self.actionfeatures = np.array([[0,1],[1,0]], dtype = float)
self.endstates = [0]
MP.__init__(self, nstates = 13)
def terminal(self, state):
return state == 0
def initialize_model(self, a, i, j):
if i == 1 and j == 0:
return 1.0
elif i - 1 == j:
return 0.5
elif i - 2 == j:
return 0.5
elif i == 0 and j == 0:
return 1.0
else:
return 0.0
def initialize_rewards(self, a, i, j):
if i == 1 and j == 0:
return -2.0
elif i - 1 == j:
return -3.0
elif i - 2 == j:
return -3.0
else:
return 0.0
def vphi(self, state):
if self.terminal(state):
return np.zeros(4)
else:
return self.statefeatures[state]
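# Minimal usage sketch (added for exposition; it assumes the ``markovdp.MP``
# base class imported above is available, which is outside this file):
#
#     mp = Boyan()
#     mp.vphi(12)   # start-state feature vector [1, 0, 0, 0]
#     mp.vphi(0)    # terminal state maps to the zero vector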
| bsd-2-clause |
cbertinato/pandas | pandas/core/arrays/_ranges.py | 1 | 7335 | """
Helper functions to generate range-like data for DatetimeArray
(and possibly TimedeltaArray/PeriodArray)
"""
from typing import Tuple
import numpy as np
from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp
from pandas.tseries.offsets import DateOffset, Tick, generate_range
def generate_regular_range(start: Timestamp,
end: Timestamp,
periods: int,
freq: DateOffset) -> Tuple[np.ndarray, str]:
"""
Generate a range of dates with the spans between dates described by
the given `freq` DateOffset.
Parameters
----------
start : Timestamp or None
first point of produced date range
end : Timestamp or None
last point of produced date range
periods : int
number of periods in produced date range
freq : DateOffset
describes space between dates in produced date range
Returns
-------
    values : ndarray[np.int64]
        Nanosecond unix timestamps.
    tz : tzinfo or None
        Timezone inferred from the start/end endpoints, if any.
"""
if isinstance(freq, Tick):
stride = freq.nanos
if periods is None:
b = Timestamp(start).value
# cannot just use e = Timestamp(end) + 1 because arange breaks when
# stride is too large, see GH10887
e = (b + (Timestamp(end).value - b) // stride * stride +
stride // 2 + 1)
# end.tz == start.tz by this point due to _generate implementation
tz = start.tz
elif start is not None:
b = Timestamp(start).value
e = _generate_range_overflow_safe(b, periods, stride, side='start')
tz = start.tz
elif end is not None:
e = Timestamp(end).value + stride
b = _generate_range_overflow_safe(e, periods, stride, side='end')
tz = end.tz
else:
raise ValueError("at least 'start' or 'end' should be specified "
"if a 'period' is given.")
with np.errstate(over="raise"):
# If the range is sufficiently large, np.arange may overflow
# and incorrectly return an empty array if not caught.
try:
values = np.arange(b, e, stride, dtype=np.int64)
except FloatingPointError:
xdr = [b]
while xdr[-1] != e:
xdr.append(xdr[-1] + stride)
values = np.array(xdr[:-1], dtype=np.int64)
else:
tz = None
# start and end should have the same timezone by this point
if start is not None:
tz = start.tz
elif end is not None:
tz = end.tz
xdr = generate_range(start=start, end=end,
periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
return values, tz
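# --- Illustrative usage sketch (added for exposition; not part of pandas).
# Wrapped in a helper that is never called at import time. It assumes the
# public ``Hour`` offset, a Tick subclass, which exercises the fast path above.
def _example_generate_regular_range():
    from pandas.tseries.offsets import Hour

    values, tz = generate_regular_range(Timestamp("2019-01-01"),
                                        Timestamp("2019-01-02"),
                                        periods=None,
                                        freq=Hour())
    # ``values`` holds int64 nanosecond unix timestamps spaced one hour apart.
    return values, tz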
def _generate_range_overflow_safe(endpoint: int,
periods: int,
stride: int,
side: str = 'start') -> int:
"""
Calculate the second endpoint for passing to np.arange, checking
to avoid an integer overflow. Catch OverflowError and re-raise
as OutOfBoundsDatetime.
Parameters
----------
endpoint : int
nanosecond timestamp of the known endpoint of the desired range
periods : int
number of periods in the desired range
stride : int
nanoseconds between periods in the desired range
side : {'start', 'end'}
which end of the range `endpoint` refers to
Returns
-------
other_end : int
Raises
------
OutOfBoundsDatetime
"""
# GH#14187 raise instead of incorrectly wrapping around
assert side in ['start', 'end']
i64max = np.uint64(np.iinfo(np.int64).max)
msg = ('Cannot generate range with {side}={endpoint} and '
'periods={periods}'
.format(side=side, endpoint=endpoint, periods=periods))
with np.errstate(over="raise"):
# if periods * strides cannot be multiplied within the *uint64* bounds,
# we cannot salvage the operation by recursing, so raise
try:
addend = np.uint64(periods) * np.uint64(np.abs(stride))
except FloatingPointError:
raise OutOfBoundsDatetime(msg)
if np.abs(addend) <= i64max:
# relatively easy case without casting concerns
return _generate_range_overflow_safe_signed(
endpoint, periods, stride, side)
elif ((endpoint > 0 and side == 'start' and stride > 0) or
(endpoint < 0 and side == 'end' and stride > 0)):
# no chance of not-overflowing
raise OutOfBoundsDatetime(msg)
elif (side == 'end' and endpoint > i64max and endpoint - stride <= i64max):
# in _generate_regular_range we added `stride` thereby overflowing
# the bounds. Adjust to fix this.
return _generate_range_overflow_safe(endpoint - stride,
periods - 1, stride, side)
# split into smaller pieces
mid_periods = periods // 2
remaining = periods - mid_periods
assert 0 < remaining < periods, (remaining, periods, endpoint, stride)
midpoint = _generate_range_overflow_safe(endpoint, mid_periods,
stride, side)
return _generate_range_overflow_safe(midpoint, remaining, stride, side)
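# Worked example (added for exposition): for side='start' with endpoint=0,
# periods=3 and stride=2, the computed far endpoint is 0 + 3 * 2 = 6, so the
# caller's np.arange(0, 6, 2) yields exactly the three period starts 0, 2, 4.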
def _generate_range_overflow_safe_signed(endpoint: int,
periods: int,
stride: int,
side: str) -> int:
"""
A special case for _generate_range_overflow_safe where `periods * stride`
can be calculated without overflowing int64 bounds.
"""
assert side in ['start', 'end']
if side == 'end':
stride *= -1
with np.errstate(over="raise"):
addend = np.int64(periods) * np.int64(stride)
try:
# easy case with no overflows
return np.int64(endpoint) + addend
except (FloatingPointError, OverflowError):
# with endpoint negative and addend positive we risk
# FloatingPointError; with reversed signed we risk OverflowError
pass
# if stride and endpoint had opposite signs, then endpoint + addend
# should never overflow. so they must have the same signs
assert (stride > 0 and endpoint >= 0) or (stride < 0 and endpoint <= 0)
if stride > 0:
# watch out for very special case in which we just slightly
# exceed implementation bounds, but when passing the result to
# np.arange will get a result slightly within the bounds
assert endpoint >= 0
result = np.uint64(endpoint) + np.uint64(addend)
i64max = np.uint64(np.iinfo(np.int64).max)
assert result > i64max
if result <= i64max + np.uint64(stride):
return result
raise OutOfBoundsDatetime('Cannot generate range with '
'{side}={endpoint} and '
'periods={periods}'
.format(side=side, endpoint=endpoint,
periods=periods))
| bsd-3-clause |
sjsrey/pysal_core | pysal_core/examples/__init__.py | 2 | 2352 | import os
import _version
base = os.path.abspath(os.path.dirname(_version.__file__))
__all__ = ['get_path', 'available', 'explain']
file_2_dir = {}
example_dir = base
dirs = []
for root, subdirs, files in os.walk(example_dir, topdown=False):
for f in files:
file_2_dir[f] = root
head, tail = os.path.split(root)
if tail != 'examples':
dirs.append(tail)
def get_path(example_name):
"""
Get path of example folders
"""
if type(example_name) != str:
try:
example_name = str(example_name)
except:
raise KeyError('Cannot coerce requested example name to string')
if example_name in dirs:
return os.path.join(example_dir, example_name, example_name)
elif example_name in file_2_dir:
d = file_2_dir[example_name]
return os.path.join(d, example_name)
elif example_name == "":
return os.path.join(base, 'examples', example_name)
else:
raise KeyError(example_name + ' not found in PySAL built-in examples.')
def available(verbose=False):
"""
List available datasets
"""
examples = [os.path.join(base, d) for d in dirs]
if not verbose:
return [os.path.split(d)[-1] for d in examples]
examples = [os.path.join(dty, 'README.md') for dty in examples]
descs = [_read_example(path) for path in examples]
return [{desc['name']:desc['description'] for desc in descs}]
def _read_example(pth):
try:
with open(pth, 'r') as io:
title = io.readline().strip('\n')
io.readline() # titling
io.readline() # pad
short = io.readline().strip('\n')
io.readline() # subtitling
io.readline() # pad
rest = io.readlines()
rest = [l.strip('\n') for l in rest if l.strip('\n') != '']
d = {'name': title, 'description': short, 'explanation': rest}
except IOError:
basename = os.path.split(pth)[-2]
dirname = os.path.split(basename)[-1]
d = {'name': dirname, 'description': None, 'explanation': None}
return d
def explain(name): # would be nice to use pandas for display here
"""
Explain a dataset by name
"""
path = os.path.split(get_path(name))[0]
fpath = os.path.join(path, 'README.md')
return _read_example(fpath)
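# Hedged usage sketch (added for exposition; the dataset name below is only an
# illustration and availability may vary between installs):
#
#     available()               # names of the bundled example datasets
#     get_path('columbus.shp')  # absolute path to a file inside an example
#     explain('columbus')       # parsed README metadata for one dataset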
| bsd-3-clause |
OshynSong/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
AIML/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 22 | 16769 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    # Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LogisticRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
| bsd-3-clause |
precedenceguo/mxnet | example/bayesian-methods/bdk_demo.py | 45 | 15837 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import logging
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import argparse
import time
from algos import *
from data_loader import *
from utils import *
class CrossEntropySoftmax(mx.operator.NumpyOp):
def __init__(self):
super(CrossEntropySoftmax, self).__init__(False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = in_shape[0]
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape]
def forward(self, in_data, out_data):
x = in_data[0]
y = out_data[0]
y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
y /= y.sum(axis=1).reshape((x.shape[0], 1))
def backward(self, out_grad, in_data, out_data, in_grad):
l = in_data[1]
y = out_data[0]
dx = in_grad[0]
dx[:] = (y - l)
class LogSoftmax(mx.operator.NumpyOp):
def __init__(self):
super(LogSoftmax, self).__init__(False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = in_shape[0]
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape]
def forward(self, in_data, out_data):
x = in_data[0]
y = out_data[0]
y[:] = (x - x.max(axis=1, keepdims=True)).astype('float32')
y -= numpy.log(numpy.exp(y).sum(axis=1, keepdims=True)).astype('float32')
# y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
# y /= y.sum(axis=1).reshape((x.shape[0], 1))
def backward(self, out_grad, in_data, out_data, in_grad):
l = in_data[1]
y = out_data[0]
dx = in_grad[0]
dx[:] = (numpy.exp(y) - l).astype('float32')
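# Numerical sketch (added for exposition): for a single row x = [1, 2, 3] the
# forward pass above yields log-softmax values of roughly
# [-2.4076, -1.4076, -0.4076]; exponentiating them recovers the softmax
# probabilities [0.0900, 0.2447, 0.6652], which is what CrossEntropySoftmax
# computes directly.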
def classification_student_grad(student_outputs, teacher_pred):
return [student_outputs[0] - teacher_pred]
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
student_mean = student_outputs[0]
student_var = student_outputs[1]
grad_mean = nd.exp(-student_var) * (student_mean - teacher_pred)
grad_var = (1 - nd.exp(-student_var) * (nd.square(student_mean - teacher_pred)
+ 1.0 / teacher_noise_precision)) / 2
return [grad_mean, grad_var]
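# Note (added for exposition): the two gradients above are consistent with
# differentiating the per-sample loss
#     0.5 * (student_var + exp(-student_var) *
#            ((student_mean - teacher_pred)**2 + 1/teacher_noise_precision))
# with respect to the student mean and the student log-variance, i.e. an
# expected Gaussian negative log-likelihood used for regression distillation.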
def get_mnist_sym(output_op=None, num_hidden=400):
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='mnist_fc1', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc2', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu2', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc3', num_hidden=10)
if output_op is None:
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
else:
net = output_op(data=net, name='softmax')
return net
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
if grad is None:
grad = nd.empty(theta.shape, theta.context)
theta1 = theta.asnumpy()[0]
theta2 = theta.asnumpy()[1]
v1 = sigma1 ** 2
v2 = sigma2 ** 2
vx = sigmax ** 2
denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
-(X - theta1 - theta2) ** 2 / (2 * vx))
grad_npy = numpy.zeros(theta.shape)
grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
+ numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
X - theta1 - theta2) / vx) / denominator).sum() \
+ theta1 / v1
grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
X - theta1 - theta2) / vx) / denominator).sum() \
+ theta2 / v2
grad[:] = grad_npy
return grad
def get_toy_sym(teacher=True, teacher_noise_precision=None):
if teacher:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='teacher_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='teacher_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='teacher_fc2', num_hidden=1)
net = mx.symbol.LinearRegressionOutput(data=net, name='teacher_output',
grad_scale=teacher_noise_precision)
else:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='student_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='student_relu1', act_type="relu")
student_mean = mx.symbol.FullyConnected(data=net, name='student_mean', num_hidden=1)
student_var = mx.symbol.FullyConnected(data=net, name='student_var', num_hidden=1)
net = mx.symbol.Group([student_mean, student_var])
return net
def dev():
return mx.gpu()
def run_mnist_SGD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
net = get_mnist_sym()
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
exe, exe_params, _ = SGD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
X_test=X_test, Y_test=Y_test,
total_iter_num=1000000,
initializer=initializer,
lr=5E-6, prior_precision=1.0, minibatch_size=100)
def run_mnist_SGLD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
net = get_mnist_sym()
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
exe, sample_pool = SGLD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
X_test=X_test, Y_test=Y_test,
total_iter_num=1000000,
initializer=initializer,
learning_rate=4E-6, prior_precision=1.0, minibatch_size=100,
thin_interval=100, burn_in_iter_num=1000)
def run_mnist_DistilledSGLD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
if training_num >= 10000:
num_hidden = 800
total_iter_num = 1000000
teacher_learning_rate = 1E-6
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.1
else:
num_hidden = 400
total_iter_num = 20000
teacher_learning_rate = 4E-5
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.001
teacher_net = get_mnist_sym(num_hidden=num_hidden)
logsoftmax = LogSoftmax()
student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
student_initializer = BiasXavier(factor_type="in", magnitude=1)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
student_initializer=student_initializer,
teacher_initializer=teacher_initializer,
student_optimizing_algorithm="adam",
teacher_learning_rate=teacher_learning_rate,
student_learning_rate=student_learning_rate,
teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev())
def run_toy_SGLD():
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0 / 9.0
net = get_toy_sym(True, teacher_noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
initializer = mx.init.Uniform(0.07)
exe, params, _ = \
SGLD(sym=net, data_inputs=data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=50000,
initializer=initializer,
learning_rate=1E-4,
# lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
prior_precision=0.1,
burn_in_iter_num=1000,
thin_interval=10,
task='regression',
minibatch_size=minibatch_size, dev=dev())
def run_toy_DistilledSGLD():
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0
teacher_net = get_toy_sym(True, teacher_noise_precision)
student_net = get_toy_sym(False)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev())}
# 'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
teacher_initializer = mx.init.Uniform(0.07)
student_initializer = mx.init.Uniform(0.07)
student_grad_f = lambda student_outputs, teacher_pred: \
regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
teacher_initializer=teacher_initializer,
student_initializer=student_initializer,
teacher_learning_rate=1E-4, student_learning_rate=0.01,
# teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
student_grad_f=student_grad_f,
teacher_prior_precision=0.1, student_prior_precision=0.001,
perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
dev=dev())
def run_toy_HMC():
X, Y, X_test, Y_test = load_toy()
minibatch_size = Y.shape[0]
noise_precision = 1 / 9.0
net = get_toy_sym(True, noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
initializer = mx.init.Uniform(0.07)
sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test,
sample_num=300000, initializer=initializer, prior_precision=1.0,
learning_rate=1E-3, L=10, dev=dev())
def run_synthetic_SGLD():
theta1 = 0
theta2 = 1
sigma1 = numpy.sqrt(10)
sigma2 = 1
sigmax = numpy.sqrt(2)
X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
minibatch_size = 1
total_iter_num = 1000000
lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
factor=0.55)
optimizer = mx.optimizer.create('sgld',
learning_rate=None,
rescale_grad=1.0,
lr_scheduler=lr_scheduler,
wd=0)
updater = mx.optimizer.get_updater(optimizer)
theta = mx.random.normal(0, 1, (2,), mx.cpu())
grad = nd.empty((2,), mx.cpu())
samples = numpy.zeros((2, total_iter_num))
start = time.time()
for i in xrange(total_iter_num):
if (i + 1) % 100000 == 0:
end = time.time()
print("Iter:%d, Time spent: %f" % (i + 1, end - start))
start = time.time()
ind = numpy.random.randint(0, X.shape[0])
synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax, rescale_grad=
X.shape[0] / float(minibatch_size), grad=grad)
updater('theta', grad, theta)
samples[:, i] = theta.asnumpy()
plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
plt.colorbar()
plt.show()
if __name__ == '__main__':
numpy.random.seed(100)
mx.random.seed(100)
parser = argparse.ArgumentParser(
description="Examples in the paper [NIPS2015]Bayesian Dark Knowledge and "
"[ICML2011]Bayesian Learning via Stochastic Gradient Langevin Dynamics")
parser.add_argument("-d", "--dataset", type=int, default=1,
help="Dataset to use. 0 --> TOY, 1 --> MNIST, 2 --> Synthetic Data in "
"the SGLD paper")
parser.add_argument("-l", "--algorithm", type=int, default=2,
help="Type of algorithm to use. 0 --> SGD, 1 --> SGLD, other-->DistilledSGLD")
parser.add_argument("-t", "--training", type=int, default=50000,
help="Number of training samples")
args = parser.parse_args()
training_num = args.training
if args.dataset == 1:
if 0 == args.algorithm:
run_mnist_SGD(training_num)
elif 1 == args.algorithm:
run_mnist_SGLD(training_num)
else:
run_mnist_DistilledSGLD(training_num)
elif args.dataset == 0:
if 1 == args.algorithm:
run_toy_SGLD()
elif 2 == args.algorithm:
run_toy_DistilledSGLD()
elif 3 == args.algorithm:
run_toy_HMC()
else:
run_synthetic_SGLD()
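# Example invocations (added for exposition; the flags are defined by the
# argparse setup above):
#
#     python bdk_demo.py -d 1 -l 2 -t 50000   # MNIST, DistilledSGLD
#     python bdk_demo.py -d 0 -l 1            # toy regression, SGLD
#     python bdk_demo.py -d 2                 # synthetic SGLD example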
| apache-2.0 |
bthirion/scikit-learn | examples/covariance/plot_outlier_detection.py | 36 | 5023 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates three
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
| bsd-3-clause |
jmschrei/scikit-learn | examples/tree/unveil_tree_structure.py | 4 | 4825 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
        print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method allows to retrieve the node indicator functions. A non zero element of
# indicator matrix at the position (i, j) indicates that the sample i goes
# through the node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaf ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's do it for a single sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
    if leave_id[sample_id] == node_id:  # skip the leaf node, keep test nodes
continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
             X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
cdcapano/pycbc | pycbc/types/timeseries.py | 1 | 42221 | # Copyright (C) 2014 Tito Dal Canton, Josh Willis, Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Provides a class representing a time series.
"""
from __future__ import division
import os as _os, h5py
from pycbc.types.array import Array, _convert, complex_same_precision_as, zeros
from pycbc.types.array import _nocomplex
from pycbc.types.frequencyseries import FrequencySeries
import lal as _lal
import numpy as _numpy
from scipy.io.wavfile import write as write_wav
class TimeSeries(Array):
"""Models a time series consisting of uniformly sampled scalar values.
Parameters
----------
initial_array : array-like
Array containing sampled data.
delta_t : float
Time between consecutive samples in seconds.
epoch : {None, lal.LIGOTimeGPS}, optional
Time of the first sample in seconds.
dtype : {None, data-type}, optional
Sample data type.
copy : boolean, optional
If True, samples are copied to a new array.
Attributes
----------
delta_t
duration
start_time
end_time
sample_times
sample_rate
"""
def __init__(self, initial_array, delta_t=None,
epoch=None, dtype=None, copy=True):
if len(initial_array) < 1:
raise ValueError('initial_array must contain at least one sample.')
if delta_t is None:
try:
delta_t = initial_array.delta_t
except AttributeError:
raise TypeError('must provide either an initial_array with a delta_t attribute, or a value for delta_t')
if not delta_t > 0:
raise ValueError('delta_t must be a positive number')
# Get epoch from initial_array if epoch not given (or is None)
        # If the initial array has no epoch, set epoch to 0.
# If epoch is provided, use that.
if not isinstance(epoch, _lal.LIGOTimeGPS):
if epoch is None:
if isinstance(initial_array, TimeSeries):
epoch = initial_array._epoch
else:
epoch = _lal.LIGOTimeGPS(0)
elif epoch is not None:
try:
epoch = _lal.LIGOTimeGPS(epoch)
except:
raise TypeError('epoch must be either None or a lal.LIGOTimeGPS')
Array.__init__(self, initial_array, dtype=dtype, copy=copy)
self._delta_t = delta_t
self._epoch = epoch
def epoch_close(self, other):
""" Check if the epoch is close enough to allow operations """
dt = abs(float(self.start_time - other.start_time))
return dt <= 1e-7
def sample_rate_close(self, other):
""" Check if the sample rate is close enough to allow operations """
# compare our delta_t either to a another time series' or
# to a given sample rate (float)
if isinstance(other, TimeSeries):
odelta_t = other.delta_t
else:
odelta_t = 1.0/other
if (odelta_t - self.delta_t) / self.delta_t > 1e-4:
return False
if abs(1 - odelta_t / self.delta_t) * len(self) > 0.5:
return False
return True
def _return(self, ary):
return TimeSeries(ary, self._delta_t, epoch=self._epoch, copy=False)
def _typecheck(self, other):
if isinstance(other, TimeSeries):
if not self.sample_rate_close(other):
raise ValueError('different delta_t, {} vs {}'.format(
self.delta_t, other.delta_t))
if not self.epoch_close(other):
raise ValueError('different epoch, {} vs {}'.format(
self.start_time, other.start_time))
def _getslice(self, index):
# Set the new epoch---note that index.start may also be None
if index.start is None:
new_epoch = self._epoch
else:
if index.start < 0:
raise ValueError(('Negative start index ({})'
' not supported').format(index.start))
new_epoch = self._epoch + index.start * self._delta_t
if index.step is not None:
new_delta_t = self._delta_t * index.step
else:
new_delta_t = self._delta_t
return TimeSeries(Array._getslice(self, index), new_delta_t,
new_epoch, copy=False)
def prepend_zeros(self, num):
"""Prepend num zeros onto the beginning of this TimeSeries. Update also
epoch to include this prepending.
"""
self.resize(len(self) + num)
self.roll(num)
self._epoch = self._epoch - num * self._delta_t
def append_zeros(self, num):
"""Append num zeros onto the end of this TimeSeries.
"""
self.resize(len(self) + num)
def get_delta_t(self):
"""Return time between consecutive samples in seconds.
"""
return self._delta_t
delta_t = property(get_delta_t,
doc="Time between consecutive samples in seconds.")
def get_duration(self):
"""Return duration of time series in seconds.
"""
return len(self) * self._delta_t
duration = property(get_duration,
doc="Duration of time series in seconds.")
def get_sample_rate(self):
"""Return the sample rate of the time series.
"""
return 1.0/self.delta_t
sample_rate = property(get_sample_rate,
doc="The sample rate of the time series.")
def time_slice(self, start, end, mode='floor'):
"""Return the slice of the time series that contains the time range
in GPS seconds.
"""
if start < self.start_time:
raise ValueError('Time series does not contain a time as early as %s' % start)
if end > self.end_time:
raise ValueError('Time series does not contain a time as late as %s' % end)
start_idx = float(start - self.start_time) * self.sample_rate
end_idx = float(end - self.start_time) * self.sample_rate
if _numpy.isclose(start_idx, round(start_idx)):
start_idx = round(start_idx)
if _numpy.isclose(end_idx, round(end_idx)):
end_idx = round(end_idx)
if mode == 'floor':
start_idx = int(start_idx)
end_idx = int(end_idx)
elif mode == 'nearest':
start_idx = int(round(start_idx))
end_idx = int(round(end_idx))
else:
raise ValueError("Invalid mode: {}".format(mode))
return self[start_idx:end_idx]
@property
def delta_f(self):
"""Return the delta_f this ts would have in the frequency domain
"""
return 1.0 / self.duration
@property
def start_time(self):
"""Return time series start time as a LIGOTimeGPS.
"""
return self._epoch
@start_time.setter
def start_time(self, time):
""" Set the start time
"""
self._epoch = _lal.LIGOTimeGPS(time)
def get_end_time(self):
"""Return time series end time as a LIGOTimeGPS.
"""
return self._epoch + self.get_duration()
end_time = property(get_end_time,
doc="Time series end time as a LIGOTimeGPS.")
def get_sample_times(self):
"""Return an Array containing the sample times.
"""
if self._epoch is None:
return Array(range(len(self))) * self._delta_t
else:
return Array(range(len(self))) * self._delta_t + float(self._epoch)
sample_times = property(get_sample_times,
doc="Array containing the sample times.")
def at_time(self, time, nearest_sample=False):
""" Return the value at the specified gps time
"""
if nearest_sample:
time += self.delta_t / 2.0
return self[int((time-self.start_time)*self.sample_rate)]
def __eq__(self,other):
"""
This is the Python special method invoked whenever the '=='
comparison is used. It will return true if the data of two
time series are identical, and all of the numeric meta-data
are identical, irrespective of whether or not the two
instances live in the same memory (for that comparison, the
Python statement 'a is b' should be used instead).
Thus, this method returns 'True' if the types of both 'self'
and 'other' are identical, as well as their lengths, dtypes,
epochs, delta_ts and the data in the arrays, element by element.
It will always do the comparison on the CPU, but will *not* move
either object to the CPU if it is not already there, nor change
the scheme of either object. It is possible to compare a CPU
object to a GPU object, and the comparison should be true if the
data and meta-data of the two objects are the same.
Note in particular that this function returns a single boolean,
and not an array of booleans as Numpy does. If the numpy
behavior is instead desired it can be obtained using the numpy()
method of the PyCBC type to get a numpy instance from each
object, and invoking '==' on those two instances.
Parameters
----------
other: another Python object, that should be tested for equality
with 'self'.
Returns
-------
boolean: 'True' if the types, dtypes, lengths, epochs, delta_ts
and data of the two objects are each identical.
"""
if super(TimeSeries,self).__eq__(other):
return (self._epoch == other._epoch and self._delta_t == other._delta_t)
else:
return False
def almost_equal_elem(self,other,tol,relative=True,dtol=0.0):
"""
Compare whether two time series are almost equal, element
by element.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(self[i]-other[i]) <= tol*abs(self[i])
for all elements of the series.
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(self[i]-other[i]) <= tol
for all elements of the series.
The method also checks that self.delta_t is within 'dtol' of
other.delta_t; if 'dtol' has its default value of 0 then exact
equality between the two is required.
Other meta-data (type, dtype, length, and epoch) must be exactly
equal. If either object's memory lives on the GPU it will be
copied to the CPU for the comparison, which may be slow. But the
original object itself will not have its memory relocated nor
scheme changed.
Parameters
----------
other: another Python object, that should be tested for
almost-equality with 'self', element-by-element.
tol: a non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative: A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
is omitted) or as an absolute tolerance (if tol is False).
dtol: a non-negative number, the tolerance for delta_t. Like 'tol',
it is interpreted as relative or absolute based on the value of
'relative'. This parameter defaults to zero, enforcing exact
equality between the delta_t values of the two TimeSeries.
Returns
-------
boolean: 'True' if the data and delta_ts agree within the tolerance,
as interpreted by the 'relative' keyword, and if the types,
lengths, dtypes, and epochs are exactly the same.
"""
# Check that the delta_t tolerance is non-negative; raise an exception
# if needed.
if (dtol < 0.0):
raise ValueError("Tolerance in delta_t cannot be negative")
if super(TimeSeries,self).almost_equal_elem(other,tol=tol,relative=relative):
if relative:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol*self._delta_t)
else:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol)
else:
return False
def almost_equal_norm(self,other,tol,relative=True,dtol=0.0):
"""
Compare whether two time series are almost equal, normwise.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(norm(self-other)) <= tol*abs(norm(self)).
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(norm(self-other)) <= tol
The method also checks that self.delta_t is within 'dtol' of
other.delta_t; if 'dtol' has its default value of 0 then exact
equality between the two is required.
Other meta-data (type, dtype, length, and epoch) must be exactly
equal. If either object's memory lives on the GPU it will be
copied to the CPU for the comparison, which may be slow. But the
original object itself will not have its memory relocated nor
scheme changed.
Parameters
----------
other: another Python object, that should be tested for
almost-equality with 'self', based on their norms.
tol: a non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative: A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
is omitted) or as an absolute tolerance (if tol is False).
dtol: a non-negative number, the tolerance for delta_t. Like 'tol',
it is interpreted as relative or absolute based on the value of
'relative'. This parameter defaults to zero, enforcing exact
equality between the delta_t values of the two TimeSeries.
Returns
-------
boolean: 'True' if the data and delta_ts agree within the tolerance,
as interpreted by the 'relative' keyword, and if the types,
lengths, dtypes, and epochs are exactly the same.
"""
# Check that the delta_t tolerance is non-negative; raise an exception
# if needed.
if (dtol < 0.0):
raise ValueError("Tolerance in delta_t cannot be negative")
if super(TimeSeries,self).almost_equal_norm(other,tol=tol,relative=relative):
if relative:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol*self._delta_t)
else:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol)
else:
return False
@_convert
def lal(self):
"""Produces a LAL time series object equivalent to self.
Returns
-------
lal_data : {lal.*TimeSeries}
LAL time series object containing the same data as self.
The actual type depends on the sample's dtype. If the epoch of
self is 'None', the epoch of the returned LAL object will be
LIGOTimeGPS(0,0); otherwise, the same as that of self.
Raises
------
TypeError
If time series is stored in GPU memory.
"""
lal_data = None
ep = self._epoch
if self._data.dtype == _numpy.float32:
lal_data = _lal.CreateREAL4TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.float64:
lal_data = _lal.CreateREAL8TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.complex64:
lal_data = _lal.CreateCOMPLEX8TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.complex128:
lal_data = _lal.CreateCOMPLEX16TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
lal_data.data.data[:] = self.numpy()
return lal_data
def crop(self, left, right):
""" Remove given seconds from either end of time series
Parameters
----------
left : float
Number of seconds of data to remove from the left of the time series.
right : float
Number of seconds of data to remove from the right of the time series.
Returns
-------
cropped : pycbc.types.TimeSeries
The reduced time series
"""
if left + right > self.duration:
raise ValueError('Cannot crop more data than we have')
s = int(left * self.sample_rate)
e = len(self) - int(right * self.sample_rate)
return self[s:e]
def save_to_wav(self, file_name):
""" Save this time series to a wav format audio file.
Parameters
----------
file_name : string
The output file name
"""
scaled = _numpy.int16(self.numpy()/max(abs(self)) * 32767)
write_wav(file_name, int(self.sample_rate), scaled)
def psd(self, segment_duration, **kwds):
""" Calculate the power spectral density of this time series.
Use the `pycbc.psd.welch` method to estimate the psd of this time segment.
For more complete options, please see that function.
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
kwds : keywords
Additional keyword arguments are passed on to the `pycbc.psd.welch` method.
Returns
-------
psd : FrequencySeries
Frequency series containing the estimated PSD.
"""
from pycbc.psd import welch
seg_len = int(segment_duration * self.sample_rate)
seg_stride = int(seg_len / 2)
return welch(self, seg_len=seg_len,
seg_stride=seg_stride,
**kwds)
def gate(self, time, window=0.25, method='taper', copy=True,
taper_width=0.25, invpsd=None):
""" Gate out portion of time series
Parameters
----------
time: float
Central time of the gate in seconds
window: float
Half-length in seconds to remove data around gate time.
method: str
Method to apply gate, options are 'hard', 'taper', and 'paint'.
copy: bool
If False, do operations inplace to this time series, else return
new time series.
taper_width: float
            Length of tapering region on either side of excised data. Only
applies to the taper gating method.
invpsd: pycbc.types.FrequencySeries
The inverse PSD to use for painting method. If not given,
a PSD is generated using default settings.
Returns
-------
        data: pycbc.types.TimeSeries
Gated time series
"""
data = self.copy() if copy else self
if method == 'taper':
from pycbc.strain import gate_data
return gate_data(data, [(time, window, taper_width)])
elif method == 'paint':
# Uses the hole-filling method of
# https://arxiv.org/pdf/1908.05644.pdf
from pycbc.strain.gate import gate_and_paint
if invpsd is None:
# These are some bare minimum settings, normally you
# should probably provide a psd
invpsd = 1. / self.filter_psd(self.duration/32, self.delta_f, 0)
lindex = int((time - window - self.start_time) / self.delta_t)
rindex = lindex + int(2 * window / self.delta_t)
lindex = lindex if lindex >= 0 else 0
rindex = rindex if rindex <= len(self) else len(self)
return gate_and_paint(data, lindex, rindex, invpsd, copy=False)
elif method == 'hard':
tslice = data.time_slice(time - window, time + window)
tslice[:] = 0
return data
else:
raise ValueError('Invalid method name: {}'.format(method))
def filter_psd(self, segment_duration, delta_f, flow):
""" Calculate the power spectral density of this time series.
Use the `pycbc.psd.welch` method to estimate the psd of this time segment.
The psd is then truncated in the time domain to the segment duration
and interpolated to the requested sample frequency.
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
delta_f : float
Frequency spacing to return psd at.
flow : float
The low frequency cutoff to apply when truncating the inverse
spectrum.
Returns
-------
psd : FrequencySeries
Frequency series containing the estimated PSD.
"""
from pycbc.psd import interpolate, inverse_spectrum_truncation
p = self.psd(segment_duration)
samples = int(p.sample_rate * segment_duration)
p = interpolate(p, delta_f)
return inverse_spectrum_truncation(p, samples,
low_frequency_cutoff=flow,
trunc_method='hann')
def whiten(self, segment_duration, max_filter_duration, trunc_method='hann',
remove_corrupted=True, low_frequency_cutoff=None,
return_psd=False, **kwds):
""" Return a whitened time series
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
max_filter_duration : int
Maximum length of the time-domain filter in seconds.
trunc_method : {None, 'hann'}
Function used for truncating the time-domain filter.
None produces a hard truncation at `max_filter_len`.
remove_corrupted : {True, boolean}
If True, the region of the time series corrupted by the whitening
is excised before returning. If false, the corrupted regions
are not excised and the full time series is returned.
low_frequency_cutoff : {None, float}
Low frequency cutoff to pass to the inverse spectrum truncation.
This should be matched to a known low frequency cutoff of the
data if there is one.
return_psd : {False, Boolean}
Return the estimated and conditioned PSD that was used to whiten
the data.
kwds : keywords
Additional keyword arguments are passed on to the `pycbc.psd.welch` method.
Returns
-------
whitened_data : TimeSeries
The whitened time series
"""
from pycbc.psd import inverse_spectrum_truncation, interpolate
# Estimate the noise spectrum
psd = self.psd(segment_duration, **kwds)
psd = interpolate(psd, self.delta_f)
max_filter_len = int(max_filter_duration * self.sample_rate)
# Interpolate and smooth to the desired corruption length
psd = inverse_spectrum_truncation(psd,
max_filter_len=max_filter_len,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method=trunc_method)
# Whiten the data by the asd
white = (self.to_frequencyseries() / psd**0.5).to_timeseries()
if remove_corrupted:
white = white[int(max_filter_len/2):int(len(self)-max_filter_len/2)]
if return_psd:
return white, psd
return white
def qtransform(self, delta_t=None, delta_f=None, logfsteps=None,
frange=None, qrange=(4,64), mismatch=0.2, return_complex=False):
""" Return the interpolated 2d qtransform of this data
Parameters
----------
delta_t : {self.delta_t, float}
The time resolution to interpolate to
delta_f : float, Optional
The frequency resolution to interpolate to
logfsteps : int
Do a log interpolation (incompatible with delta_f option) and set
the number of steps to take.
frange : {(30, nyquist*0.8), tuple of ints}
frequency range
qrange : {(4, 64), tuple}
q range
mismatch : float
Mismatch between frequency tiles
return_complex: {False, bool}
return the raw complex series instead of the normalized power.
Returns
-------
times : numpy.ndarray
The time that the qtransform is sampled.
freqs : numpy.ndarray
The frequencies that the qtransform is sampled.
qplane : numpy.ndarray (2d)
The two dimensional interpolated qtransform of this time series.
"""
from pycbc.filter.qtransform import qtiling, qplane
from scipy.interpolate import interp2d
if frange is None:
frange = (30, int(self.sample_rate / 2 * 8))
q_base = qtiling(self, qrange, frange, mismatch)
_, times, freqs, q_plane = qplane(q_base, self.to_frequencyseries(),
return_complex=return_complex)
if logfsteps and delta_f:
raise ValueError("Provide only one (or none) of delta_f and logfsteps")
# Interpolate if requested
if delta_f or delta_t or logfsteps:
if return_complex:
interp_amp = interp2d(times, freqs, abs(q_plane))
interp_phase = interp2d(times, freqs, _numpy.angle(q_plane))
else:
interp = interp2d(times, freqs, q_plane)
if delta_t:
times = _numpy.arange(float(self.start_time),
float(self.end_time), delta_t)
if delta_f:
freqs = _numpy.arange(int(frange[0]), int(frange[1]), delta_f)
if logfsteps:
freqs = _numpy.logspace(_numpy.log10(frange[0]),
_numpy.log10(frange[1]),
logfsteps)
if delta_f or delta_t or logfsteps:
if return_complex:
q_plane = _numpy.exp(1.0j * interp_phase(times, freqs))
q_plane *= interp_amp(times, freqs)
else:
q_plane = interp(times, freqs)
return times, freqs, q_plane
def notch_fir(self, f1, f2, order, beta=5.0, remove_corrupted=True):
""" notch filter the time series using an FIR filtered generated from
the ideal response passed through a time-domain kaiser
window (beta = 5.0)
The suppression of the notch filter is related to the bandwidth and
the number of samples in the filter length. For a few Hz bandwidth,
a length corresponding to a few seconds is typically
required to create significant suppression in the notched band.
Parameters
----------
Time Series: TimeSeries
The time series to be notched.
f1: float
The start of the frequency suppression.
f2: float
The end of the frequency suppression.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
"""
from pycbc.filter import notch_fir
ts = notch_fir(self, f1, f2, order, beta=beta)
if remove_corrupted:
ts = ts[order:len(ts)-order]
return ts
def lowpass_fir(self, frequency, order, beta=5.0, remove_corrupted=True):
""" Lowpass filter the time series using an FIR filtered generated from
the ideal response passed through a kaiser window (beta = 5.0)
Parameters
----------
Time Series: TimeSeries
The time series to be low-passed.
frequency: float
            The frequency above which the signal is suppressed.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
remove_corrupted : {True, boolean}
If True, the region of the time series corrupted by the filtering
is excised before returning. If false, the corrupted regions
are not excised and the full time series is returned.
"""
from pycbc.filter import lowpass_fir
ts = lowpass_fir(self, frequency, order, beta=beta)
if remove_corrupted:
ts = ts[order:len(ts)-order]
return ts
def highpass_fir(self, frequency, order, beta=5.0, remove_corrupted=True):
""" Highpass filter the time series using an FIR filtered generated from
the ideal response passed through a kaiser window (beta = 5.0)
Parameters
----------
Time Series: TimeSeries
The time series to be high-passed.
frequency: float
            The frequency below which the signal is suppressed.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
remove_corrupted : {True, boolean}
If True, the region of the time series corrupted by the filtering
is excised before returning. If false, the corrupted regions
are not excised and the full time series is returned.
"""
from pycbc.filter import highpass_fir
ts = highpass_fir(self, frequency, order, beta=beta)
if remove_corrupted:
ts = ts[order:len(ts)-order]
return ts
def fir_zero_filter(self, coeff):
"""Filter the timeseries with a set of FIR coefficients
Parameters
----------
coeff: numpy.ndarray
            FIR coefficients. Should be an odd length and symmetric.
Returns
-------
filtered_series: pycbc.types.TimeSeries
Return the filtered timeseries, which has been properly shifted to account
for the FIR filter delay and the corrupted regions zeroed out.
"""
from pycbc.filter import fir_zero_filter
return self._return(fir_zero_filter(coeff, self))
def save(self, path, group = None):
"""
Save time series to a Numpy .npy, hdf, or text file. The first column
contains the sample times, the second contains the values.
In the case of a complex time series saved as text, the imaginary
part is written as a third column. When using hdf format, the data is stored
as a single vector, along with relevant attributes.
Parameters
----------
path: string
Destination file path. Must end with either .hdf, .npy or .txt.
group: string
Additional name for internal storage use. Ex. hdf storage uses
this as the key value.
Raises
------
ValueError
            If path does not end in .hdf, .npy or .txt.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
output = _numpy.vstack((self.sample_times.numpy(), self.numpy())).T
_numpy.save(path, output)
elif ext == '.txt':
if self.kind == 'real':
output = _numpy.vstack((self.sample_times.numpy(),
self.numpy())).T
elif self.kind == 'complex':
output = _numpy.vstack((self.sample_times.numpy(),
self.numpy().real,
self.numpy().imag)).T
_numpy.savetxt(path, output)
elif ext =='.hdf':
key = 'data' if group is None else group
with h5py.File(path, 'a') as f:
ds = f.create_dataset(key, data=self.numpy(),
compression='gzip',
compression_opts=9, shuffle=True)
ds.attrs['start_time'] = float(self.start_time)
ds.attrs['delta_t'] = float(self.delta_t)
else:
raise ValueError('Path must end with .npy, .txt or .hdf')
def to_timeseries(self):
""" Return time series"""
return self
@_nocomplex
def to_frequencyseries(self, delta_f=None):
""" Return the Fourier transform of this time series
Parameters
----------
delta_f : {None, float}, optional
The frequency resolution of the returned frequency series. By
default the resolution is determined by the duration of the timeseries.
Returns
-------
FrequencySeries:
The fourier transform of this time series.
"""
from pycbc.fft import fft
if not delta_f:
delta_f = 1.0 / self.duration
# add 0.5 to round integer
tlen = int(1.0 / delta_f / self.delta_t + 0.5)
flen = int(tlen / 2 + 1)
if tlen < len(self):
raise ValueError("The value of delta_f (%s) would be "
"undersampled. Maximum delta_f "
"is %s." % (delta_f, 1.0 / self.duration))
if not delta_f:
tmp = self
else:
tmp = TimeSeries(zeros(tlen, dtype=self.dtype),
delta_t=self.delta_t, epoch=self.start_time)
tmp[:len(self)] = self[:]
f = FrequencySeries(zeros(flen,
dtype=complex_same_precision_as(self)),
delta_f=delta_f)
fft(tmp, f)
return f
def inject(self, other, copy=True):
"""Return copy of self with other injected into it.
The other vector will be resized and time shifted with sub-sample
precision before adding. This assumes that one can assume zeros
outside of the original vector range.
"""
# only handle equal sample rate for now.
if not self.sample_rate_close(other):
raise ValueError('Sample rate must be the same')
# determine if we want to inject in place or not
if copy:
ts = self.copy()
else:
ts = self
# Other is disjoint
if ((other.start_time >= ts.end_time) or
(ts.start_time > other.end_time)):
return ts
other = other.copy()
dt = float((other.start_time - ts.start_time) * ts.sample_rate)
# This coaligns other to the time stepping of self
if not dt.is_integer():
diff = (dt - _numpy.floor(dt)) * ts.delta_t
# insert zeros at end
other.resize(len(other) + (len(other) + 1) % 2 + 1)
# fd shift to the right
other = other.cyclic_time_shift(diff)
# get indices of other with respect to self
        # this is already an integer to floating point precision
left = float(other.start_time - ts.start_time) * ts.sample_rate
left = int(round(left))
right = left + len(other)
oleft = 0
oright = len(other)
# other overhangs on left so truncate
if left < 0:
oleft = -left
left = 0
# other overhangs on right so truncate
if right > len(ts):
oright = len(other) - (right - len(ts))
right = len(ts)
ts[left:right] += other[oleft:oright]
return ts
add_into = inject # maintain backwards compatibility for now
@_nocomplex
def cyclic_time_shift(self, dt):
"""Shift the data and timestamps by a given number of seconds
Shift the data and timestamps in the time domain a given number of
seconds. To just change the time stamps, do ts.start_time += dt.
The time shift may be smaller than the intrinsic sample rate of the data.
Note that data will be cyclically rotated, so if you shift by 2
seconds, the final 2 seconds of your data will now be at the
beginning of the data set.
Parameters
----------
dt : float
Amount of time to shift the vector.
Returns
-------
data : pycbc.types.TimeSeries
The time shifted time series.
"""
# We do this in the frequency domain to allow us to do sub-sample
# time shifts. This also results in the shift being circular. It
        # is left to a future update to do a faster implementation in the case
# where the time shift can be done with an exact number of samples.
return self.to_frequencyseries().cyclic_time_shift(dt).to_timeseries()
def match(self, other, psd=None,
low_frequency_cutoff=None, high_frequency_cutoff=None):
""" Return the match between the two TimeSeries or FrequencySeries.
Return the match between two waveforms. This is equivalent to the overlap
maximized over time and phase. By default, the other vector will be
resized to match self. This may remove high frequency content or the
end of the vector.
Parameters
----------
other : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : Frequency Series
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the match.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the match.
Returns
-------
match: float
index: int
The number of samples to shift to get the match.
"""
return self.to_frequencyseries().match(other, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff)
def detrend(self, type='linear'):
""" Remove linear trend from the data
Remove a linear trend from the data to improve the approximation that
the data is circularly convolved, this helps reduce the size of filter
transients from a circular convolution / filter.
Parameters
----------
type: str
The choice of detrending. The default ('linear') removes a linear
least squares fit. 'constant' removes only the mean of the data.
"""
from scipy.signal import detrend
return self._return(detrend(self.numpy(), type=type))
def plot(self, **kwds):
""" Basic plot of this time series
"""
from matplotlib import pyplot
if self.kind == 'real':
plot = pyplot.plot(self.sample_times, self, **kwds)
return plot
elif self.kind == 'complex':
plot1 = pyplot.plot(self.sample_times, self.real(), **kwds)
plot2 = pyplot.plot(self.sample_times, self.imag(), **kwds)
return plot1, plot2
def load_timeseries(path, group=None):
"""
Load a TimeSeries from a .hdf, .txt or .npy file. The
default data types will be double precision floating point.
Parameters
----------
path: string
        Source file path. Must end with either .hdf, .npy or .txt.
group: string
Additional name for internal storage use. Ex. hdf storage uses
this as the key value.
Raises
------
ValueError
        If path does not end in .hdf, .npy or .txt.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
data = _numpy.load(path)
elif ext == '.txt':
data = _numpy.loadtxt(path)
elif ext == '.hdf':
key = 'data' if group is None else group
        f = h5py.File(path, 'r')
data = f[key][:]
series = TimeSeries(data, delta_t=f[key].attrs['delta_t'],
epoch=f[key].attrs['start_time'])
f.close()
return series
else:
raise ValueError('Path must end with .npy, .hdf, or .txt')
    # The .npy/.txt formats store a 2d array; two columns mean a real series,
    # three columns (time, real part, imaginary part) mean a complex series.
    if data.ndim == 2 and data.shape[1] == 2:
        delta_t = (data[-1][0] - data[0][0]) / (len(data)-1)
        epoch = _lal.LIGOTimeGPS(data[0][0])
        return TimeSeries(data[:,1], delta_t=delta_t, epoch=epoch)
    elif data.ndim == 2 and data.shape[1] == 3:
        delta_t = (data[-1][0] - data[0][0]) / (len(data)-1)
        epoch = _lal.LIGOTimeGPS(data[0][0])
        return TimeSeries(data[:,1] + 1j*data[:,2],
                          delta_t=delta_t, epoch=epoch)
    else:
        raise ValueError('File data has shape %s, cannot convert to '
                         'TimeSeries; expected 2 (real) or 3 '
                         '(complex) columns' % (data.shape,))
| gpl-3.0 |
AbhiAgarwal/prep | python/guide.py | 1 | 18005 | #!/usr/bin/env python
"""Basic Python Cheat Sheet by Filip Kral on 2015/02/16"""
"""
Python is a cross-platform, interpreted, object-oriented programming language.
That means you can run it on Linux, Windows, Mac, and other platforms,
you don't need to compile your code to execute it because it is compiled on
the fly, and you can use classes and objects.
This cheat sheet summarizes the very basics of Python programming syntax.
It is by no means exhaustive! For more comprehensive yet still compact reference
I recommend Python Pocket Reference by Mark Lutz published by O'Reilly.
And of course use standard help: http://docs.python.org/
As you can see on this string, triple double-quotes indicate multi-line string.
Triple double quotes are also used for documentation strings at the beginning
of functions, classes, and modules.
"""
# Anything after a hash character on the same row is a comment
# (unless the hash is in a string)
# The very top row of #!/usr/bin/env python is not relevant for Windows users.
# On unix/linux however, it indicates which program to use to run the script.
## Data types
# You do not have to declare variables, data type is inferred.
#http://docs.python.org/tutorial/introduction.html#strings
#http://docs.python.org/tutorial/introduction.html#numbers
#http://docs.python.org/tutorial/stdlib.html#dates-and-times
# Here we overwrite variable x on every line with a new value:
x = 1 # integer number
x = int(1) # integer number, function int() returns integer part of the argument
# Do not confuse int() and round()! int(1.8) => 1, round(1.8) => 2.0
x = False # boolean, can be True or False
x = 1.0 # floating point number
# Notice the decimal point, that makes it floating point and not integer!
x = float(1.0) # floating point explicitly, same as float(1) or float('1')
# Other numeric data types are Decimal and Fraction (see help for details)
x = 'A string' # string, single quotes preferred but double-quotes work too
x = r'Another string' # string, the r at the beginning indicates a raw string
# # so special characters are read as they are without
# # the need to escape them. For example:
# # 'c:\\data' and r'c:\data' are the same
x = r'One type of "quotes" can include the other type'
x = r"That's helpful when dealing with apostrophes."
# There are some other prefixes than r, see Python help for details.
x = str(123456) # Function str() converts its argument to string
g = 'a'
f = 'b'
# Repeated concatenation with + is an O(n^2) pattern when building long strings
g = g + f
# str.join is O(n) overall and assures consistent linear concatenation
# performance across Python versions and implementations.
# It accepts any iterable of strings; a tuple or a list both work:
g = ''.join((g, f))
g = ''.join([g, f])
# String notes
# http://stackoverflow.com/questions/19926089/python-equivalent-of-java-stringbuffer
# join still wins over concat, but marginally
# list comprehensions are faster than loops
# joining generators is slower than joining lists
# other methods are of no use (unless you're doing something special)
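# A short added example of the list comprehension mentioned above: it builds a
# list in a single expression and is usually faster and clearer than a loop
# that appends item by item.
squares = [n ** 2 for n in range(5)] # [0, 1, 4, 9, 16]
joined = '-'.join([str(n) for n in squares]) # '0-1-4-9-16'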
# Slicing
# Extracting parts of a variable is called slicing or subsetting.
# Slicing works on most objects that have multiple elements, for example
# strings (have multiple characters), lists, tuples, dictionaries. For example:
x = str(123456) # x is now '123456'
x[0] # '1'
x[-1] # '6'
x[1:3] # '23'
x[3:] # '456'
x[:2] # '12'
x[0:6:2] # '135', generic pattern is x[from:to:step]
x[::-1] # '654321'
# Why objects? What are objects all about?
# More details are further down, for now just know that anything in Python is
# an object. Objects can have properties (data) and methods (functions).
# For example strings have many methods. Here is how you call some of them:
x = 'A,b,C-'
x.upper() # returns 'A,B,C-'
x.strip('-') # returns 'A,b,C'
x.split(',') # returns a list ['A', 'b', 'C-']
x = [1, 2, 3] # list
x = list([1, 2]) # explicit declaration of a list
x = [] # this is an empty list, same as calling list()
x = ['Lists', "can", ['mix', 'types'], 123, 4.56] # lists can be nested
# Refer to individual items by zero-based offset (e.g. x[2][0] returns 'mix')
# Lists are mutable so you can change values of items (e.g. x[0] = 4)
x = (1, 2, 3) # tuple
x = tuple((1, 3)) # explicit declaration of a tuple
x = () # this is an empty tuple, same as calling tuple()
x = ('nested', ('and', 'mixed'), 'tuple', 1, 2, 3, 4) # tuples can be nested
# Refer to individual items by zero-based offset, e.g. x[0] returns 'nested'
# Tuples are immutable so you cannot change items, x[0] = 4 raises exception!
x = {'a': 'dictionaries', 'b': 'are great', 'c': 123} # dictionary
x = {1: 'keys can be mixed', 'b': 'and items too', 2: 123, 'c': 4.56}
x = dict({1: 'explicit', 3: 'dictionary', 'b': 'declaration'})
x = dict([[1, 'many ways to'], [3, 'create'], ['b', 'dicts']])
# Refer to values in dictionaries by their keys, e.g. x['b'] returns 'dicts'
# Assign new value to a key x['c'] = 'new value'
x = None # x is now a reference to the empty object, i.e. to nothing
## Operators and operations
# Binary operators take one argument on the left, one argument on the right,
# and return a value. Convention suggests spaces around a binary operators.
# One operator can have different effect depending on the types of arguments.
1 + 1 # addition; returns 2; similarly - (subtraction)
2 * 2 # multiplication; returns 4
2 ** 3 # power; returns 8
2.0 / 3.0 # division; returns 0.6666...
# Note that 2 / 3 returns 0 (integer division in Python 2); for true division make an operand a float:
# float(2) / 3 or 2 / 3.0
2.0 // 3.0 # floor division (result is rounded down); returns 0.0
4 % 3 # modulo; returns 1
'a' + 'b' # string concatenation, returns 'ab'
'a' * 3 # repetition; returns 'aaa'
['a'] + ['b'] # returns ['a', 'b'], see also list.append and list.extend methods
[1] * 3 # repetition; returns [1,1,1]
# Boolean and comparison operators
1 == 2 # equal to; returns False
1 != 3 # not equal to; returns True
x or y # if x is False then y, else x
x and y # if x is False then x, else y
not x # if x is False then True, else False
# Other comparison operators are >, <, >=, <=, is, is not.
# Preferably use brackets to separate individual conditions to prevent
# unexpected operator preference effects:
(1 < 0) or (3 > 1 and 0 < 1)
# which is: False or (True and True) => False or True => the result is True
# Operations are executed using methods, different types have different methods.
# There are plenty of methods. Just a few more examples then:
'A-string-with-dashes'.replace('-', '_') # replace dashes with underscores
a_list = [1, 2] # define a new list, and now call its methods:
a_list.append(3) # a_list is now [1, 2, 3]
a_list.extend([4, 5]) # a_list is now [1, 2, 3, 4, 5]
a_list.append([6, 7]) # a_list is now [1, 2, 3, 4, 5, [6, 7]]
a_dict = {1: 'a', 2: 'dictionary'} # define a new dictionary
a_dict.keys() # returns [1, 2]
a_dict.items() # returns [(1, 'a'), (2, 'dictionary')]
a_dict.update({3: 'new item'}) # a_dict is now {1: 'a', 2: 'dictionary', 3: 'new item'}
a_dict.get(1, 2) # returns 'a', would return 2 if 1 was an invalid key
## Program flow - code structure and code blocks
# In many languages, blocks of code are surrounded by brackets,
# usually by curly brackets {}, but Python is different.
# In Python, blocks of code are defined by indentation!
# By convention, four spaces define an indentation of a block.
# It is important to be consistent!
# Python interpreter is very sensitive to inappropriate indentation.
## Branching (executing blocks of codes depending on a condition)
# Examples below use print function to print things to the console.
# full branching example
x = 1
if x < 0:
print('This block will execute if x is lower than zero.')
elif x == 0:
print('This block will execute if x is exactly zero.')
else:
print('This will execute if any of the above is true.')
# basic branching example
if x is None:
print('This will execute if x is an empty reference')
else:
print('And... this will print to console in other cases.')
# simple branching example
if x > 0:
print('The simplest if block.')
## Looping (iterations)
# while loops are the basics
i = 0
while i < 10:
print('Looping is great' + str(i))
i = i + 1 # don't forget to increment the counter to avoid infinite loop
# for loops are often more convenient than while loops
for i in [1,2,3,4]:
print('Variable i goes from 1 to 4, now it is ' + str(i))
print('You can iterate over various iterable variables, not just lists')
# couple of tricks for looping
x = 0
while 1: # condition is always true; the loop exits via the break below
    x += 1 # increment variable x by one, equivalent to x = x + 1
    if x < 9:
        pass # pass statement does nothing but is needed to define a block
    elif x < 14:
        continue # continue statement moves onto another iteration
    else:
        break # break statement stops the closest loop
## Functions and statements by example (an extremely reduced selection)
# Built-in functions
x = range(5) # x is now list of [0, 1, 2, 3, 4]
len(x) # returns length of x, 5 in this case
type(x) # returns type of an object, <type 'list'> in this case
min(x), max(x) # returns minimum and maximum
map(len, ['applies', 'function', 'to', 'items']) # this returns [7, 8, 2, 5]
zip("a" * 3, ['b', 'c', 'd']) # returns [('a', 'b'), ('a', 'c'), ('a', 'd')]
dir(x) # returns list of names of properties and methods of object x
dir() # returns list of names of variables
del x # unbind the name x from the current namespace
## Defining your own functions
# declare function like this
def myFunction(x):
"""Functions are declared using the def statement.
Get used to writing documentation strings like this one.
This function just simply returns the input unchanged."""
return x
# and then call a function like this
y = myFunction([1, 2, 3]) # y is now [1, 2, 3]
# couple of notes about arguments
def secondFunction(a_list, y='my default', *args, **kwargs):
"""This function demonstrates different ways of passing arguments."""
# a_list must be always supplied and here we assume it is a list
# so we can append y, which always exists thanks to its default value
a_list.append(y)
# variable globList was not specified in the argument list, however...
    # ... Python will look it up in the module's global scope.
# See help for details about scope, this is just a dirty demo.
# We assume globList is a list somewhere in the scope.
# Relying on global variables should be avoided in most cases.
# We also assume a_list argument is a list so we can do:
globList.extend(a_list)
# Extra unnamed arguments are collected as a list
print('There are ' + str(len(args)) + ' extra arguments without a name.')
# Extra named arguments are collected as a dictionary
print('Keys ' + str(kwargs.keys()) + '; items ' + str(kwargs.items()))
# Functions may not return any value.
# Technically, in Python, functions always return at least None.
# Global variables altered within a function will remain altered.
return
# You can call the above function in many ways:
globList = [1, 2, 3] # let's define a list outside a function first
secondFunction([4, 5])
# >> There are 0 extra arguments passed without a name.
# >> Keys []; items []
# globList is now [1, 2, 3, 4, 5, 'my default']
secondFunction([6, 7], y = 'not default')
# >> There are 0 extra arguments passed without a name.
# >> Keys []; items []
# globList is now [1, 2, 3, 4, 5, 'my default', 6, 7, 'not default']
secondFunction([8, 9], 'a', 'b', c = 10, d = 11)
# >> There are 1 extra arguments passed without a name.
# >> Keys ['c', 'd']; items [('c', 10), ('d', 11)]
# globList is now [1, 2, 3, 4, 5, 'my default', 6, 7, 'not default', 8, 9, 'a']
## Reading and writing files
# An example reads lines from one file and writes them to another file.
# This is an old way of handling files.
readthis = r'C:\toread.txt'
writethis = r'C:\towrite.txt'
fread = open(readthis, 'r') # fread is now a file object ready to be read
fwrite = open(writethis, 'w') # fwrite is now a file object ready to be written in
# Other useful modes are 'a' for append, 'rb' for binary reading, etc.
line = fread.readline() # read one line
fwrite.write(line) # write line
# Remember to add characters for new line like fwrite.write('abc' + '\n')
# let's finish the rest quickly
for line in fread:
fwrite.write(line)
# and close the files when done; see Exception handling for another example
fread.close()
fwrite.close()
# The above is an old way of handling files. Below is the modern version.
# Use the with statement to close files automatically even if something fails.
readthis = r'C:\toread.txt'
writethis = r'C:\towrite.txt'
with open(readthis, 'r') as fread:
with open(writethis, 'w') as fwrite:
for line in fread:
fwrite.write(line)
## Exception handling
readthis = r"C:\toread.txt"
try:
# code for some risky operation like reading or writing files
    fread = open(readthis, 'r')
for line in fread:
print('Just doing something with a line ' + str(line))
except Exception as e:
# Older versions of Python (<2.6) use syntax: "except Exception, e:".
# Here you would put anything you want to do to when an error occurs.
# You can have more except branches addressing different types of exceptions
print('Sorry, there was an error: ' + str(e)) # notify the user via console
finally:
# Finally branch is for cleanup code, here we want to close the file.
# First, check if variable fread exists,
# if it has property "closed" and if closed is not True
if 'fread' in dir() and 'closed' in dir(fread) and not fread.closed:
# At this point we can close the file.
# If we don't check the above, we may introduce another exception!
fread.close()
# The finally branch is optional,
# you can leave it out altogether if you have nothing to clean up.
# Use the with statement to avoid try:except:finally when handling files,
# however, try:except:finally blocks are necessary many other scenarios.
## Classes and object oriented programming
# Programming is about data as variables of certain data types,
# mechanisms for branching and looping, and functions.
# Classes (and instances of classes, i.e. objects)
# encapsulate data and functions into self-contained bundles.
# An example of a simple class
class Point2D:
"""A 2D point"""
def __init__(self, x, y):
"""Constructors are by convention called __init__.
Constructor is invoked when the class is instantiated.
Constructor is a type of method, all methods must
receive a reference to the current object as first parameter,
by convention called self, not used when methods are called.
Constructors are not always necessary.
"""
self.X = float(x) # defines a data member of the class
self.Y = float(y) # defines another data member of the class
# there is much more to data members (see help for details)
def shift(self, dx, dy):
"""A method to shift the point by vector (dx, dy)"""
self.X = self.X + float(dx)
self.Y = self.Y + float(dy)
# Instantiate an object of class Point and use its method
pt = Point2D(1, 5)
pt.shift(2,-1) # pt.X is now 3, pt.Y is now 4
# Every object is an instance of a class. Including exceptions. Most exceptions
# are derived from Exception class. Let's define a custom exception class.
class MyException(Exception):
"""This class inherits from class Exception.
Exception is a baseclass (or superclass) of class (or subclass) MyException.
A class can inherit from multiple baseclasses (separated by comma).
"""
def __init__(self, extradata):
"""The __init__ function runs when an object of this class is created.
This is also an example of method overriding: the baseclass has its own
Exception.__init__ function, but it has been overridden by this method.
"""
# There are no real private members (properties or methods) in Python,
# all members can be accessed directly. However there are conventions.
self.extra = extradata # defining a data member of the class
# One underscore indicates this should be treated as private member,
# i.e. accessed only within the class where it is defined.'
self._secret = 'Not intended for use outside of this class'
def notifyUser(self):
print(str(self.args)) # self.args is inherited from baseclass Exception
print(str(self.message)) # also inherited from Exception
print(str(len(self.extra)) + ' data items: ' + str(self.extra))
# We can raise our custom exception intentionally (somewhat artificial example)
try: raise MyException([1, 2, 3])
except MyException as m: m.notifyUser()
# prints out "() \n3 data items: [1, 2, 3]"
## Modules
# Modules are collections of functions you can load using the import statement
# The import statement has several forms.
# import module for interaction with operating system
import os
a_path = r'C:\my\file.txt'
filename = os.path.basename(a_path) # 'file.txt'
foldername = os.path.dirname(a_path) # 'C:\my'
os.path.join(foldername, filename) # 'C:\my\file.txt'
os.listdir(foldername) # returns list of files in folder foldername
# use the datetime module to retrieve date-time stamp
import datetime
t = datetime.datetime.now() # returns current computer time
t.strftime('%Y%m%d%H%M%S') # formats time into a string as specified
# Other useful modules are sys, shutil, math, and many others. You can create
# your own modules too ( http://docs.python.org/2/tutorial/modules.html ).
## Message to R users
# If you are familiar with R, Python may seem somewhat clumsy.
# R is more flexible when it comes to vector operations, stats, and plotting.
# Check out Python modules numpy, scipy, matplotlib, and pandas.
| mit |
sjperkins/tensorflow | tensorflow/contrib/keras/python/keras/callbacks.py | 8 | 36338 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import Iterable
from collections import OrderedDict
import csv
import json
import os
import time
import numpy as np
import six
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.generic_utils import Progbar
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver as saver_lib
# pylint: disable=g-import-not-at-top
try:
import requests
except ImportError:
requests = None
# pylint: enable=g-import-not-at-top
class CallbackList(object):
"""Container abstracting a list of callbacks.
Arguments:
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
for callback in self.callbacks:
callback.set_model(model)
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs=None):
"""Called right before processing a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if (self._delta_t_batch > 0. and
delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
logging.warning(
'Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs=None):
"""Called at the end of a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
if not hasattr(self, '_t_enter_batch'):
self._t_enter_batch = time.time()
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if (self._delta_t_batch > 0. and
(delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1)):
logging.warning(
'Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs=None):
"""Called at the end of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_end(logs)
def __iter__(self):
return iter(self.callbacks)
class Callback(object):
"""Abstract base class used to build new callbacks.
# Properties
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Sequential` model class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
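  Example (a minimal sketch of a custom callback; `model`, `X_train` and
  `Y_train` are placeholders for an already-compiled model and its data):
  ```python
      class LossHistory(Callback):
          def on_train_begin(self, logs=None):
              self.losses = []
          def on_batch_end(self, batch, logs=None):
              logs = logs or {}
              self.losses.append(logs.get('loss'))
      history = LossHistory()
      model.fit(X_train, Y_train, callbacks=[history])
      print(history.losses)
  ```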
"""
def __init__(self):
self.validation_data = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
pass
def on_batch_end(self, batch, logs=None):
pass
def on_train_begin(self, logs=None):
pass
def on_train_end(self, logs=None):
pass
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
"""
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
logs[k] = self.totals[k] / self.seen
class TerminateOnNaN(Callback):
"""Callback that terminates training when a NaN loss is encountered."""
def __init__(self):
super(TerminateOnNaN, self).__init__()
def on_batch_end(self, batch, logs=None):
logs = logs or {}
loss = logs.get('loss')
if loss is not None:
if np.isnan(loss) or np.isinf(loss):
print('Batch %d: Invalid loss, terminating training' % (batch))
self.model.stop_training = True
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
Arguments:
count_mode: One of "steps" or "samples".
Whether the progress bar should
        count samples seen or steps (batches) seen.
Raises:
ValueError: In case of invalid `count_mode`.
"""
def __init__(self, count_mode='samples'):
super(ProgbarLogger, self).__init__()
if count_mode == 'samples':
self.use_steps = False
elif count_mode == 'steps':
self.use_steps = True
else:
raise ValueError('Unknown `count_mode`: ' + str(count_mode))
def on_train_begin(self, logs=None):
self.verbose = self.params['verbose']
self.epochs = self.params['epochs']
def on_epoch_begin(self, epoch, logs=None):
if self.verbose:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
if self.use_steps:
target = self.params['steps']
else:
target = self.params['samples']
self.target = target
self.progbar = Progbar(target=self.target, verbose=self.verbose)
self.seen = 0
def on_batch_begin(self, batch, logs=None):
if self.seen < self.target:
self.log_values = []
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
if self.use_steps:
self.seen += 1
else:
self.seen += batch_size
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
# Skip progbar update for the last batch;
# will be handled by on_epoch_end.
if self.verbose and self.seen < self.target:
self.progbar.update(self.seen, self.log_values)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
if self.verbose:
self.progbar.update(self.seen, self.log_values, force=True)
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit` method of models.
"""
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
  which will be filled with the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
Arguments:
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
        only the best model according to the quantity
        monitored is saved, and it will not be overwritten by a worse one.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of epochs) between checkpoints.
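  Example (a minimal sketch; `model`, `X_train`, `Y_train` are placeholders
  and the filepath pattern simply illustrates the formatting options):
  ```python
      checkpoint = ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                                   monitor='val_loss', save_best_only=True)
      model.fit(X_train, Y_train, validation_split=0.2,
                callbacks=[checkpoint])
  ```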
"""
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
logging.warning('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode))
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self.filepath.format(epoch=epoch, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
logging.warning('Can save best model only with %s available, '
'skipping.' % (self.monitor))
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('Epoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s' % (epoch, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('Epoch %05d: %s did not improve' % (epoch, self.monitor))
else:
if self.verbose > 0:
print('Epoch %05d: saving model to %s' % (epoch, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
class EarlyStopping(Callback):
"""Stop training when a monitored quantity has stopped improving.
Arguments:
monitor: quantity to be monitored.
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta, will count as no
improvement.
patience: number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
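  Example (a minimal sketch; `model`, `X_train`, `Y_train` are placeholders,
  and the validation split is only there so that `val_loss` is reported):
  ```python
      early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.001,
                                     patience=3, verbose=1)
      model.fit(X_train, Y_train, validation_split=0.2,
                callbacks=[early_stopping])
  ```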
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto'):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.min_delta = min_delta
self.wait = 0
self.stopped_epoch = 0
if mode not in ['auto', 'min', 'max']:
logging.warning('EarlyStopping mode %s is unknown, '
                      'fallback to auto mode.' % (mode))
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
current = logs.get(self.monitor)
if current is None:
      logging.warning('Early stopping requires %s available!' % (self.monitor))
      return
    if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
else:
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
self.wait += 1
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch))
class RemoteMonitor(Callback):
"""Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
Arguments:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
headers: Dictionary; optional custom HTTP headers.
Defaults to:
`{'Accept': 'application/json', 'Content-Type': 'application/json'}`
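  Example (a minimal sketch; the root URL is the default shown above and is
  assumed to point at a server that accepts the POSTed JSON, while `model`,
  `X_train`, `Y_train` are placeholders):
  ```python
      monitor = RemoteMonitor(root='http://localhost:9000')
      model.fit(X_train, Y_train, callbacks=[monitor])
  ```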
"""
def __init__(self,
root='http://localhost:9000',
path='/publish/epoch/end/',
field='data',
headers=None):
super(RemoteMonitor, self).__init__()
if headers is None:
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
self.root = root
self.path = path
self.field = field
self.headers = headers
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError('RemoteMonitor requires ' 'the `requests` library.')
logs = logs or {}
send = {}
send['epoch'] = epoch
for k, v in logs.items():
send[k] = v
try:
requests.post(
self.root + self.path, {self.field: json.dumps(send)},
headers=self.headers)
except requests.exceptions.RequestException:
logging.warning('Warning: could not reach RemoteMonitor '
'root server at ' + str(self.root))
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
Arguments:
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new
learning rate as output (float).
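  Example (a minimal sketch; the decay schedule is arbitrary and `model`,
  `X_train`, `Y_train` are placeholders):
  ```python
      def schedule(epoch):
          # halve an initial rate of 0.01 every 10 epochs
          return 0.01 * (0.5 ** (epoch // 10))
      lr_scheduler = LearningRateScheduler(schedule)
      model.fit(X_train, Y_train, callbacks=[lr_scheduler])
  ```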
"""
def __init__(self, schedule):
super(LearningRateScheduler, self).__init__()
self.schedule = schedule
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = self.schedule(epoch)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
K.set_value(self.model.optimizer.lr, lr)
class TensorBoard(Callback):
# pylint: disable=line-too-long
"""Tensorboard basic visualizations.
This callback writes a log for TensorBoard, which allows
you to visualize dynamic graphs of your training and test
metrics, as well as activation histograms for the different
layers in your model.
TensorBoard is a visualization tool provided with TensorFlow.
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```
tensorboard --logdir=/full_path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Arguments:
log_dir: the path of the directory where to save the log
files to be parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation
and weight histograms for the layers of the model. If set to 0,
histograms won't be computed. Validation data (or split) must be
specified for histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard.
The log file can become quite large when
write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
batch_size: size of batch of inputs to feed to the network
for histograms computation.
write_images: whether to write model weights to visualize as
image in TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding
layers will be saved.
      embeddings_layer_names: a list of names of layers to keep an eye on. If
        None or an empty list, all the embedding layers will be watched.
      embeddings_metadata: a dictionary which maps layer name to a file name
        in which metadata for this embedding layer is saved. See the
        [details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
        about the metadata file format. If the same metadata file is
        used for all embedding layers, a single string can be passed.
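  Example (a minimal sketch; `model`, `X_train`, `Y_train` are placeholders
  and `./logs` is just an assumed output directory):
  ```python
      tensorboard = TensorBoard(log_dir='./logs', histogram_freq=1,
                                write_graph=True)
      model.fit(X_train, Y_train, validation_split=0.2,
                callbacks=[tensorboard])
  ```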
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='./logs',
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
self.merged = None
self.write_graph = write_graph
self.write_grads = write_grads
self.write_images = write_images
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata or {}
self.batch_size = batch_size
def set_model(self, model):
self.model = model
self.sess = K.get_session()
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
tf_summary.histogram(weight.name, weight)
if self.write_grads:
grads = model.optimizer.get_gradients(model.total_loss, weight)
tf_summary.histogram('{}_grad'.format(weight.name), grads)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 2: # dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # convnet case
if K.image_data_format() == 'channels_last':
# switch to channels_first to display
# every kernel as a separate image
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img,
[shape[0], shape[1], shape[2], 1])
elif len(shape) == 1: # bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
else:
# not possible to handle 3D convnets etc.
continue
shape = K.int_shape(w_img)
assert len(shape) == 4 and shape[-1] in [1, 3, 4]
tf_summary.image(weight.name, w_img)
if hasattr(layer, 'output'):
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
self.merged = tf_summary.merge_all()
if self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
else:
self.writer = tf_summary.FileWriter(self.log_dir)
if self.embeddings_freq:
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
embeddings = {
layer.name: layer.weights[0]
for layer in self.model.layers if layer.name in embeddings_layer_names
}
self.saver = saver_lib.Saver(list(embeddings.values()))
embeddings_metadata = {}
if not isinstance(self.embeddings_metadata, str):
embeddings_metadata = self.embeddings_metadata
else:
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings.keys()
}
config = projector.ProjectorConfig()
self.embeddings_ckpt_path = os.path.join(self.log_dir,
'keras_embedding.ckpt')
for layer_name, tensor in embeddings.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
if layer_name in embeddings_metadata:
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
if self.validation_data and self.histogram_freq:
if epoch % self.histogram_freq == 0:
val_data = self.validation_data
tensors = (
self.model.inputs + self.model.targets + self.model.sample_weights)
if self.model.uses_learning_phase:
tensors += [K.learning_phase()]
assert len(val_data) == len(tensors)
val_size = val_data[0].shape[0]
i = 0
while i < val_size:
step = min(self.batch_size, val_size - i)
batch_val = []
batch_val.append(val_data[0][i:i + step])
batch_val.append(val_data[1][i:i + step])
batch_val.append(val_data[2][i:i + step])
if self.model.uses_learning_phase:
batch_val.append(val_data[3])
feed_dict = dict(zip(tensors, batch_val))
result = self.sess.run([self.merged], feed_dict=feed_dict)
summary_str = result[0]
self.writer.add_summary(summary_str, epoch)
i += self.batch_size
if self.embeddings_freq and self.embeddings_ckpt_path:
if epoch % self.embeddings_freq == 0:
self.saver.save(self.sess, self.embeddings_ckpt_path, epoch)
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.writer.add_summary(summary, epoch)
self.writer.flush()
def on_train_end(self, _):
self.writer.close()
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
Arguments:
monitor: quantity to be monitored.
factor: factor by which the learning rate will
be reduced. new_lr = lr * factor
patience: number of epochs with no improvement
after which learning rate will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode,
lr will be reduced when the quantity
monitored has stopped decreasing; in `max`
mode it will be reduced when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
epsilon: threshold for measuring the new optimum,
to only focus on significant changes.
cooldown: number of epochs to wait before resuming
normal operation after lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self,
monitor='val_loss',
factor=0.1,
patience=10,
verbose=0,
mode='auto',
epsilon=1e-4,
cooldown=0,
min_lr=0):
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
if factor >= 1.0:
raise ValueError('ReduceLROnPlateau ' 'does not support a factor >= 1.0.')
self.factor = factor
self.min_lr = min_lr
self.epsilon = epsilon
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.' % (self.mode))
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
self.lr_epsilon = self.min_lr * 1e-4
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
current = logs.get(self.monitor)
if current is None:
logging.warning('Learning Rate Plateau Reducing requires %s available!' %
self.monitor)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
if self.wait >= self.patience:
old_lr = float(K.get_value(self.model.optimizer.lr))
if old_lr > self.min_lr + self.lr_epsilon:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: reducing learning rate to %s.' % (epoch,
new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
self.wait += 1
def in_cooldown(self):
return self.cooldown_counter > 0
class CSVLogger(Callback):
"""Callback that streams epoch results to a csv file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
Arguments:
filename: filename of the csv file, e.g. 'run/log.csv'.
separator: string used to separate elements in the csv file.
append: True: append if file exists (useful for continuing
        training). False: overwrite the existing file.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
self.file_flags = 'b' if six.PY2 and os.name == 'nt' else ''
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
if self.append:
if os.path.exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
self.csv_file = open(self.filename, 'a' + self.file_flags)
else:
self.csv_file = open(self.filename, 'w' + self.file_flags)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
elif isinstance(k, Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if not self.writer:
self.keys = sorted(logs.keys())
class CustomDialect(csv.excel):
delimiter = self.sep
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=['epoch'] + self.keys,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = OrderedDict({'epoch': epoch})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
class LambdaCallback(Callback):
"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
  at the appropriate time. Note that the callbacks expect positional
  arguments, as follows:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_batch_begin` and `on_batch_end` expect two positional arguments:
`batch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
Arguments:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_batch_begin: called at the beginning of every batch.
on_batch_end: called at the end of every batch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# Plot the loss after every epoch.
import numpy as np
import matplotlib.pyplot as plt
plot_loss_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: plt.plot(np.arange(epoch),
logs['loss']))
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
plot_loss_callback,
cleanup_callback])
```
"""
def __init__(self,
on_epoch_begin=None,
on_epoch_end=None,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None,
**kwargs):
super(LambdaCallback, self).__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
else:
self.on_epoch_begin = lambda epoch, logs: None
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
else:
self.on_epoch_end = lambda epoch, logs: None
if on_batch_begin is not None:
self.on_batch_begin = on_batch_begin
else:
self.on_batch_begin = lambda batch, logs: None
if on_batch_end is not None:
self.on_batch_end = on_batch_end
else:
self.on_batch_end = lambda batch, logs: None
if on_train_begin is not None:
self.on_train_begin = on_train_begin
else:
self.on_train_begin = lambda logs: None
if on_train_end is not None:
self.on_train_end = on_train_end
else:
self.on_train_end = lambda logs: None
| apache-2.0 |
mgahsan/QuantEcon.py | examples/main_LS.py | 6 | 4318 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 20 14:07:56 2015
@author: dgevans
"""
import matplotlib.pyplot as plt
import numpy as np
import lucas_stokey as LS
from calibrations.BGP import M1
from calibrations.CES import M2
from calibrations.CES import M_time_example
'''
Time Varying Example
'''
PP_seq_time = LS.Planners_Allocation_Sequential(M_time_example) #solve sequential problem
sHist_h = np.array([0,1,2,3,5,5,5])
sHist_l = np.array([0,1,2,4,5,5,5])
sim_seq_h = PP_seq_time.simulate(1.,0,7,sHist_h)
sim_seq_l = PP_seq_time.simulate(1.,0,7,sHist_l)
plt.figure(figsize=[14,10])
plt.subplot(3,2,1)
plt.title('Consumption')
plt.plot(sim_seq_l[0],'-ok')
plt.plot(sim_seq_h[0],'-or')
plt.subplot(3,2,2)
plt.title('Labor Supply')
plt.plot(sim_seq_l[1],'-ok')
plt.plot(sim_seq_h[1],'-or')
plt.subplot(3,2,3)
plt.title('Government Debt')
plt.plot(sim_seq_l[2],'-ok')
plt.plot(sim_seq_h[2],'-or')
plt.subplot(3,2,4)
plt.title('Tax Rate')
plt.plot(sim_seq_l[3],'-ok')
plt.plot(sim_seq_h[3],'-or')
plt.subplot(3,2,5)
plt.title('Government Spending')
plt.plot(M_time_example.G[sHist_l],'-ok')
plt.plot(M_time_example.G[sHist_h],'-or')
plt.subplot(3,2,6)
plt.title('Output')
plt.plot(M_time_example.Theta[sHist_l]*sim_seq_l[1],'-ok')
plt.plot(M_time_example.Theta[sHist_h]*sim_seq_h[1],'-or')
plt.tight_layout()
plt.savefig('TaxSequence_time_varying.png')
plt.figure(figsize=[8,5])
plt.title('Gross Interest Rate')
plt.plot(sim_seq_l[-1],'-ok')
plt.plot(sim_seq_h[-1],'-or')
plt.tight_layout()
plt.savefig('InterestRate_time_varying.png')
'''
Time 0 example
'''
PP_seq_time0 = LS.Planners_Allocation_Sequential(M2) #solve sequential problem
B_vec = np.linspace(-1.5,1.,100)
taxpolicy = np.vstack([PP_seq_time0.simulate(B_,0,2)[3] for B_ in B_vec])
interest_rate = np.vstack([PP_seq_time0.simulate(B_,0,3)[-1] for B_ in B_vec])
plt.figure(figsize=[14,6])
plt.subplot(211)
plt.plot(B_vec,taxpolicy[:,0],linewidth=2.)
plt.plot(B_vec,taxpolicy[:,1],linewidth=2.)
plt.title('Tax Rate')
plt.legend((r'Time $t=0$', r'Time $t\geq1$'),loc=2,shadow=True)
plt.subplot(212)
plt.title('Gross Interest Rate')
plt.plot(B_vec,interest_rate[:,0],linewidth=2.)
plt.plot(B_vec,interest_rate[:,1],linewidth=2.)
plt.xlabel('Initial Government Debt')
plt.tight_layout()
plt.savefig('Time0_taxpolicy.png')
#compute the debt the government carries into time 1
B1_vec = np.hstack([PP_seq_time0.simulate(B_,0,2)[2][1] for B_ in B_vec])
#now compute the optimal policy if the government could reset
tau1_reset = np.hstack([PP_seq_time0.simulate(B1,0,1)[3] for B1 in B1_vec])
plt.figure(figsize=[10,6])
plt.plot(B_vec,taxpolicy[:,1],linewidth=2.)
plt.plot(B_vec,tau1_reset,linewidth=2.)
plt.xlabel('Initial Government Debt')
plt.title('Tax Rate')
plt.legend((r'$\tau_1$', r'$\tau_1^R$'),loc=2,shadow=True)
plt.tight_layout()
plt.savefig('Time0_inconsistent.png')
'''
BGP Example
'''
#initialize mugrid for value function iteration
muvec = np.linspace(-0.6,0.0,200)
PP_seq = LS.Planners_Allocation_Sequential(M1) #solve sequential problem
PP_bel = LS.Planners_Allocation_Bellman(M1,muvec) #solve recursive problem
T = 20
#sHist = utilities.simulate_markov(M1.Pi,0,T)
sHist = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0],dtype=int)
#simulate
sim_seq = PP_seq.simulate(0.5,0,T,sHist)
sim_bel = PP_bel.simulate(0.5,0,T,sHist)
#plot policies
plt.figure(figsize=[14,10])
plt.subplot(3,2,1)
plt.title('Consumption')
plt.plot(sim_seq[0],'-ok')
plt.plot(sim_bel[0],'-xk')
plt.legend(('Sequential','Recursive'),loc='best')
plt.subplot(3,2,2)
plt.title('Labor Supply')
plt.plot(sim_seq[1],'-ok')
plt.plot(sim_bel[1],'-xk')
plt.subplot(3,2,3)
plt.title('Government Debt')
plt.plot(sim_seq[2],'-ok')
plt.plot(sim_bel[2],'-xk')
plt.subplot(3,2,4)
plt.title('Tax Rate')
plt.plot(sim_seq[3],'-ok')
plt.plot(sim_bel[3],'-xk')
plt.subplot(3,2,5)
plt.title('Government Spending')
plt.plot(M1.G[sHist],'-ok')
plt.plot(M1.G[sHist],'-xk')
plt.plot(M1.G[sHist],'-^k')
plt.subplot(3,2,6)
plt.title('Output')
plt.plot(M1.Theta[sHist]*sim_seq[1],'-ok')
plt.plot(M1.Theta[sHist]*sim_bel[1],'-xk')
plt.tight_layout()
plt.savefig('TaxSequence_LS.png')
plt.figure(figsize=[8,5])
plt.title('Gross Interest Rate')
plt.plot(sim_seq[-1],'-ok')
plt.plot(sim_bel[-1],'-xk')
plt.legend(('Sequential','Recursive'),loc='best')
plt.tight_layout()
| bsd-3-clause |
ericmjl/bokeh | bokeh/_testing/plugins/pandas.py | 1 | 2063 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a Pytest plugin for handling tests when Pandas may be missing.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import pytest
# Bokeh imports
from bokeh.util.dependencies import import_optional
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'pd',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@pytest.fixture
def pd():
''' A PyTest fixture that will automatically skip a test if Pandas is
not installed.
'''
pandas = import_optional('pandas')
if pandas is None:
pytest.skip('pandas is not installed')
return pandas
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
huzq/scikit-learn | examples/ensemble/plot_feature_transformation.py | 17 | 4335 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the logistic regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(
X_train, y_train, test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
random_state=0)
rt_lm = LogisticRegression(max_iter=1000)
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression(max_iter=1000)
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
# Supervised transformation based on gradient boosted trees
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression(max_iter=1000)
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
tosolveit/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
    name_or_id : the integer id or the string name (as given in the metadata)
        of the MLComp dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, the
    loader will choose between integer id lookup and metadata name lookup by
    looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
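    Examples
    --------
    A hypothetical session; the '20news-18828' dataset name is only an
    illustration and is assumed to have been downloaded and unpacked under
    MLCOMP_DATASETS_HOME beforehand::
        from sklearn.datasets import load_mlcomp
        news_train = load_mlcomp('20news-18828', 'train')
        print(news_train.target_names)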
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
| bsd-3-clause |
ColumbiaCMB/kid_readout | apps/data_taking_scripts/old_scripts/fast_noise_sweep.py | 1 | 4018 | import matplotlib
from kid_readout.roach import baseband
matplotlib.use('agg')
import numpy as np
import time
import sys
from kid_readout.utils import data_block, data_file,sweeps
from kid_readout.analysis.resonator import Resonator
#from sim900 import sim900Client
ri = baseband.RoachBasebandWide()
#ri.initialize()
ri.set_fft_gain(4)
#sc = sim900Client.sim900Client()
ri.set_dac_attenuator(19.5)
#f0s = np.load('/home/gjones/workspace/apps/f8_fit_resonances.npy')
#f0s = np.load('/home/gjones/workspace/apps/first_pass_sc3x3_0813f9.npy')
#f0s = np.load('/home/gjones/workspace/apps/sc5x4_0813f10_first_pass.npy')
f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f9_2014-02-11.npy')
f0s.sort()
f0s = f0s*0.993
nf = len(f0s)
atonce = 4
if nf % atonce > 0:
print "extending list of resonators to make a multiple of ",atonce
f0s = np.concatenate((f0s,np.arange(1,1+atonce-(nf%atonce))+f0s.max()))
offsets = np.linspace(-4882.8125,4638.671875,20)
offsets = np.concatenate(([-40e3,-20e3],offsets,[20e3,40e3]))/1e6
offsets = offsets*4
print f0s
print len(f0s)
start = time.time()
measured_freqs = sweeps.prepare_sweep(ri,f0s,offsets,nsamp=2**21)
print "loaded waveforms in", (time.time()-start),"seconds"
sys.stdout.flush()
time.sleep(1)
atten_list = [35.5,33.5,46.5,43.5,40.5,37.5]
for atten in atten_list:
df = data_file.DataFile()
dsize=None
ri.set_dac_attenuator(atten)
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=8)
df.add_sweep(sweep_data)
meas_cfs = []
idxs = []
for m in range(len(f0s)):
fr,s21 = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
res = Resonator(fr,s21)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
print meas_cfs
ri.add_tone_freqs(np.array(meas_cfs))
ri.select_bank(ri.tone_bins.shape[0]-1)
ri._sync()
time.sleep(0.5)
nsets = len(meas_cfs)/atonce
tsg = None
for iset in range(nsets):
selection = range(len(meas_cfs))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.2)
dmod,addr = ri.get_data(512,demod=True)
if dsize is None:
dsize = dmod.shape[0]
chids = ri.fpga_fft_readout_indexes+1
tones = ri.tone_bins[ri.bank,ri.readout_selection]
nsamp = ri.tone_nsamp
print "saving data"
for m in range(len(chids)):
print m
block = data_block.DataBlock(data = dmod[:dsize,m], tone=tones[m], fftbin = chids[m],
nsamp = nsamp, nfft = ri.nfft, wavenorm = ri.wavenorm, t0 = time.time(), fs = ri.fs)
tsg = df.add_block_to_timestream(block, tsg=tsg)
df.log_hw_state(ri)
#sc.fetchDict()
#df.add_cryo_data(sc.data)
df.nc.sync()
df.nc.close()
print "completed in",((time.time()-start)/60.0),"minutes"
# raw_input("turn off pulse tube")
#
# tsg = None
# dmod,addr = ri.get_data(2048,demod=True)
# chids = ri.fpga_fft_readout_indexes+1
# tones = ri.tone_bins[ri.readout_selection]
# nsamp = ri.tone_nsamp
# print "saving data"
# for m in range(len(chids)):
# print m
# block = data_block.DataBlock(data = dmod[:,m], tone=tones[m], fftbin = chids[m],
# nsamp = nsamp, nfft = ri.nfft, wavenorm = ri.wavenorm, t0 = time.time(), fs = ri.fs)
# tsg = df.add_block_to_timestream(block, tsg=tsg)
#
# df.log_hw_state(ri)
# df.nc.sync()
| bsd-2-clause |
iismd17/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
DavidMcDonald1993/ghsom | spearmint/examples/simple/make_plots.py | 3 | 4444 | import importlib
import sys
from itertools import izip
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as axes3d
from spearmint.utils.database.mongodb import MongoDB
from spearmint.main import get_options, parse_resources_from_config, load_jobs, remove_broken_jobs, \
load_task_group, load_hypers
def print_dict(d, level=1):
if isinstance(d, dict):
if level > 1: print ""
for k, v in d.iteritems():
print " " * level, k,
print_dict(v, level=level+1)
else:
print d
def main():
"""
Usage: python make_plots.py PATH_TO_DIRECTORY
    TODO: Some aspects of this function are specific to the simple branin example.
    We should clean this up so that the interpretation of the plots is clearer and
    so that it works in more general cases
    (e.g. if the objective likelihood is binomial then values should not be
    unstandardized).
"""
options, expt_dir = get_options()
print "options:"
print_dict(options)
# reduce the grid size
options["grid_size"] = 400
resources = parse_resources_from_config(options)
# Load up the chooser.
chooser_module = importlib.import_module('spearmint.choosers.' + options['chooser'])
chooser = chooser_module.init(options)
print "chooser", chooser
experiment_name = options.get("experiment-name", 'unnamed-experiment')
# Connect to the database
db_address = options['database']['address']
sys.stderr.write('Using database at %s.\n' % db_address)
db = MongoDB(database_address=db_address)
# testing below here
jobs = load_jobs(db, experiment_name)
remove_broken_jobs(db, jobs, experiment_name, resources)
print "resources:", resources
print_dict(resources)
resource = resources.itervalues().next()
task_options = { task: options["tasks"][task] for task in resource.tasks }
print "task_options:"
print_dict(task_options) # {'main': {'likelihood': u'NOISELESS', 'type': 'OBJECTIVE'}}
task_group = load_task_group(db, options, resource.tasks)
print "task_group", task_group # TaskGroup
print "tasks:"
print_dict(task_group.tasks) # {'main': <spearmint.tasks.task.Task object at 0x10bf63290>}
hypers = load_hypers(db, experiment_name)
print "loaded hypers", hypers # from GP.to_dict()
hypers = chooser.fit(task_group, hypers, task_options)
print "\nfitted hypers:"
print_dict(hypers)
lp, x = chooser.best()
x = x.flatten()
print "best", lp, x
bestp = task_group.paramify(task_group.from_unit(x))
print "expected best position", bestp
# get the grid of points
grid = chooser.grid
# print "chooser objectives:",
# print_dict(chooser.objective)
print "chooser models:", chooser.models
print_dict(chooser.models)
obj_model = chooser.models[chooser.objective['name']]
obj_mean, obj_var = obj_model.function_over_hypers(obj_model.predict, grid)
# un-normalize the function values and variances
obj_task = task_group.tasks['main']
obj_mean = [obj_task.unstandardize_mean(obj_task.unstandardize_variance(v)) for v in obj_mean]
obj_std = [obj_task.unstandardize_variance(np.sqrt(v)) for v in obj_var]
# for xy, m, v in izip(grid, obj_mean, obj_var):
# print xy, m, v
grid = map(task_group.from_unit, grid)
# return
xymv = [(xy[0], xy[1], m, v) for xy, m, v in izip(grid, obj_mean, obj_std)]# if .2 < xy[0] < .25]
x = map(lambda x:x[0], xymv)
y = map(lambda x:x[1], xymv)
m = map(lambda x:x[2], xymv)
sig = map(lambda x:x[3], xymv)
# print y
fig = plt.figure(dpi=100)
ax = fig.add_subplot(111, projection='3d')
ax.plot(x, y, m, marker='.', linestyle="None")
# plot errorbars
for i in np.arange(0, len(x)):
ax.plot([x[i], x[i]], [y[i], y[i]], [m[i]+sig[i], m[i]-sig[i]], marker="_", color='k')
# get the observed points
task = task_group.tasks['main']
idata = task.valid_normalized_data_dict
xy = idata["inputs"]
xy = map(task_group.from_unit, xy)
xy = np.array(xy)
vals = idata["values"]
vals = [obj_task.unstandardize_mean(obj_task.unstandardize_variance(v)) for v in vals]
ax.plot(xy[:,0], xy[:,1], vals, marker='o', color="r", linestyle="None")
plt.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
samfpetersen/gnuradio | gnuradio-runtime/apps/evaluation_random_numbers.py | 26 | 5155 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
import numpy as np
from scipy.stats import norm, laplace, rayleigh
from matplotlib import pyplot as plt
# NOTE: scipy and matplotlib are optional packages and not included in the default gnuradio dependencies
#*** SETUP ***#
# Number of realisations per histogram
num_tests = 1000000
# Set number of bins in histograms
uniform_num_bins = 31
gauss_num_bins = 31
rayleigh_num_bins = 31
laplace_num_bins = 31
rndm = gr.random() # instance of gnuradio random class (gr::random)
print 'All histograms contain',num_tests,'realisations.'
#*** GENERATE DATA ***#
uniform_values = np.zeros(num_tests)
gauss_values = np.zeros(num_tests)
rayleigh_values = np.zeros(num_tests)
laplace_values = np.zeros(num_tests)
for k in range(num_tests):
uniform_values[k] = rndm.ran1()
gauss_values[k] = rndm.gasdev()
rayleigh_values[k] = rndm.rayleigh()
laplace_values[k] = rndm.laplacian()
#*** HISTOGRAM DATA AND CALCULATE EXPECTED COUNTS ***#
uniform_bins = np.linspace(0,1,uniform_num_bins)
gauss_bins = np.linspace(-8,8,gauss_num_bins)
laplace_bins = np.linspace(-8,8,laplace_num_bins)
rayleigh_bins = np.linspace(0,10,rayleigh_num_bins)
uniform_hist = np.histogram(uniform_values,uniform_bins)
gauss_hist = np.histogram(gauss_values,gauss_bins)
rayleigh_hist = np.histogram(rayleigh_values,rayleigh_bins)
laplace_hist = np.histogram(laplace_values,laplace_bins)
uniform_expected = np.zeros(uniform_num_bins-1)
gauss_expected = np.zeros(gauss_num_bins-1)
rayleigh_expected = np.zeros(rayleigh_num_bins-1)
laplace_expected = np.zeros(laplace_num_bins-1)
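# Expected count per bin is num_tests * (CDF(upper_edge) - CDF(lower_edge));
# for the uniform case this reduces to num_tests / (number of bins).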
for k in range(len(uniform_hist[0])):
uniform_expected[k] = num_tests/float(uniform_num_bins-1)
for k in range(len(gauss_hist[0])):
gauss_expected[k] = float(norm.cdf(gauss_hist[1][k+1])-norm.cdf(gauss_hist[1][k]))*num_tests
for k in range(len(rayleigh_hist[0])):
rayleigh_expected[k] = float(rayleigh.cdf(rayleigh_hist[1][k+1])-rayleigh.cdf(rayleigh_hist[1][k]))*num_tests
for k in range(len(laplace_hist[0])):
laplace_expected[k] = float(laplace.cdf(laplace_hist[1][k+1])-laplace.cdf(laplace_hist[1][k]))*num_tests
#*** PLOT HISTOGRAMS AND EXPECTATIONS TAKEN FROM SCIPY ***#
uniform_bins_center = uniform_bins[0:-1]+(uniform_bins[1]-uniform_bins[0])/2.0
gauss_bins_center = gauss_bins[0:-1]+(gauss_bins[1]-gauss_bins[0])/2.0
rayleigh_bins_center = rayleigh_bins[0:-1]+(rayleigh_bins[1]-rayleigh_bins[0])/2.0
laplace_bins_center = laplace_bins[0:-1]+(laplace_bins[1]-laplace_bins[0])/2.0
plt.figure(1)
plt.subplot(2,1,1)
plt.plot(uniform_bins_center,uniform_hist[0],'s--',uniform_bins_center,uniform_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Uniform: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(uniform_bins_center,uniform_hist[0]/uniform_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Uniform: Relative deviation to scipy')
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(gauss_bins_center,gauss_hist[0],'s--',gauss_bins_center,gauss_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Gauss: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(gauss_bins_center,gauss_hist[0]/gauss_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Gauss: Relative deviation to scipy')
plt.figure(3)
plt.subplot(2,1,1)
plt.plot(rayleigh_bins_center,rayleigh_hist[0],'s--',rayleigh_bins_center,rayleigh_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Rayleigh: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(rayleigh_bins_center,rayleigh_hist[0]/rayleigh_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Rayleigh: Relative deviation to scipy')
plt.figure(4)
plt.subplot(2,1,1)
plt.plot(laplace_bins_center,laplace_hist[0],'s--',laplace_bins_center,laplace_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Laplace: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(laplace_bins_center,laplace_hist[0]/laplace_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Laplace: Relative deviation to scipy')
plt.show()
| gpl-3.0 |
sampadsaha5/sympy | sympy/external/tests/test_importtools.py | 91 | 1215 | from sympy.external import import_module
# fixes issue that arose in addressing issue 6533
def test_no_stdlib_collections():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections2():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections3():
'''make sure we get the right collections with no catch'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0')
if matplotlib:
assert collections != matplotlib.collections
| bsd-3-clause |
joelagnel/trappy | tests/test_plot_utils.py | 3 | 8672 | # Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import matplotlib
import pandas as pd
from test_thermal import BaseTestThermal
import trappy
import plot_utils
class TestPlotUtils(unittest.TestCase):
def test_normalize_title(self):
"""Test normalize_title"""
self.assertEquals(plot_utils.normalize_title("Foo", ""), "Foo")
self.assertEquals(plot_utils.normalize_title("Foo", "Bar"), "Bar - Foo")
def test_set_lim(self):
"""Test set_lim()"""
class GetSet(object):
def __init__(self):
self.min = 1
self.max = 2
def get(self):
return (self.min, self.max)
def set(self, minimum, maximum):
self.min = minimum
self.max = maximum
gs = GetSet()
plot_utils.set_lim("default", gs.get, gs.set)
self.assertEquals(gs.min, 1)
self.assertEquals(gs.max, 2)
plot_utils.set_lim("range", gs.get, gs.set)
self.assertEquals(gs.min, 0.9)
self.assertEquals(gs.max, 2.1)
plot_utils.set_lim((0, 100), gs.get, gs.set)
self.assertEquals(gs.min, 0)
self.assertEquals(gs.max, 100)
def test_set_ylim(self):
"""Test that set_ylim() doesn't bomb"""
_, ax = matplotlib.pyplot.subplots()
plot_utils.set_ylim(ax, "default")
plot_utils.set_ylim(ax, (0, 5))
def test_set_xlim(self):
"""Test that set_xlim() doesn't bomb"""
_, ax = matplotlib.pyplot.subplots()
plot_utils.set_xlim(ax, "default")
plot_utils.set_xlim(ax, (0, 5))
def test_pre_plot_setup(self):
"""Test that plot_utils.pre_plot_setup() doesn't bomb"""
plot_utils.pre_plot_setup(None, None)
plot_utils.pre_plot_setup(height=9, width=None)
plot_utils.pre_plot_setup(height=None, width=9)
plot_utils.pre_plot_setup(3, 9)
axis = plot_utils.pre_plot_setup(ncols=2)
self.assertEquals(len(axis), 2)
axis = plot_utils.pre_plot_setup(nrows=2, ncols=3)
self.assertEquals(len(axis), 2)
self.assertEquals(len(axis[0]), 3)
self.assertEquals(len(axis[1]), 3)
def test_post_plot_setup(self):
"""Test that post_plot_setup() doesn't bomb"""
_, ax = matplotlib.pyplot.subplots()
plot_utils.post_plot_setup(ax)
plot_utils.post_plot_setup(ax, title="Foo")
plot_utils.post_plot_setup(ax, ylim=(0, 72))
plot_utils.post_plot_setup(ax, ylim="range")
plot_utils.post_plot_setup(ax, xlabel="Bar")
plot_utils.post_plot_setup(ax, xlim=(0, 100))
plot_utils.post_plot_setup(ax, xlim="default")
def test_plot_hist(self):
"""Test that plost_hist doesn't bomb"""
data = pd.Series([1, 1, 2, 4])
_, ax = matplotlib.pyplot.subplots()
plot_utils.plot_hist(data, ax, "Foo", "m", 20, "numbers", (0, 4), "default")
class TestPlotUtilsNeedTrace(BaseTestThermal):
def __init__(self, *args, **kwargs):
super(TestPlotUtilsNeedTrace, self).__init__(*args, **kwargs)
self.map_label = {"00000000,00000039": "A53", "00000000,00000006": "A57"}
self.actor_order = ["GPU", "A57", "A53"]
def test_number_freq_plots(self):
"""Calculate the number of frequency plots correctly"""
trace_out = ""
trace = trappy.FTrace()
self.assertEquals(plot_utils.number_freq_plots([trace], self.map_label),
3)
# Strip out devfreq traces
with open("trace.txt") as fin:
for line in fin:
if ("thermal_power_devfreq_get_power:" not in line) and \
("thermal_power_devfreq_limit:" not in line):
trace_out += line
with open("trace.txt", "w") as fout:
fout.write(trace_out)
# Without devfreq there should only be two plots
trace = trappy.FTrace()
self.assertEquals(plot_utils.number_freq_plots([trace], self.map_label),
2)
def test_plot_temperature(self):
"""Test that plot_utils.plot_temperature() doesn't bomb"""
trace1 = trappy.FTrace(name="first")
trace2 = trappy.FTrace(name="second")
traces = [trace1, trace2]
plot_utils.plot_temperature(traces, ylim="default")
matplotlib.pyplot.close('all')
def test_plot_load(self):
"""Test that plot_utils.plot_load() doesn't bomb"""
trace1 = trappy.FTrace(name="first")
trace2 = trappy.FTrace(name="second")
traces = [trace1, trace2]
plot_utils.plot_load(traces, self.map_label, height=5)
matplotlib.pyplot.close('all')
def test_plot_load_single_trace(self):
"""plot_utils.plot_load() can be used with a single trace"""
trace = trappy.FTrace()
plot_utils.plot_load([trace], self.map_label)
matplotlib.pyplot.close('all')
def test_plot_allfreqs(self):
"""Test that plot_utils.plot_allfreqs() doesn't bomb"""
trace1 = trappy.FTrace(name="first")
trace2 = trappy.FTrace(name="second")
traces = [trace1, trace2]
plot_utils.plot_allfreqs(traces, self.map_label, width=20)
matplotlib.pyplot.close('all')
def test_plot_allfreqs_single_trace(self):
"""plot_utils.plot_allfreqs() can be used with a single trace"""
trace = trappy.FTrace()
plot_utils.plot_allfreqs([trace], self.map_label)
matplotlib.pyplot.close('all')
def test_plot_allfreqs_one_actor(self):
"""plot_utils.plot_allfreqs work when there is only one actor"""
in_data = """ kworker/4:1-397 [004] 720.741349: thermal_power_cpu_get: cpus=00000000,00000006 freq=1400000 raw_cpu_power=189 load={23, 12} power=14
kworker/4:1-397 [004] 720.741679: thermal_power_cpu_limit: cpus=00000000,00000006 freq=1400000 cdev_state=1 power=14"""
with open("trace.txt", "w") as fout:
fout.write(in_data)
traces = [trappy.FTrace(name="first"), trappy.FTrace(name="second")]
map_label = {"00000000,00000006": "A57"}
plot_utils.plot_allfreqs(traces, map_label)
matplotlib.pyplot.close("all")
def test_plot_controller(self):
"""plot_utils.plot_controller() doesn't bomb"""
trace1 = trappy.FTrace(name="first")
trace2 = trappy.FTrace(name="second")
traces = [trace1, trace2]
plot_utils.plot_controller(traces, height=5)
matplotlib.pyplot.close('all')
def test_plot_input_power(self):
"""plot_utils.plot_input_power() doesn't bomb"""
trace1 = trappy.FTrace(name="first")
trace2 = trappy.FTrace(name="second")
traces = [trace1, trace2]
plot_utils.plot_input_power(traces, self.actor_order, width=20)
matplotlib.pyplot.close('all')
def test_plot_output_power(self):
"""plot_utils.plot_output_power() doesn't bomb"""
trace1 = trappy.FTrace(name="first")
trace2 = trappy.FTrace(name="second")
traces = [trace1, trace2]
plot_utils.plot_output_power(traces, self.actor_order, width=20)
matplotlib.pyplot.close('all')
def test_plot_freq_hists(self):
"""plot_utils.plot_freq_hists() doesn't bomb"""
trace1 = trappy.FTrace(name="first")
trace2 = trappy.FTrace(name="second")
traces = [trace1, trace2]
plot_utils.plot_freq_hists(traces, self.map_label)
matplotlib.pyplot.close('all')
def test_plot_freq_hists_single_trace(self):
"""plot_utils.plot_freq_hists() works with a single trace"""
trace = trappy.FTrace()
plot_utils.plot_freq_hists([trace], self.map_label)
matplotlib.pyplot.close('all')
def test_plot_temperature_hist(self):
"""plot_utils.plot_temperature_hist() doesn't bomb"""
trace1 = trappy.FTrace(name="first")
trace2 = trappy.FTrace(name="second")
traces = [trace1, trace2]
plot_utils.plot_temperature_hist(traces)
matplotlib.pyplot.close('all')
| apache-2.0 |
MJuddBooth/pandas | pandas/io/formats/csvs.py | 3 | 11465 | # -*- coding: utf-8 -*-
"""
Module for formatting output data into CSV files.
"""
from __future__ import print_function
import csv as csvlib
import os
import warnings
from zipfile import ZipFile
import numpy as np
from pandas._libs import writers as libwriters
from pandas.compat import StringIO, range, zip
from pandas.core.dtypes.generic import (
ABCDatetimeIndex, ABCIndexClass, ABCMultiIndex, ABCPeriodIndex)
from pandas.core.dtypes.missing import notna
from pandas import compat
from pandas.io.common import (
UnicodeWriter, _get_handle, _infer_compression, get_filepath_or_buffer)
class CSVFormatter(object):
def __init__(self, obj, path_or_buf=None, sep=",", na_rep='',
float_format=None, cols=None, header=True, index=True,
index_label=None, mode='w', nanRep=None, encoding=None,
compression='infer', quoting=None, line_terminator='\n',
chunksize=None, tupleize_cols=False, quotechar='"',
date_format=None, doublequote=True, escapechar=None,
decimal='.'):
self.obj = obj
if path_or_buf is None:
path_or_buf = StringIO()
self.path_or_buf, _, _, _ = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression, mode=mode
)
self.sep = sep
self.na_rep = na_rep
self.float_format = float_format
self.decimal = decimal
self.header = header
self.index = index
self.index_label = index_label
self.mode = mode
if encoding is None:
encoding = 'ascii' if compat.PY2 else 'utf-8'
self.encoding = encoding
self.compression = _infer_compression(self.path_or_buf, compression)
if quoting is None:
quoting = csvlib.QUOTE_MINIMAL
self.quoting = quoting
if quoting == csvlib.QUOTE_NONE:
# prevents crash in _csv
quotechar = None
self.quotechar = quotechar
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator or os.linesep
self.date_format = date_format
self.tupleize_cols = tupleize_cols
self.has_mi_columns = (isinstance(obj.columns, ABCMultiIndex) and
not self.tupleize_cols)
# validate mi options
if self.has_mi_columns:
if cols is not None:
raise TypeError("cannot specify cols with a MultiIndex on the "
"columns")
if cols is not None:
if isinstance(cols, ABCIndexClass):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
# and make sure cols is just a list of labels
cols = self.obj.columns
if isinstance(cols, ABCIndexClass):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
# save it
self.cols = cols
# preallocate data 2d list
self.blocks = self.obj._data.blocks
ncols = sum(b.shape[0] for b in self.blocks)
self.data = [None] * ncols
if chunksize is None:
chunksize = (100000 // (len(self.cols) or 1)) or 1
self.chunksize = int(chunksize)
self.data_index = obj.index
if (isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex)) and
date_format is not None):
from pandas import Index
self.data_index = Index([x.strftime(date_format) if notna(x) else
'' for x in self.data_index])
self.nlevels = getattr(self.data_index, 'nlevels', 1)
if not index:
self.nlevels = 0
def save(self):
"""
Create the writer & save
"""
# GH21227 internal compression is not used when file-like passed.
if self.compression and hasattr(self.path_or_buf, 'write'):
msg = ("compression has no effect when passing file-like "
"object as input.")
warnings.warn(msg, RuntimeWarning, stacklevel=2)
# when zip compression is called.
is_zip = isinstance(self.path_or_buf, ZipFile) or (
not hasattr(self.path_or_buf, 'write')
and self.compression == 'zip')
if is_zip:
# zipfile doesn't support writing a string to the archive, so a string
# buffer receives the CSV output and is then dumped into the
# zip-compressed file handle. GH21241, GH21118
f = StringIO()
close = False
elif hasattr(self.path_or_buf, 'write'):
f = self.path_or_buf
close = False
else:
f, handles = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
close = True
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
delimiter=self.sep, quoting=self.quoting,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar)
if self.encoding == 'ascii':
self.writer = csvlib.writer(f, **writer_kwargs)
else:
writer_kwargs['encoding'] = self.encoding
self.writer = UnicodeWriter(f, **writer_kwargs)
self._save()
finally:
if is_zip:
# GH17778 handles zip compression separately.
buf = f.getvalue()
if hasattr(self.path_or_buf, 'write'):
self.path_or_buf.write(buf)
else:
f, handles = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
f.write(buf)
close = True
if close:
f.close()
for _fh in handles:
_fh.close()
def _save_header(self):
writer = self.writer
obj = self.obj
index_label = self.index_label
cols = self.cols
has_mi_columns = self.has_mi_columns
header = self.header
encoded_labels = []
has_aliases = isinstance(header, (tuple, list, np.ndarray,
ABCIndexClass))
if not (has_aliases or self.header):
return
if has_aliases:
if len(header) != len(cols):
raise ValueError(('Writing {ncols} cols but got {nalias} '
'aliases'.format(ncols=len(cols),
nalias=len(header))))
else:
write_cols = header
else:
write_cols = cols
if self.index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(obj.index, ABCMultiIndex):
index_label = []
for i, name in enumerate(obj.index.names):
if name is None:
name = ''
index_label.append(name)
else:
index_label = obj.index.name
if index_label is None:
index_label = ['']
else:
index_label = [index_label]
elif not isinstance(index_label,
(list, tuple, np.ndarray, ABCIndexClass)):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if not has_mi_columns or has_aliases:
encoded_labels += list(write_cols)
writer.writerow(encoded_labels)
else:
# write out the mi
columns = obj.columns
# write out the names for each level, then ALL of the values for
# each level
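# e.g. for columns MultiIndex([('a', 'x'), ('a', 'y')], names=['lvl0', 'lvl1'])
# and a single index named 'idx', the rows written are
# ['lvl0', 'a', 'a'], ['lvl1', 'x', 'y'] and finally ['idx', '', '']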
for i in range(columns.nlevels):
# we need at least 1 index column to write our col names
col_line = []
if self.index:
# name is the first column
col_line.append(columns.names[i])
if isinstance(index_label, list) and len(index_label) > 1:
col_line.extend([''] * (len(index_label) - 1))
col_line.extend(columns._get_level_values(i))
writer.writerow(col_line)
# Write out the index line if it's not empty.
# Otherwise, we will print out an extraneous
# blank line between the mi and the data rows.
if encoded_labels and set(encoded_labels) != {''}:
encoded_labels.extend([''] * len(columns))
writer.writerow(encoded_labels)
def _save(self):
self._save_header()
nrows = len(self.data_index)
# write in chunksize bites
chunksize = self.chunksize
chunks = int(nrows / chunksize) + 1
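# nrows / chunksize + 1 always yields one extra chunk; the start_i >= end_i
# check below simply skips the empty tail chunk when nrows divides evenly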
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self._save_chunk(start_i, end_i)
def _save_chunk(self, start_i, end_i):
data_index = self.data_index
# create the data for a chunk
slicer = slice(start_i, end_i)
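# b.mgr_locs gives the positions of this block's columns in the original
# frame, so each converted column is scattered back into the preallocated
# self.data list in frame order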
for i in range(len(self.blocks)):
b = self.blocks[i]
d = b.to_native_types(slicer=slicer, na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
for col_loc, col in zip(b.mgr_locs, d):
# self.data is a preallocated list
self.data[col_loc] = col
ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
libwriters.write_csv_rows(self.data, ix, self.nlevels,
self.cols, self.writer)
| bsd-3-clause |
ueshin/apache-spark | python/pyspark/pandas/generic.py | 9 | 104900 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class for DataFrame/Column that behaves similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import (
Axis,
DataFrameOrSeries,
Dtype,
FrameLike,
Label,
Name,
Scalar,
)
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
.. note:: unlike pandas', pandas-on-Spark emulates cumulative product via the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If the values is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
# TODO: Although this has removed pandas >= 1.0.0, but we're keeping this as deprecated
# since we're using this for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
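# Series.dtypes is a single dtype while DataFrame.dtypes is an iterable
# (a pandas Series), so normalize to a list before counting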
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``multiply_2`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed-type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Name]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's CSV options. Check
the available options in PySpark's API documentation for spark.write.csv(...).
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# 0.23 seems not having 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for col in columns:
if is_name_like_tuple(col):
label = cast(Label, col)
else:
label = cast(Label, (col,))
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If ‘orient’ is ‘records’, write out line-delimited JSON format.
Will throw ValueError for an incorrect ‘orient’ since others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's JSON options. Check
the available options in PySpark's API documentation for `spark.write.json(...)`.
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
--------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
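# Booleans are cast to long so that True/False average as 1/0; any other
# non-numeric Spark type raises a TypeError below.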
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark emulates product via the ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type column is not included to the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there is no numeric type columns, returns empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
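    # A worked sketch (illustrative, plain Python rather than Spark columns) of the
    # ``exp(sum(log(...)))`` emulation used in ``prod`` above, including the sign and
    # zero handling:
    #
    #     import math
    #     values = [2, -3, 4]
    #     num_zeros = sum(1 for v in values if v == 0)
    #     sign = -1 if sum(1 for v in values if v < 0) % 2 else 1
    #     result = 0 if num_zeros else sign * math.exp(sum(math.log(abs(v)) for v in values))
    #     # result == -24.0, matching 2 * -3 * 4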
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
        count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
        .. note:: Unlike pandas, the median in pandas-on-Spark is an approximation based upon
            approximate percentile computation, because computing the median across a large
            dataset is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
        scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
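    # Illustrative check (plain Python, not part of the original source) of the formula
    # implemented above, sem = std / sqrt(count):
    #
    #     import math
    #     data = [1, 2, 3]                      # sample standard deviation is 1.0
    #     sem = 1.0 / math.sqrt(len(data))      # 0.5773..., matching the ``psser.sem()`` doctest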
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Name, "Series", List[Union[Name, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Label, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Label, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Label, (by,))]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(cast(Label, key))
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(cast(Label, (key,)))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Label]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or that element is not boolean
Returns
--------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
        If there are non-boolean or multiple values, it raises an exception in all
        cases, as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
        .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
            Unlike pandas, NA values are also counted toward the window. This might be changed
            in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
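    # Hypothetical usage sketch (not part of the original docstring); assumes the usual
    # ``import pyspark.pandas as ps`` and the default ``min_periods`` (the window size):
    #
    #     s = ps.Series([1, 2, 3, 4, 5])
    #     s.rolling(3).sum()
    #     # expected, as in pandas: NaN, NaN, 6.0, 9.0, 12.0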
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
        .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
            Unlike pandas, NA values are also counted toward the window. This might be changed
            in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
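    # Hypothetical usage sketch (not part of the original docstring); assumes the usual
    # ``import pyspark.pandas as ps``:
    #
    #     s = ps.Series([1, 2, 3, 4, 5])
    #     s.expanding(min_periods=2).sum()
    #     # expected, as in pandas: NaN, 3.0, 6.0, 10.0, 15.0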
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
        copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
        A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
        A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
        # `to_markdown` is only supported in pandas >= 1.0.0, where it was first added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`bfill```.
        .. note:: the current implementation of 'bfill' uses Spark's Window
            without specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`ffill```.
        .. note:: the current implementation of 'ffill' uses Spark's Window
            without specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
        # Special-case floating point types because Spark's count treats NaN as a valid value,
        # whereas pandas' count doesn't include NaN.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
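    # Illustrative note (assumes a running pandas-on-Spark session): Spark counts NaN in a
    # DoubleType column as a valid value, so without the F.nanvl(...) wrapping above,
    # ``ps.Series([1.0, float("nan")]).count()`` would report 2 instead of the
    # pandas-compatible 1.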
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
enanablancaynumeros/mullpy | mullpy/auxiliar.py | 1 | 8422 | # This Python file uses the following encoding: utf-8
# !/usr/local/bin/python3.4
####################################################
# <Copyright (C) 2012, 2013, 2014, 2015 Yeray Alvarez Romero>
# This file is part of MULLPY.
####################################################
import os
import errno
from functools import reduce
from re import sub
####################################################
def nested_dict_access(list_of_keys, dictionary):
return reduce(dict.get, list_of_keys, dictionary)
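# Illustrative usage (not part of the original module):
#
#     nested_dict_access(["a", "b"], {"a": {"b": 1}})   # -> 1
#
# A missing intermediate key makes dict.get return None, so the next reduction step
# raises a TypeError rather than a KeyError.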
def create_output_file(folder, output_file, operation, user_default_input):
"""
    :param folder: directory where the file will be created (it is created if it does not exist)
    :param output_file: name of the output file inside ``folder``
    :param operation: "overwrite" or "append"
    :param user_default_input: "y" or "n". Leave it blank if you want to ask the user by console
    :return: an open file object, or False if the user refuses the requested operation
"""
path_exists(folder)
if not check_file(folder, output_file):
return open(folder+output_file, "w")
else:
if operate_over_file(folder, output_file, operation, user_default_input):
if operation == "overwrite":
return open(folder+output_file, "w")
elif operation == "append":
return open(folder+output_file, "a")
else:
raise ValueError("Operation not recognized")
else:
return False
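# Illustrative usage (the folder and file names are hypothetical):
#
#     f = create_output_file("results/", "summary.txt", "append", "y")
#     if f:
#         f.write("header\n")
#         f.close()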
####################################################
def check_file(folder, output_file):
if os.path.isfile(folder+output_file):
return True
else:
return False
####################################################
def operate_over_file(folder, output_file, operation, user_default_input=None):
if user_default_input:
reply = user_default_input
else:
reply = input('¿Do you want to %s the file %s, y/n?' % (operation, folder+output_file))
if reply == "y":
return True
else:
return False
####################################################
def path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
####################################################
def summary_stats_from_file(folder, input_file):
"""
:param folder: The name of the folder
    :param input_file: The name of the file that has the information in the following manner:
    1) A set of measure names separated by tabs
    2) An undefined number of lines with:
    classifier name: \t measure_value_0 \t measure_value_1 \t etc.
    :return: Summary structure which contains classifier statistics. Each classifier has a
    structure of measure names and their associated values
"""
summary = AutoVivification()
if not check_file(folder, input_file):
return summary
for i, line in enumerate(open(folder+input_file).readlines()):
if i > 0:
name = line[:line.find(":")]
values_list = []
            # Each measure's values have to be separated by \t
for x in sub(r" *\n", "", sub(r" +", "", sub(r"\n", "", line[line.find(":")+1:]))).split("\t"):
if x != "":
if len(x.split(',')) > 1:
values_list.append([float(y) for y in x.split(',')])
else:
values_list.append(float(x))
#Assign each sublist or each value to the summary structure
for measure_name, value in zip(measures_names, values_list):
summary[name][measure_name] = value
else:
measures_names = [x for x in sub(r" *\n", "", sub(r" +", "", sub(r"\n", "", line))).split("\t") if len(x)]
return summary
####################################################
def check_equal_classifier_patterns(context, classifier_name, classifier_name_2, pattern_kind):
if classifier_name_2 != classifier_name and classifier_name_2 in context["classifiers"].keys():
if pattern_kind in context["classifiers"][classifier_name_2]["patterns"] and \
context["classifiers"][classifier_name]["patterns"][pattern_kind] == \
context["classifiers"][classifier_name_2]["patterns"][pattern_kind] and \
context["patterns"].patterns[classifier_name_2][pattern_kind] is not None and \
context["classifiers"][classifier_name]["data_transformation"] == \
context["classifiers"][classifier_name_2]["data_transformation"] and \
"features_names" in context["classifiers"][classifier_name_2]:
if "features_names" in context["classifiers"][classifier_name] and \
context["classifiers"][classifier_name]["features_names"] == \
context["classifiers"][classifier_name_2]["features_names"]:
return 1
elif "features_names" not in context["classifiers"][classifier_name]:
return 1
return 0
####################################################
class AutoVivification(dict):
"""Implementation of perl's autovivification feature."""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
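# Illustrative usage (not part of the original module): intermediate dictionaries are
# created on first access, so deep assignments work without explicit initialisation.
#
#     summary = AutoVivification()
#     summary["classifierA"]["accuracy"] = 0.9
#     summary["classifierA"]["confusion"]["fp"] = 3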
####################################################
###############
# CSV to ARFF #
###############
#https://github.com/christinequintana/CSV-to-ARFF
import csv
class Csv2arff(object):
content = []
name = ''
def __init__(self):
self.csvInput()
self.arffOutput()
print('\nFinished.')
#import CSV
def csvInput(self):
user = input('Enter the CSV file name: ')
#remove .csv
        if user.endswith('.csv'):
self.name = user.replace('.csv', '')
print('Opening CSV file.')
try:
with open(user, 'rt') as csvfile:
lines = csv.reader(csvfile, delimiter=',')
for row in lines:
self.content.append(row)
csvfile.close()
#just in case user tries to open a file that doesn't exist
except IOError:
print('File not found.\n')
self.csvInput()
#export ARFF
def arffOutput(self):
print('Converting to ARFF file.\n')
title = str(self.name) + '.arff'
new_file = open(title, 'w')
##
#following portions formats and writes to the new ARFF file
##
#write relation
new_file.write('@relation ' + str(self.name)+ '\n\n')
#get attribute type input
for i in range(len(self.content[0])-1):
# attribute_type = input('Is the type of ' + str(self.content[0][i]) + ' numeric or nominal? ')
# new_file.write('@attribute ' + str(self.content[0][i]) + ' ' + str(attribute_type) + '\n')
new_file.write('@attribute ' + str(self.content[0][i]) + ' numeric\n')
#create list for class attribute
last = len(self.content[0])
class_items = []
for i in range(len(self.content)):
name = self.content[i][last-1]
if name not in class_items:
class_items.append(self.content[i][last-1])
else:
pass
del class_items[0]
string = '{' + ','.join(sorted(class_items)) + '}'
new_file.write('@attribute ' + str(self.content[0][last-1]) + ' ' + str(string) + '\n')
#write data
new_file.write('\n@data\n')
del self.content[0]
for row in self.content:
new_file.write(','.join(row) + '\n')
#close file
new_file.close()
####################################################
def csv2pat(input_file, classes_len):
import pandas as pd
input_df = pd.read_csv(input_file)
output_file = open("%s.pat" % input_file[:input_file.find('.csv')], "w")
features = list(input_df.columns.values)
# First, we build the beginning of the pat file, specifying feature names.
for feature in features[:len(features)-classes_len]:
output_file.write("@FEATURE %s\n" % feature)
# Second, we add the content of the csv file, row by row
for i in range(len(input_df)):
# Building each row to be put in the output file.
row = []
for feature in features:
row.append(input_df[feature][i])
output_file.write("%s\n" % ",".join([str(x) for x in row]))
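# Illustrative usage (the file name is hypothetical): converts "iris.csv" into "iris.pat",
# writing one "@FEATURE <name>" line per non-class column followed by the raw rows.
#
#     csv2pat("iris.csv", classes_len=1)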
| mit |
AvengersPy/MyPairs | Simulation/onePair.py | 1 | 4066 | import os
import csv
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def readCSV():
os.chdir(r'/Users/yubing/MyPairs/Simulation/data')
data1 = pd.read_csv('spy.csv')# pandas dataframe
data2 = pd.read_csv('csco.csv')
    ETFlist = list(data1['Adj Close']) # convert to list; we can now compute returns and run the regression
STKlist = list(data2['Adj Close'])
return ETFlist, STKlist
def regression(listETF, listSTK, pTime):
etfRtn = np.log(listETF[1:pTime+1]) - np.log(listETF[0:pTime])
stkRtn = np.log(listSTK[1:pTime+1]) - np.log(listSTK[0:pTime])
gradient, intercept, r_value, p_value, std_err = stats.linregress(etfRtn, stkRtn)
n = len(etfRtn)
residual = []
for i in xrange(0, n):
x = float(stkRtn[i] - gradient * etfRtn[i] - intercept)
residual.append(x)
return intercept, gradient, residual
def score(Epsilon, ptime):
    if ptime > len(Epsilon):
        # not enough residuals available for the requested window
        raise ValueError("Wrong ptime!")
Xt = []
result = 0
for i in range(0,ptime):
result = result + Epsilon[i]
Xt.append(result)
Xt_Lag1 = []
Xt_Lag0 = []
for j in range(0,ptime-1):
Xt_Lag1.append(Xt[j])
Xt_Lag0.append(Xt[j + 1])
gradient, intercept, r_value, p_value, std_err = stats.linregress(Xt_Lag0, Xt_Lag1)
a = intercept
b = gradient
k = - np.log(b) * 252
m = a / (1 - b)
lam = []
for j in range(0,ptime-1):
lam.append(Xt_Lag0[j] - a - b * Xt_Lag1[j])
lamvar = np.var(lam)
sigmaeq = np.sqrt(lamvar*2*k/(1-b**2))
smod = []
s = []
for i in range(0, ptime):
s.append((Epsilon[i]-m)/sigmaeq)
smod.append(s[i] - a/(k*sigmaeq))
print m
return smod
# beta0 = 0.000194054
# beta = 0.840379173
# mmm_data = [77.53,78.4,77.89,77.42,78,77.31,78.05,78.41,79.48,
# 79.33,78.71,79.09,79.25,78.71,
# 78.85,79.56,78.97,78.54,74.92,74.21,74.75,
# 73.87,74.08,73.91,74.3,74.88,74.64,74.55,76,
# 76.43,76.9,76.91,76.5,76.1,76.25,76.49,76.45,
# 76.13,74.25,74,73.25,73.01,73.62,73.5,74.03,
# 74.59,74.61,74.75,74.78,75.8,76.23,75.54,
# 76.49,76.25,77.09,77.59,77.66,77.44,76.74,76.38]
# spy_data = [142.25,141.23,141.33,140.82,141.31,140.58,141.58,
# 142.15,143.07,142.85,143.17,142.54,143.07,142.26,142.97,143.86,
# 142.57,142.19,142.35,142.63,144.15,144.73,144.7,144.97,145.12,
# 144.78,145.06,143.94,143.77,144.8,145.67,145.44,145.56,145.61,
# 146.05,145.74,145.83,143.88,140.39,139.34,140.05,137.93,138.78,
# 139.59,140.54,141.31,140.42,140.18,138.43,138.97,139.31,139.26,
# 140.08,141.1,143.48,143.28,143.5,143.12,142.14,142.54]
# spy_data, mmm_data = readCSV()
# PTime = 100
# spyRtn = np.log(spy_data[1:PTime+1]) - np.log(spy_data[0:PTime])
# mmmRtn = np.log(mmm_data[1:PTime+1]) - np.log(mmm_data[0:PTime])
# beta0, beta1, epsilon = regression(spy_data, mmm_data, PTime)
# print len(epsilon)
# print mmm_data
# sscore = score(epsilon, PTime)
# print sscore
# plt.plot(range(0,len(sscore)),sscore)
#plt.axis([0,len(sscore),min(sscore)*1.2,max(sscore)*1.2])
# plt.axis([0,PTime,-0.3,0.3])
# plt.show()
#def funcion(pairs, PTime):
# sscoreLst = []
# for i in pairs:
# spy_data, mmm_data = readCSV(ietfFile, iStkFile);
# beta0, beta1, epsilon = regression(spy_data, mmm_data, PTime)
# sscore = score(epsilon)
# realizedVol = ...(sscore)
# sscoreLst.append(realizedVol)
# return sscoreLst
# def tradePair(stkSymbol, etfSymbol, PTime):
# spy_data, mmm_data = readCSV(ietfFile, iStkFile);
# beta0, beta1, epsilon = regression(spy_data, mmm_data, PTime)
# sscore = score(epsilon)
# sortedSS = sort(sscore)
# buyOpen = sortedSS[100*0.85]
# buyClose = sortedSS[100*0.0.6]
# shortOpen = sortedSS[100*0.15]
# buyCLose = sortedSS[100*0.4]
#def funcion(pairs, PTime):
# sscoreLst = []
# for i in pairs:
# spy_data, mmm_data = readCSV(ietfFile, iStkFile);
# beta0, beta1, epsilon = regression(spy_data, mmm_data, PTime)
# sscore = score(epsilon)
# sortedSS = sort(sscore)
# buyOpen = sortedSS[100*0.85]
# buyClose = sortedSS[100*0.0.6]
# shortOpen = sortedSS[100*0.15]
# buyCLose = sortedSS[100*0.4]
# return sscoreLst
| apache-2.0 |
hugo-lorenzo-mato/meteo-galicia-db | estudioDatos.py | 1 | 7556 | # -*- coding: utf-8 -*-
"""
eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJtaWd1ZWxvdXZpbmhhQGdtYWlsLmNvbSIsImp0aSI6ImY2MGY2ZTdhLTcxMmMtNDY0ZS05YTlmLTYzNWUyYjgyNThlYSIsImV4cCI6MTQ5OTE2MjExNiwiaXNzIjoiQUVNRVQiLCJpYXQiOjE0OTEzODYxMTYsInVzZXJJZCI6ImY2MGY2ZTdhLTcxMmMtNDY0ZS05YTlmLTYzNWUyYjgyNThlYSIsInJvbGUiOiIifQ.w0OazTbsiZdI5YQXCMIRnny_f0TwWF7leFvik1WeA8s
"""
import requests
import json
import pandas as pd
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import math
# Common data (API_KEY)
querystring = {"api_key":"eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJtaWd1ZWxvdXZpbmhhQGdtYWlsLmNvbSIsImp0aSI6ImY2MGY2ZTdhLTcxMmMtNDY0ZS05YTlmLTYzNWUyYjgyNThlYSIsImV4cCI6MTQ5OTE2MjExNiwiaXNzIjoiQUVNRVQiLCJpYXQiOjE0OTEzODYxMTYsInVzZXJJZCI6ImY2MGY2ZTdhLTcxMmMtNDY0ZS05YTlmLTYzNWUyYjgyNThlYSIsInJvbGUiOiIifQ.w0OazTbsiZdI5YQXCMIRnny_f0TwWF7leFvik1WeA8s"}
# Ask the user for the input data
# This asks for the idema (station id) directly; ideally we would ask for a place and look up the idema ourselves
print("Inserte el lugar del que quiera obtener los datos: \n")
print(" Provincia ------------ Estacion ------------- Idema ")
print(" A Coruña A Coruña (Estacion completa) 1387")
print(" A Coruña A Coruña (Aeroporto) 1387E")
print(" A Coruña Santiago de Compostela (Labacolla) 1428")
print(" A Coruña Cabo Vilán 1393")
print(" Lugo Rozas (Aeródromo) 1505")
print(" Ourense Ourense (Granxa Deputación) 1690A")
print(" Pontevedra Pontevedra (Mourente) 1484C")
print(" Pontevedra Vigo (Peinador) 1495")
idema = str(input())
analisis = str(input("Desea un analisis mensual/anual (1) o diario (2): \n"))
# Annual / monthly analysis
if (analisis == "1"):
anho = str(input("De que año desea hacer la comparacion de datos (1931 - 2016): \n"))
url = "https://opendata.aemet.es/opendata/api/valores/climatologicos/mensualesanuales/datos/anioini/" + anho + "/aniofin/" + anho + "/estacion/" + idema
response = requests.request("GET" , url ,params = querystring, verify = False)
#print(response.text)
cont = json.loads(response.text)
#print(cont)
cont = cont['datos']
#print(cont)
    # Fetch the data we are interested in and parse it as JSON
response = requests.request("GET", cont, verify = False)
#print(response.text)
datos = json.loads(response.text)
#print(datos)
"""
    DATA ANALYSIS WITH PANDAS
"""
temperaturas = [ 'tm_mes', 'ta_max', 'ta_min', 'fecha', 'indicativo']
estado_ceo = ['n_cub', 'n_des', 'n_nub']
precipitacions = ['p_mes']
vento = ['w_med', 'w_racha']
humedad = ['hr']
cota_nieve = ['n_nie', 'n_gra']
cols = [ 'tm_mes', 'ta_max', 'ta_min', 'fecha', 'indicativo', 'n_cub', 'n_des', 'n_nub', 'p_mes',
'w_med', 'w_racha', 'hr', 'n_nie', 'n_gra']
indice = ['Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio',
'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre', 'Resumen']
#frame = DataFrame(datos, index = indice)
'''
    TEMPERATURES
'''
    # Create the DataFrame
frame_tem = DataFrame(datos, columns = temperaturas, index = indice)
    # Drop the summary row, which we do not need in our case
frame_tem = frame_tem.iloc[0:12]
print(frame_tem)
    # Clean up the DataFrame rows
temperatura_max = frame_tem.ta_max.map(lambda x: x.replace('(', ',')).map(lambda x: x.split(',')).map(lambda x: x[0]).map(lambda x: float(x))
temperatura_min = frame_tem.ta_min.map(lambda x: x.replace('(', ',')).map(lambda x: x.split(',')).map(lambda x: x[0]).map(lambda x: float(x))
temperatura_media = frame_tem.tm_mes
temperatura_fechas = frame_tem.fecha.map(lambda x: x.replace('-', ',')).map(lambda x: x.split(',')).map(lambda x: x[1])
data = { 'Temperatura Maxima' : temperatura_max,
'Temperatura Media' : temperatura_media,
'Temperatura Minima' : temperatura_min }
finalTemperatura = DataFrame(data)
print(finalTemperatura)
'''
    SKY CONDITIONS
'''
    # Create the DataFrame
frame_ceo = DataFrame(datos, columns = estado_ceo, index = indice)
    # Drop the summary row, which we do not need in our case
    # (although in this case that row might actually be useful)
frame_ceo = frame_ceo.iloc[0:12]
print(frame_ceo)
    # In this section we could show pie charts or grouped bar charts;
    # not sure which would fit better
'''
    PRECIPITATION
'''
frame_pre = DataFrame(datos, columns = precipitacions, index = indice)
    # Drop the summary row, which we do not need in our case
frame_pre = frame_pre.iloc[0:12]
frame_pre = frame_pre.p_mes.map(lambda x: float(x))
print(frame_pre)
'''
    WIND
'''
frame_vento = DataFrame(datos, columns = vento, index = indice)
    # Drop the summary row, which we do not need in our case
frame_vento = frame_vento.iloc[0:12]
print(frame_vento)
frame_vento = frame_vento.dropna()
    # Clean the data and recover the full wind direction in degrees
frame_vento_dir = frame_vento.w_racha.map(lambda x: x.replace('(', '/')).map(lambda x: x.split('/')).map(lambda x: x[0]).map(lambda x: float(x)) * 10
    # Clean the data and convert the gust speed to kilometres per hour
frame_vento_vel = frame_vento.w_racha.map(lambda x: x.replace('(', '/')).map(lambda x: x.split('/')).map(lambda x: x[1]).map(lambda x: float(x)) / 1000 * 3600
print("dir \n")
print(frame_vento_dir)
print("vel \n")
print(frame_vento_vel)
'''
        HUMIDITY
'''
frame_hm = DataFrame(datos, columns = humedad, index = indice)
        # Drop the summary row, which is not needed in this case
frame_hm = frame_hm.iloc[0:12]
print(frame_hm)
'''
        SNOW LEVEL
'''
frame_cota = DataFrame(datos, columns = cota_nieve, index = indice)
        # Drop the summary row, which is not needed in this case
frame_cota = frame_cota.iloc[0:12]
print(frame_cota)
'''
        PLOTS
'''
        #### TEMPERATURES
finalTemperatura.plot()
        plt.title("Temperature plot, year: " + anho)
        plt.xlabel("Month")
        plt.ylabel("Degrees Celsius")
plt.savefig("grafica.png")
        #### PRECIPITATION
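        # The precipitation section has no plot yet; a minimal sketch of a
        # possible monthly bar chart is left commented out below (it assumes
        # `frame_pre` is the cleaned monthly precipitation Series built above):
        # plt.figure()
        # frame_pre.plot(kind='bar')
        # plt.title("Monthly precipitation, year: " + anho)
        # plt.xlabel("Month")
        # plt.ylabel("mm")
        # plt.savefig("precipitacion.png")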
        #### WIND
        ## Discretize the set of values into n bins,
        ## in this case 8 bins
datosbin = np.histogram(frame_vento_dir, bins = np.linspace(np.min(frame_vento_dir), np.max(frame_vento_dir), 9))[0]
        ## Express the counts as percentages
datosbin = datosbin * 100. / len(frame_vento_dir)
        ## Group the data into n directions/sections/sectors,
        ## in this case 8 sectors of a circle
sect = np.array([90, 45, 0, 315, 270, 225, 180, 135]) * 2. * math.pi / 360.
nombresect = ['E','NE','N','NW','W','SW','S','SE']
        ## Draw the frequency (wind) rose
plt.axes([0.1,0.1,0.8,0.8], polar = True)
plt.bar(sect, datosbin, align='center', width=45 * 2 * math.pi / 360.,
facecolor='b', edgecolor='k', linewidth=2, alpha=0.5)
plt.thetagrids(np.arange(0, 360, 45),nombresect,frac = 1.1, fontsize = 10)
        plt.title(u'Wind direction during the year ' + anho)
plt.show()
    # Daily analysis (date format: 2016-06-01T10:00:00UTC)
elif (analisis == "2"):
        print("This is not implemented yet!")
    # Invalid analysis type
else:
        print("Invalid analysis type, please enter 1 or 2")
| mit |
marcioweck/PSSLib | src/pss.py | 1 | 8492 | # TODO license
# TODO explain
import array
import random
import copy
from itertools import izip, combinations, chain
from itertools import permutations as permut
import numpy as np
from scipy.spatial.distance import euclidean, pdist, squareform
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib import cm
from deap import base
from deap import creator
from deap import tools
import dm
import lhs
import nbcdm
from archive import Archive
from utils import solowPolaskyQC, averageFitnessQC, ensure_bounds
"""
Utils
"""
def sampler(dist_engine, xbounds, dim, n, convert=False):
rngs = [(0., 1.) for i in range(dim)]
P = np.array(lhs.lhs(dist_engine, rngs, siz=n)).T
P = (xbounds[1] - xbounds[0])*P + xbounds[0] # rebound
if convert:
if dim == 1:
samples = map(creator.Individual, [np.array((p, )) for p in P])
else:
samples = map(creator.Individual, P)
return samples
else:
if dim == 1:
P = np.array([np.array((x,)) for x in P])
return P
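# Minimal usage sketch for sampler() (hypothetical values): draw 50 Latin
# hypercube samples in a 2-D box rescaled to [-5, 5]^2, mirroring the call
# made later in main().
# >>> import scipy.stats as stats
# >>> P = sampler([stats.uniform, stats.uniform], (-5., 5.), 2, 50)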
"""
PSS framework
"""
import pdb
def remove_overlaps(feval, individuals, hives):
if len(individuals) == 0: return []
idx = range(0, len(hives))
idx.sort(key=lambda i: individuals[i].fitness)
nm = np.sqrt(len(individuals[0]))
covs = [h.sigma*nm for h in hives]
D = squareform(pdist(individuals))
uniques = set(idx[:])
for i, j in permut(idx, 2):
if covs[i] > D[i,j]:
if individuals[i].fitness > individuals[j].fitness:
wps = dm.denorm(individuals[i], individuals[j], [0.5])
wpfit = np.array(map(feval, wps))
if all(wpfit >= individuals[j].fitness.wvalues[0]):
D[j,...] = np.inf
uniques.discard(j)
return (i for i in uniques)
def generate(ind_init, hives):
swarms = [h.generate(ind_init) for h in hives]
return swarms
def updateHive(hive, swarm):
hive.update(swarm)
return hive.shouldContinue()
def generateHive(hclass, (xstart, sigma)):
return hclass(copy.deepcopy(xstart), sigma)
import sys
sys.path.append("../benchmarks/niching-benchmark-cec2013/python/")
from framework import pycec2013
import cma
creator.create("Fitness", base.Fitness, weights=(1.0, 1.0))
creator.create("OneFitness", base.Fitness, weights=(1.0,))
creator.create("Individual", array.array, typecode='d', fitness=creator.Fitness)
creator.create("Centroid", array.array, typecode='d', fitness=creator.OneFitness)
creator.create("Hive", cma.Strategy)
def main():
benchmark = pycec2013.Benchmark(17)
lbounds = tuple(benchmark.get_lbounds())
ubounds = tuple(benchmark.get_ubounds())
min_ = min(lbounds)
max_ = max(ubounds)
toolbox = base.Toolbox()
toolbox.register("generate", generate, creator.Individual)
toolbox.register("update", map, updateHive)
toolbox.register("feval", map, benchmark.evaluate)
# toolbox.register("fdist", nearest_better_tree)
toolbox.register("hive", generateHive, cma.Strategy)
toolbox.register("bounds", ensure_bounds(lbounds, ubounds))
toolbox.decorate("generate", toolbox.bounds)
dim = benchmark.ndim
nmin = benchmark.ngoptima
leftfes = benchmark.max_fes
ngoptima = 0
max_ngoptima = benchmark.ngoptima
def similarity_func(a, b):
if np.isnan(np.sum(a)) or np.isnan(np.sum(b)):
pdb.set_trace()
d = euclidean(a, b)
return d < 0.06
hof = Archive(max_ngoptima, similarity_func)
distribs = [stats.uniform for i in range(dim)]
samples = sampler(distribs, (min_, max_), dim, 20*dim)
#samples = np.loadtxt("/home/weckwar/inputs.txt", delimiter=',', ndmin=2)
seeds, _ = nbcdm.raw_data_seeds_sel(benchmark.evaluate, samples, 20, useDM=True, maskmode='NEA1')
# xpoints = np.array([x for x,c,_ in seeds])
# np.savetxt("/home/weckwar/inputs.txt", xpoints, delimiter=',')
#plotseeds(benchmark.evaluate, min_, max_, dim, samples=xpoints)
#return
hives = list()
population = list()
norm = float(np.sqrt(dim))
for (xstart, c, (f1, f2)) in seeds:
ind = creator.Individual(xstart)
ind.fitness.values = (f1, f2)
population.append(ind)
hives.append(toolbox.hive((ind, c/norm)))
verbose = True
logbook = tools.Logbook()
logbook.header = "gen", "nswarm", "ngoptima", "muerror", "dispersion"
generation = 0
logbook.record(gen=generation, nswarm=len(hives), ngoptima=ngoptima,
muerror=averageFitnessQC(population),
dispersion=solowPolaskyQC(population, 1.0/dim))
while leftfes > 0 and ngoptima < max_ngoptima:
swarms = toolbox.generate(hives)
blob = list(chain(*swarms))
D = squareform(pdist(blob))
fitnesses = toolbox.feval(blob)
nelem = len(swarms[0])
for i, swarm in enumerate(swarms):
k = i*nelem
nbidx = np.arange(k, k+nelem)
for j, ind in enumerate(swarm):
D[k+j,nbidx] = np.inf
sortedline = np.argsort(D[k+j,:])
bestidx = next((l for l in sortedline
if fitnesses[l] > fitnesses[k+j]), -1)
ind.fitness.values = (fitnesses[k+j], D[k+j, bestidx])
checks = toolbox.update(hives, swarms)
nextgen = [hives[i] for i, ok in enumerate(checks) if ok]
xstarts = [creator.Centroid(x.centroid) for x in nextgen]
cfit = toolbox.feval(xstarts)
for x, fit in izip(xstarts, cfit):
x.fitness.values = (fit,)
uniques = list(remove_overlaps(benchmark.evaluate, xstarts, nextgen))
hives = [nextgen[i] for i in uniques]
xstarts = [xstarts[i] for i in uniques]
hof.update(xstarts)
hfit = [x.fitness.values[0] for x in hof]
ngoptima = benchmark.count_goptima(hof, hfit, 1e-5)
if len(hives) < 2:
samples = sampler(distribs, (min_, max_), dim, 2.*dim)
seeds, _ = nbcdm.raw_data_seeds_sel(benchmark.evaluate, samples, 10.)
for (xstart, c, (f1, f2)) in seeds:
ind = creator.Individual(xstart)
ind.fitness.values = (f1, f2)
hives.append(toolbox.hive((ind, 0.5*c/norm)))
leftfes -= len(samples)
leftfes -= len(swarms)*nelem + len(xstarts)
generation += 1
logbook.record(gen=generation, nswarm=len(hives), ngoptima=ngoptima,
muerror=0,#averageFitnessQC(xstarts),
dispersion=0)#solowPolaskyQC(xstarts, 1.0/dim))
print logbook.stream
print "Used FEs: {0}".format(benchmark.max_fes - leftfes)
print ngoptima
for ind in hof:
print "x: {0} -> {1}".format(ind, ind.fitness.values[0])
plotseeds(benchmark.evaluate, min_, max_, dim, samples=hof)
def plotseeds(feval, min_, max_, dim, samples=None, edges=None, seeds=None):
fig, ax = plt.subplots()
if dim == 2:
X = np.arange(min_, max_, 0.05)
Y = np.arange(min_, max_, 0.05)
X, Y = np.meshgrid(X, Y)
PTS = np.hstack((X.reshape(X.size, 1), Y.reshape(Y.size, 1)))
Z = np.array(map(feval, PTS), copy=True)
Z = Z.reshape(X.shape)
plt.contour(X, Y, Z, zdir='z', cmap=cm.jet, offset=np.min(Z))
if edges is not None:
mudist = np.mean([d for _, d in edges])
for (x, y), d in edges:
if d < 2*mudist and d > 0:
plt.plot([samples[x, 0], samples[y, 0]],
[samples[x, 1], samples[y, 1]], 'k')
if samples is not None:
sarr = np.array(samples)
plt.scatter(sarr[:, 0], sarr[:, 1], c='r', s=100)
if seeds is not None:
for x, r in seeds:
c = mpatches.Circle(x, r, alpha=0.6, fc="b", ec="b", lw=1)
ax.add_patch(c)
if dim == 1:
X = np.arange(min_, max_, 0.01)
Y = [feval(x) for x in X]
plt.plot(X,Y,'r')
if samples is not None:
F = [s.fitness.values[0] for s in samples]
plt.scatter(samples, F, c='k', s = 10)
if seeds is not None:
for x, r, _ in seeds:
c = mpatches.Circle((x, feval(x)), r, alpha=0.6, fc="b", ec="b", lw=1)
ax.add_patch(c)
plt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
ThomasMiconi/htmresearch | projects/capybara/sandbox/classification/knn/knn_example.py | 9 | 4351 | """Helper plot functions"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.metrics import confusion_matrix, accuracy_score
import itertools
def plot_loss(loss):
"""
Plot model loss per epoch.
:param loss: (array) loss of the model
"""
plt.figure(figsize=(5, 5), dpi=100)
plt.plot(range(len(loss)), loss)
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
def plot_confusion_matrix(y_true, y_pred, class_names=None,
compute_accuracy=True,
normalize=False,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
:param y_true: (array) target classes
:param y_pred: (array) predicted classes
:param class_names: (list) pretty names for the classes. Integers from 0 to
len(y_true) will be used if set to None.
:param compute_accuracy: (bool) whether to compute the accuracy and
display it in the title.
:param normalize: (bool) whether to normalize the confusion matrix
:param cmap: (mpl.Colormap) color map
:return cm: (np.array) confusion matrix
"""
cm = confusion_matrix(y_true, y_pred)
if compute_accuracy:
accuracy = accuracy_score(y_true, y_pred)
title = 'Confusion matrix (accuracy=%.2f)' % (accuracy * 100)
else:
title = 'Confusion matrix'
plt.figure(figsize=(6, 6), dpi=100)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
if class_names is None:
class_names = range(len(set(list(y_true))))
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.grid(False)
return cm
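# Minimal usage sketch for plot_confusion_matrix (hypothetical labels):
# >>> y_true = [0, 1, 1, 2, 2, 2]
# >>> y_pred = [0, 1, 2, 2, 2, 1]
# >>> cm = plot_confusion_matrix(y_true, y_pred, class_names=['a', 'b', 'c'])
# The returned `cm` is the raw (non-normalized) sklearn confusion matrix.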
def plot_decision_boundary(model, X, y, h=.01):
"""
Plot the decision boundary. For that, we will assign a color to each
point in the mesh [x_min, x_max]*[y_min, y_max].
:param model: (Object) model supporting predict(X) where X is 2D.
:param X: (array) 2D input.
:param y: (array) target classes for X.
:param h: (float) step size in the mesh .
"""
if type(y) != np.array:
y = np.array(y)
if type(X) != np.array:
X = np.array(X)
assert X.shape[1] == 2
# Create color maps
num_classes = len(set(list(y)))
cmap_light = ListedColormap(sns.color_palette('colorblind', num_classes))
cmap_bold = ListedColormap(sns.color_palette('colorblind', num_classes))
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
if y.ndim > 1:
    c = [cmap_bold(y[i, 0]) for i in range(len(y[:, ]))]
else:
c = [cmap_bold(y[i]) for i in range(len(y))]
plt.scatter(X[:, 0], X[:, 1], c=c, cmap=cmap_bold, edgecolor='black')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("Decision boundary")
def example():
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
plot_decision_boundary(clf, X, y)
y_pred = clf.predict(X)
y_true = y
cm = plot_confusion_matrix(y_true, y_pred)
print cm
plt.show()
if __name__ == '__main__':
example()
| agpl-3.0 |
dsquareindia/scikit-learn | examples/linear_model/plot_iris_logistic.py | 119 | 1679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the Logistic Regression Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
amuramatsu/dwf | examples/AnalogOutIn.py | 1 | 1544 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
DWF Python Example 2
Modified by: MURAMATSU Atsushi <[email protected]>
Revised: 2016-04-21
Original Author: Digilent, Inc.
Original Revision: 10/17/2013
Requires:
Python 2.7, 3.3 or later
"""
import dwf
import time
import matplotlib.pyplot as plt
print("Version: " + dwf.FDwfGetVersion())
cdevices = dwf.DwfEnumeration()
print("Number of Devices: " + str(len(cdevices)))
if len(cdevices) == 0:
print("no device detected")
quit()
print("Opening first device")
hdwf = dwf.Dwf()
print("Configure and start first analog out channel")
dwf_ao = dwf.DwfAnalogOut(hdwf)
dwf_ao.nodeEnableSet(0, dwf_ao.NODE.CARRIER, True)
print("1 = Sine wave")
dwf_ao.nodeFunctionSet(0, dwf_ao.NODE.CARRIER, dwf_ao.FUNC.SINE)
dwf_ao.nodeFrequencySet(0, dwf_ao.NODE.CARRIER, 3000.0)
print()
dwf_ao.configure(0, True)
print("Configure analog in")
dwf_ai = dwf.DwfAnalogIn(hdwf)
dwf_ai.frequencySet(1e6)
print("Set range for all channels")
dwf_ai.channelRangeSet(-1, 4.0)
dwf_ai.bufferSizeSet(1000)
print("Wait after first device opening the analog in offset to stabilize")
time.sleep(2)
print("Starting acquisition")
dwf_ai.configure(True, True)
print(" waiting to finish")
while True:
if dwf_ai.status(True) == dwf_ai.STATE.DONE:
break
time.sleep(0.1)
print(" done")
print(" reading data")
rg = dwf_ai.statusData(0, 1000)
hdwf.close()
dc = sum(rg) / len(rg)
print("DC: " + str(dc) + "V")
plt.plot(rg)
plt.show()
| mit |
schreiberx/sweet | benchmarks_sphere/paper_jrn_nla_rexi_linear/sph_rexi_linear_paper_gaussian_ts_comparison_earth_scale_cheyenne_performance/postprocessing_output_h_err_vs_simtime.py | 1 | 3189 | #! /usr/bin/env python3
import sys
import matplotlib.pyplot as plt
import re
from matplotlib.lines import Line2D
#
# First, use
# ./postprocessing.py > postprocessing_output.txt
# to generate the .txt file
#
fig, ax = plt.subplots(figsize=(10,7))
ax.set_xscale("log", nonposx='clip')
ax.set_yscale("log", nonposy='clip')
mode = 'simtime'
#mode = 'dt'
with open('postprocessing_output_h.txt') as f:
lines = f.readlines()
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
markers = []
for m in Line2D.markers:
try:
if len(m) == 1 and m != ' ' and m != '':
markers.append(m)
except TypeError:
pass
linestyles = ['-', '--', ':', '-.']
if len(sys.argv) > 1:
output_filename = sys.argv[1]
else:
output_filename = "./postprocessing_output_h_err_vs_"+mode+".pdf"
if len(sys.argv) > 2:
plot_set = sys.argv[2:]
else:
plot_set = []
def plot(x, y, marker, linestyle, label):
# plot values and prev_name
print(label)
#print(values_err)
#print(values_time)
#print("")
if len(x) == 0:
return
if len(plot_set) != 0:
        if label not in plot_set:
return
ax.plot(x, y, marker=marker, linestyle=linestyle, label=label)
prev_name = ''
values_err = []
values_time = []
c = 2
for l in lines:
if l[-1] == '\n':
l = l[0:-1]
d = l.split("\t")
if d[0] == 'Running tests for new group:':
plot(values_time, values_err, markers[c % len(markers)], linestyles[c % len(linestyles)], prev_name)
for i, txt in enumerate(values_time):
ax.annotate("%.1f" % txt, (values_time[i]*1.03, values_err[i]*1.03))
prev_name = d[0]
values_err = []
values_time = []
c = c+1
continue
if len(d) != 5:
continue
if d[0] == 'SIMNAME':
continue
prev_name = d[0]
prev_name = prev_name.replace('script_ln2_b100_g9.81_h10000_f7.2921e-05_p0_a6371220_u0.0_rob1_fsph0_tsm_', '')
prev_name = prev_name.replace('_M0128_MPI_space01_time128', '')
prev_name = prev_name.replace('_M0128_MPI_space01_time001', '')
prev_name = prev_name.replace('_prcircle_nrm0_hlf0_pre1_ext00', '')
prev_name = prev_name.replace('_tso2_tsob2_REXICI', '')
prev_name = prev_name.replace('_C0040', '')
prev_name = prev_name.replace('_C0080', '')
prev_name = prev_name.replace('_C0160', '')
prev_name = prev_name.replace('_C0320', '')
prev_name = prev_name.replace('_C0640', '')
prev_name = prev_name.replace('_C1280', '')
prev_name = prev_name.replace('_C2560', '')
prev_name = prev_name.replace('_mr10.0_mi30.0', '')
prev_name = prev_name.replace('_n0064_sx50.0_sy50.0', '')
prev_name = prev_name.replace('_n0064', '')
prev_name = prev_name.replace('_sx50.0_sy50.0', '')
prev_name = re.sub(r"_mu.*", "", prev_name)
prev_name = re.sub(r"0000", "", prev_name)
values_err.append(float(d[1]))
if mode == 'simtime':
#
# SIMTIME
#
values_time.append(float(d[4]))
plt.xlabel("simulation time")
elif mode == 'dt':
#
# DT
#
m = re.search('_C([0-9]*)', d[0])
dt = float(m.group(1))
values_time.append(dt)
plt.xlabel("Timestep size")
plt.ylabel("Error")
plot(values_time, values_err, markers[c % len(markers)], linestyles[c % len(linestyles)], prev_name)
plt.legend()
plt.savefig(output_filename)
#plt.show()
| mit |
BeckResearchLab/USP-inhibition | scripts/utils.py | 2 | 8547 | #!/usr/bin/env python
"""
Perform data manipulation tasks in project workflow
"""
import os
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import boto
import numpy as np
import pandas as pd
import boto.s3
from boto.s3.key import Key
from scipy.stats import randint as sp_randint
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
__author__ = "Pearl Philip"
__credits__ = "David Beck"
__license__ = "BSD 3-Clause License"
__maintainer__ = "Pearl Philip"
__email__ = "[email protected]"
__status__ = "Development"
def create_notation_dataframe(filename):
"""
Returning Pandas dataframe of sample ID and molecular notation.
:param filename: File object containing molecular notation indexed by sample ID
:return: Dataframe of molecular notation indexed by sample ID.
"""
df = []
for line in filename:
        # Splits the line into its key and molecular string
words = line.split()
z = [int(words[0]), words[1]]
df.append(z)
df = pd.DataFrame(df)
df.columns = ['CID', 'SMILES']
df.sort_values(by='CID', inplace=True)
return df
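# Illustrative input format for create_notation_dataframe (hypothetical IDs
# and molecules): each line holds "<CID> <SMILES>", e.g.
# >>> from io import StringIO
# >>> f = StringIO(u"12345 CCO\n67890 c1ccccc1\n")
# >>> df = create_notation_dataframe(f)
# which yields a DataFrame with columns ['CID', 'SMILES'] sorted by CID.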
def create_activity_dataframe(dataframe):
"""
Performing useful transformations on the acquired data for use in subsequent algorithm.
:param dataframe: Dataframe downloaded from NCBI database.
:return: df: Cleaned and sorted dataframe.
"""
    # Eliminates the first five text rows of the csv
    df = dataframe.drop(list(range(5)), axis=0)
df = df.drop(['PUBCHEM_ACTIVITY_URL', 'PUBCHEM_RESULT_TAG',
'PUBCHEM_ACTIVITY_SCORE', 'PUBCHEM_SID',
'PUBCHEM_ASSAYDATA_COMMENT', 'Potency',
'Efficacy', 'Analysis Comment',
'Curve_Description', 'Fit_LogAC50',
'Fit_HillSlope', 'Fit_R2', 'Fit_ZeroActivity',
'Fit_CurveClass', 'Excluded_Points', 'Compound QC',
'Max_Response', 'Phenotype', 'Activity at 0.457 uM',
'Activity at 2.290 uM', 'Activity at 11.40 uM',
'Activity at 57.10 uM', 'PUBCHEM_ACTIVITY_OUTCOME',
'Fit_InfiniteActivity'], axis=1)
df.rename(columns={'PUBCHEM_CID': 'CID'}, inplace=True)
# Eliminates duplicate compound rows
df['dupes'] = df.duplicated('CID')
df = df[df['dupes'] == 0].drop(['dupes'], axis=1)
df = df.sort_values(by='CID')
return df
def upload_to_s3(aws_access_key_id, aws_secret_access_key, file_to_s3, bucket, key, callback=None, md5=None,
reduced_redundancy=False, content_type=None):
"""
Uploads the given file to the AWS S3 bucket and key specified.
:param aws_access_key_id: First part of AWS access key.
:param aws_secret_access_key: Second part of AWS access key.
:param file_to_s3: File object to be uploaded.
:param bucket: S3 bucket name as string.
:param key: Name attribute of the file object to be uploaded.
:param callback: Function accepts two integer parameters, the first representing the number of bytes that have been
successfully transmitted to S3 and the second representing the size of the to be transmitted object. Returns
boolean indicating success/failure of upload.
:param md5: MD5 checksum value to verify the integrity of the object.
:param reduced_redundancy: S3 option that enables customers to reduce their costs
by storing noncritical, reproducible data at lower levels of redundancy than S3's standard storage.
:param content_type: Set the type of content in file object.
:return: Boolean indicating success of upload.
"""
try:
size = os.fstat(file_to_s3.fileno()).st_size
except:
# Not all file objects implement fileno(), so we fall back on this
file_to_s3.seek(0, os.SEEK_END)
size = file_to_s3.tell()
conn = boto.connect_s3(aws_access_key_id, aws_secret_access_key)
bucket = conn.get_bucket(bucket, validate=True)
k = Key(bucket)
k.key = key
if content_type:
k.set_metadata('Content-Type', content_type)
sent = k.set_contents_from_file(file_to_s3, cb=callback, md5=md5,
reduced_redundancy=reduced_redundancy, rewind=True)
# Rewind for later use
file_to_s3.seek(0)
if sent == size:
return True
return False
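# Minimal usage sketch for upload_to_s3 (every name below is a placeholder,
# not a real credential, bucket or file):
# >>> with open('descriptors.csv', 'rb') as f:
# ...     ok = upload_to_s3('ACCESS_KEY', 'SECRET_KEY', f, 'my-bucket',
# ...                       'data/descriptors.csv', content_type='text/csv')
# `ok` is True only when the number of bytes sent matches the file size.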
def join_dataframes():
"""
Joining the dataframes of existing descriptor files from their urls into a single dataframe.
:return: Dataframe after join over key column.
"""
url_list = ['https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_constitution.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_con.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_kappa.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_estate.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_basak.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_property.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_charge.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_moe.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_burden.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_geary.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_moran.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_topology.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_geometric.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_cpsa.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_rdf.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_morse.csv',
'https://s3-us-west-2.amazonaws.com/pphilip-usp-inhibition/data/df_whim.csv'
]
url_exist_list = []
for url in url_list:
try:
r = urllib2.urlopen(url)
except urllib2.URLError as e:
r = e
if r.code < 400:
url_exist_list.append(url)
i = 0
df = [0] * len(url_exist_list)
for url in url_exist_list:
df[i] = pd.read_csv(url)
df[i].drop(df[i].columns[0], axis=1, inplace=True)
df[i].reset_index(drop=True, inplace=True)
i += 1
joined_df = df[0]
for i in df[1:]:
joined_df = joined_df.join(i)
return joined_df
def choose_features(x_train, y_train, x_test, column_names):
"""
Selecting the features of high importance to reduce feature space.
:param x_train: Training set of features.
:param x_test: Test set of features.
:param y_train: Training target values
:param column_names: Names of columns in x
"""
# Random forest feature importance
clf = RandomForestRegressor(n_jobs=-1, random_state=1, n_estimators=10)
clf.fit(x_train, y_train.ravel())
feature_importance = clf.feature_importances_
scores_table = pd.DataFrame({'feature': column_names, 'scores':
feature_importance}).sort_values(by=['scores'], ascending=False)
scores = scores_table['scores'].tolist()
n_features = [25, 50, 75, 100, 150, 200, 250, 300]
for n in n_features:
feature_scores = scores_table['feature'].tolist()
selected_features = feature_scores[:n]
x_train = pd.DataFrame(x_train, columns=column_names)
desired_x_train = x_train[selected_features]
x_test = pd.DataFrame(x_test, columns=column_names)
desired_x_test = x_test[selected_features]
desired_x_train.to_csv('../data/x_train_postprocessing_rfr_%d.csv' % n)
desired_x_test.to_csv('../data/x_test_postprocessing_rfr_%d.csv' % n)
pd.DataFrame(scores).to_csv('../data/feature_scores_rfr.csv')
return
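# Minimal usage sketch for choose_features (hypothetical arrays): x_train and
# x_test are 2-D arrays of descriptors, y_train the training targets and
# column_names the descriptor names; the call ranks features with a random
# forest and writes the reduced sets to ../data/x_*_postprocessing_rfr_n.csv
# for several feature counts n.
# >>> choose_features(x_train, y_train, x_test, column_names)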
def change_nan_infinite(dataframe):
"""
Replacing NaN and infinite values from the dataframe with zeros.
:param dataframe: Dataframe containing NaN and infinite values.
:return data: Data with no NaN or infinite values.
"""
dataframe.replace([np.inf, -np.inf], np.nan, inplace=True)
data = dataframe.fillna(0)
return data
| bsd-3-clause |
danmoser/pyhdust | pyhdust/spectools.py | 1 | 130208 | # -*- coding:utf-8 -*-
"""PyHdust *spectools* module: spectroscopy tools
Algumas definicoes: para todas as rotinas funcionarem, todos os espectros devem
estar agrupados num mesmo caminho (`path`), em estrutura de
noite/estrelas/espec.
Neste momento, as rotinas somente leem arquivos `*.cal.fits`. Para receber este
sufixo `.cal`, algumas informacoes no header sao necessarias:
* 'MJD-OBS' ou 'MJD' ou 'JD' ou 'DATE-OBS'
* 'CRVAL1' + 'CDELT1'
IMPORTANT NOTE: after the version 0.981, the "analline" function returns
FWHM instead of `depthcent`.
:license: GNU GPL v3.0 https://github.com/danmoser/pyhdust/blob/master/LICENSE
"""
from __future__ import print_function
import os as _os
import numpy as _np
import datetime as _dt
# import time as _time
from glob import glob as _glob
# from itertools import product as _iproduct
import pyhdust.phc as _phc
import pyhdust.jdcal as _jdcal
import pyhdust.input as _inp
import pyhdust.stats as _stt
import pyhdust as _hdt
from six import string_types as _strtypes
from shutil import copyfile as _copyfile
import warnings as _warn
import requests as _requests
# from lmfit import Model as _Model
try:
import astropy.io.fits as _pyfits
# import astropy.coordinates.sky_coordinate.SkyCoord as _SkyCoord
import matplotlib as _mpl
import matplotlib.pyplot as _plt
import matplotlib.patches as _mpatches
from matplotlib.ticker import MaxNLocator as _MaxNLocator
import matplotlib.gridspec as _gridspec
import scipy.interpolate as _interpolate
from scipy.optimize import curve_fit as _curve_fit
from scipy.stats import percentileofscore as _pos
from astropy.modeling import models as _models
from astropy.modeling import fitting as _fitting
import pandas as _pd
import wget as _wget
import xmltodict as _xmltodict
except ImportError:
_warn.warn('matplotlib and/or scipy and/or astropy and/or pandas module not installed!!!')
#try:
# import pyqt_fit.nonparam_regression as _smooth
# from pyqt_fit import npr_methods as _npr_methods
#except ImportError:
# _warn.warn('pyqt_fit module not installed!!!')
__author__ = "Daniel Moser"
__email__ = "[email protected]"
_outfold = ''
class Spec(object):
"""Definicao de classe espectro para conter toda a informacao util
para plots e analises.
EW in km/s
Para configurar uma ou mais linhas:
>>> spdtb = Spec()
>>> spdtb.lbc == 0
    >>> # this means that the wl vector is a velocity vector, not a
    >>> # wavelength one.
>>> spdtb.lbc = 6564.
>>> spdtb2 = Spec()
>>> spdtb2.lbc = 4863.
    How to use it (hard way):
>>> spdtb = Spec()
>>> #read spec `flux` and `wl` for a given `lbc`
>>> (spdtb.EW, spdtb.EC, spdtb.VR, spdtb.peaksep, spdtb.depthcent,\\
>>> spdtb.F0) = analline(wl, flux, lbc)
>>> spdtb.MJD = 1
>>> spdtb.file = file
And then:
>>> #to record it to the database:
>>> spdtb.addspec()
    To load a previous table, do:
>>> spdtb = Spec()
>>> #(...) read new specs and then join with previous ones
>>> spdtb.data = _np.vstack((spdtb.data, _np.loadtxt('hdt/datafile.txt')))
>>> spdtb.metadata = _np.vstack(( spdtb.metadata, \\
>>> _np.loadtxt('hdt/metafile.txt') ))
>>> spdtb.updatecount() #to update the counter
    Or simply (with the default file names):
>>> spdtb.loaddata()
"""
def __init__(self, wl=None, flux=None, lbc=None, hwidth=1000., EW=_np.NaN,
EC=_np.NaN, VR=_np.NaN, peaksep=_np.NaN, depthcent=_np.NaN, F0=_np.NaN,
dateobs='', MJD=0., datereduc='', file='', gaussfit=False):
self.wl = wl
self.flux = flux
self.lbc = lbc
self.hwidth = hwidth
self.EW = EW
self.EC = EC
self.VR = VR
self.peaksep = peaksep
self.depthcent = depthcent
self.F0 = F0
self.file = file
self.datereduc = datereduc
self.dateobs = dateobs
self.MJD = MJD
self.count = 0
self.data = _np.empty(0)
self.metadata = _np.empty(0)
self.gaussfit = gaussfit
def reset(self):
"""Reset the class parameters
"""
self.wl = None
self.flux = None
self.EW = _np.NaN
self.EC = _np.NaN
self.VR = _np.NaN
self.peaksep = _np.NaN
self.depthcent = _np.NaN
self.F0 = _np.NaN
self.file = ''
self.datereduc = ''
self.dateobs = ''
self.MJD = 0.
def clear(self):
"""Clear the class parameters
"""
self.__init__()
def addspec(self):
"""Record the class parameters into the database
"""
self.count += 1
if self.count == 1:
self.data = _np.array( self.lastinfo() )
self.metadata = _np.array( self.lastmeta() )
else:
self.data = _np.vstack(( self.data, self.lastinfo() ))
self.metadata = _np.vstack(( self.metadata, self.lastmeta() ))
# if self.flux != None and self.wl != None and self.lbc != None:
# self.savespec()
def lastinfo(self):
"""Print the current class parameters (last spec)
"""
return self.MJD, self.EW, self.EC, self.VR, self.peaksep, \
self.depthcent, self.F0
def lastmeta(self):
"""Print the current class parameters (last spec)
"""
return self.MJD, self.dateobs, self.datereduc, self.file
def savedata(self, datafile=_outfold + '/datafile.txt',
metafile=_outfold + '/metafile.txt'):
"""Save current table
"""
header = ['MJD', 'EW', 'EC', 'VR', 'peaksep', 'depthcent', 'F0']
_np.savetxt(datafile, self.data, fmt='%12.6f',
header=(len(header) * '{:>12s}').format(*header))
_np.savetxt(metafile, self.metadata, fmt='%s', delimiter=',')
return
def loaddata(self, datafile=_outfold + '/datafile.txt',
metafile=_outfold + '/metafile.txt'):
"""Function to load a previous table
Usage:
>>> spdtb = Spec()
>>> spdtb.loaddata()
"""
self.data = _np.loadtxt(datafile)
if _os.path.exists(metafile):
self.metadata = _np.genfromtxt(metafile, dtype='str',
delimiter=',')
self.updatecount()
return
def updatecount(self, num=0):
if num > 0:
self.count = num
else:
self.count = len(self.data)
return
def loadspec(self, file):
"""Load a fits file (parameters `wl`, `flux`, `MJD`, `dateobs`,
        `datereduc` and `file`).
Currently, only compatible for standard fits.
"""
if file.find('.fit') == -1:
_warn.warn("# ERROR! `loadspec` unrecognized format!")
return
(self.wl, self.flux, self.MJD, self.dateobs, self.datereduc,
self.file) = loadfits(file)
(self.EW, self.EC, self.VR, self.peaksep, self.depthcent, self.F0) = \
analline(self.wl, self.flux, self.lbc, hwidth=self.hwidth,
verb=False, gaussfit=self.gaussfit)
return
def plotspec(self, outname=''):
"""Export current spec into a PNG file.
"""
if self.wl is None or self.flux is None:
_warn.warn('wrong Spec() parameters! {0}'.format(self.file))
return
if outname == '':
path, file = _phc.trimpathname(self.file)
outname = _phc.rmext(file)
# Normalization:
flux = linfit(self.wl, self.flux)
wl = self.wl
fig = _plt.figure()
ax = fig.add_subplot(111)
ax.plot(wl, flux)
ax.set_ylabel('norm. flux')
ax.set_xlabel('wavelength (arb. units)')
ax.set_title(outname)
_plt.savefig('{0}/{1:.2f}_{2}.png'.format(_outfold, self.MJD, outname))
if self.lbc > 0:
vels = (self.wl - self.lbc) / self.lbc * _phc.c.cgs * 1e-5
idx = _np.where(_np.abs(vels) <= self.hwidth)
flux = linfit(vels[idx], flux[idx])
vels = vels[idx]
_plt.clf()
ax = fig.add_subplot(111)
ax.plot(vels, flux)
ax.set_ylabel('norm. flux')
ax.set_xlabel('vel. (km/s)')
ax.set_title('{0:.2f} {1} {2:.2f}'.format(self.MJD, outname,
self.lbc))
_plt.savefig('{0}/{1:.2f}_{2}_{3:.2f}.png'.format(_outfold,
self.MJD, outname, self.lbc))
_plt.close()
return
def shiftfits(fitsfile, newsh=None, verbose=False):
""" Update FITS spec header for a given shift value. """
imfits = _pyfits.open(fitsfile, mode='update')
if 'WLSHIFT' in imfits[0].header:
if verbose:
print('# WLSHIFT = {0} for {1}'.format(imfits[0].header['WLSHIFT'],
_phc.trimpathname(fitsfile)[1]))
else:
if verbose:
print('# No WLSHIFT available for {0}'.format(
_phc.trimpathname(fitsfile)[1]))
if newsh is None:
newsh = _phc.user_input('Type the new shift: ')
if newsh != '':
imfits[0].header['WLSHIFT'] = float(newsh)
imfits.close()
return
def checkshiftfits(fitslist, lbc=6562.8):
""" Do *shiftfits* sistematically
INPUT: list of files
OUTPUT: fits files header updated with WLSHIFT.
"""
fig, ax = _plt.subplots()
for f in fitslist:
data = loadfits(f)
vel, flx = lineProf(data[0], data[1], lbc=lbc)
good = False
imfits = _pyfits.open(f)
if 'WLSHIFT' in imfits[0].header:
shift0 = float(imfits[0].header['WLSHIFT'])
else:
shift0 = 0.
shift = 0
while not good:
ax.plot([0, 0], [0.7, 1.2], ls='--', color='gray')
veli = vel + shift*3e5/lbc
ax.plot(veli, flx)
_plt.show()
_plt.draw()
ri = _phc.user_input('\n# Is it good?(y/other): ')
if ri != 'y':
try:
shift = float(_phc.user_input('Type shift: '))
except ValueError:
shift = 0.
else:
good = True
ax.cla()
if shift != 0:
shiftfits(f, newsh=shift+shift0)
_plt.close(fig)
return
def loadfits(fitsfile):
"""load FITS spec
Out: wl, flux, MJD, dateobs, datereduc, fitsfile
"""
imfits = _pyfits.open(fitsfile)
flux = imfits[0].data
wl = _np.arange(len(flux)) * imfits[0].header['CDELT1'] +\
imfits[0].header['CRVAL1']
(MJD, dateobs, datereduc) = (0., '', '')
dtinfo = False
if not dtinfo and 'MJD-OBS' in imfits[0].header:
MJD = float(imfits[0].header['MJD-OBS'])
dtinfo = True
if not dtinfo and 'MJD' in imfits[0].header:
MJD = float(imfits[0].header['MJD'])
dtinfo = True
if not dtinfo and 'JD' in imfits[0].header:
if isinstance(imfits[0].header['JD'], _strtypes):
if len(imfits[0].header['JD']) > 0:
MJD = float(imfits[0].header['JD']) - 2400000.5
dtinfo = True
else:
MJD = imfits[0].header['JD'] - 2400000.5
dtinfo = True
if not dtinfo and 'DATE-OBS' in imfits[0].header:
if len(imfits[0].header['DATE-OBS']) > 0:
dtobs = imfits[0].header['DATE-OBS']
dtobs, tobs = check_dtobs(dtobs)
MJD = _jdcal.gcal2jd(*dtobs)[1] + tobs
dtinfo = True
if not dtinfo and 'FRAME' in imfits[0].header:
dtobs = imfits[0].header['FRAME']
dtobs, tobs = check_dtobs(dtobs)
MJD = _jdcal.gcal2jd(*dtobs)[1] + tobs
dtinfo = True
if not dtinfo:
MJD = _jdcal.MJD_JD2000
_warn.warn('No DATE-OBS information is available! {0}\nAssuming '
'MJD_JD2000'.format(fitsfile))
if 'DATE-OBS' in imfits[0].header:
dateobs = imfits[0].header['DATE-OBS']
elif 'FRAME' in imfits[0].header:
dateobs = imfits[0].header['FRAME']
if 'IRAF-TLM' in imfits[0].header:
datereduc = imfits[0].header['IRAF-TLM']
elif 'DATE' in imfits[0].header:
datereduc = imfits[0].header['DATE']
if 'WLSHIFT' in imfits[0].header:
shift = float(imfits[0].header['WLSHIFT'])
wl += shift
imfits.close()
return wl, flux, MJD, dateobs, datereduc, fitsfile
def vac2air(wl):
"""The IAU standard for conversion from air to vacuum wavelengths is given
in Morton (1991, ApJS, 77, 119). For vacuum wavelengths (VAC) in Angstroms,
convert to air wavelength (AIR) via:
AIR = VAC / (1.0 + 2.735182E-4 + 131.4182 / VAC^2 + 2.76249E8 / VAC^4 )
"""
return wl / (1.0 + 2.735182E-4 + 131.4182 / wl**2 + 2.76249E8 / wl**4 )
def air2vac(wl):
"""The IAU standard for conversion from air to vacuum wavelengths is given
in Morton (1991, ApJS, 77, 119). For vacuum wavelengths (VAC) in Angstroms,
convert to air wavelength (AIR) via:
AIR = VAC / (1.0 + 2.735182E-4 + 131.4182 / VAC^2 + 2.76249E8 / VAC^4 )
Fitting the inverse curve:
VAC = AIR / (1.0 - 2.73443407E-4 - 1.31275255E2 / AIR^2 - 2.75708212E8 /
AIR^4 )
"""
return wl / (1.0 - 2.73443407e-04 - 1.31275255e+02 / wl**2 -
2.75708212e+08 / wl**4)
def vel2wl(vel, lbc):
""" Vel. to wavelength. Vel must be in km/s and output is in `lbc` units.
"""
wl = (vel / _phc.c.cgs * 1e5 + 1) * lbc
return wl
def wl2vel(wl, lbc):
""" Wavelength to vel., in km/s. `wl` and `lbc` units must be the same. """
vels = (wl - lbc) / lbc * _phc.c.cgs * 1e-5
return vels
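# Worked example for the two helpers above (approximate values, for
# illustration only): for Halpha, lbc = 6562.8 Angstroms,
# >>> vel2wl(1000., 6562.8)   # ~6584.7 Angstroms
# >>> wl2vel(6584.7, 6562.8)  # ~1000 km/s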
def hydrogenlinewl(ni, nf):
"""Generate H line transitions wavelengths in meters for VACUUM
Rydberg constant `R` was manually adjusted to fit Halpha and Hbeta lines.
"""
return (10967850. * (1. / nf**2 - 1. / ni**2))**-1.
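# Worked example (approximate values): hydrogenlinewl(3, 2) ~ 6.5646e-7 m
# (~6564.6 Angstroms, the vacuum Halpha wavelength) and hydrogenlinewl(4, 2)
# ~4.8627e-7 m (~4862.7 Angstroms, Hbeta).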
def calcres_R(hwidth=1350, nbins=108):
"""
(h)Width in km/s.
*WARNING*: `width` in HDUST input is only half.
    To get the HDUST effective R, multiply the input width by 2 here.
# R = lbd/Dlbd = _phc.c/Dv = _phc.c*nbins/width
# nbins = R*width/_phc.c
"""
return round(_phc.c.cgs * nbins / hwidth / 1e5)
def calcres_nbins(R=12000, hwidth=1350):
"""
(h)Width in km/s.
*WARNING*: `width` in HDUST input is only half.
    To get the HDUST effective R, multiply the input width by 2 here.
# R = lbd/Dlbd = _phc.c/Dv = _phc.c*nbins/width
# nbins = R*width/_phc.c
"""
return round(R * hwidth * 1e5 / _phc.c.cgs)
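# Worked example (rounded values): calcres_nbins(R=12000, hwidth=1350)
# gives ~54 bins, and calcres_R(hwidth=1350, nbins=54) recovers R ~ 12000.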
def lineProf(x, flx, lbc, flxerr=_np.empty(0), hwidth=1000., ssize=0.05):
'''
    lineProf() - returns a normalized array (flx) and an x array in
    VELOCITIES. `lbc` must be given in the same unit as x for the
    lambda -> vel conversion. If the x vector is already in velocity,
    use the linfit() function instead.
    x is important because y may not be evenly sampled.
    x and y must be in increasing order.
    ssize = fraction of the size of y; number of points used at the extremes
    to average the continuum. 'ssize' ranges from 0 (exclusive) to .5.
OUTPUT: vel (array), flx (array)
'''
x = (x - lbc) / lbc * _phc.c.cgs * 1e-5 # km/s
idx = _np.where(_np.abs(x) <= 1.001 * hwidth)
if len(flxerr) == 0:
flux = linfit(x[idx], flx[idx], ssize=ssize) # yerr=flxerr,
if len(x[idx]) == 0:
_warn.warn('Wrong `lbc` in the lineProf function')
return x[idx], flux
else:
flux, flxerr = linfit(x[idx], flx[idx], yerr=flxerr[idx], ssize=ssize)
if len(x[idx]) == 0:
_warn.warn('Wrong `lbc` in the lineProf function')
return x[idx], flux, flxerr
def linfit(x, y, ssize=0.05, yerr=_np.empty(0)):
r'''
    linfit() - returns a normalized array (y), at the positions of x.
    x is important because y may not be evenly sampled.
    x and y must be in increasing order.
    ssize = fraction of the size of y; number of points used at the extremes
    to average the continuum. 'ssize' ranges from 0 (exclusive) to .5.
OUTPUT: y, yerr (if given)
.. code:: python
#Example:
import numpy as np
import matplotlib.pyplot as plt
import pyhdust.phc as phc
import pyhdust.spectools as spt
wv = np.linspace(6500, 6600, 101)
flx = (np.arange(101)[::-1])/100.+1+phc.normgauss(4, x=wv,
xc=6562.79)*5
plt.plot(wv, flx)
normflx = spt.linfit(wv, flx)
plt.plot(wv, normflx, ls='--')
plt.xlabel(r'$\lambda$ ($\AA$)')
plt.ylabel('Flux (arb. unit)')
.. image:: _static/spt_linfit.png
:align: center
:width: 500
'''
ny = _np.array(y)[:]
if ssize < 0 or ssize > .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(y))
if ssize == 0:
ssize = 1
medx0, medx1 = _np.average(x[:ssize]), _np.average(x[-ssize:])
if ssize > 9:
medy0, medy1 = _np.median(ny[:ssize]), _np.median(ny[-ssize:])
else:
medy0, medy1 = _np.average(ny[:ssize]), _np.average(ny[-ssize:])
new_y = medy0 + (medy1 - medy0) * (x - medx0) / (medx1 - medx0)
idx = _np.where(new_y != 0)
ny[idx] = ny[idx] / new_y[idx]
if len(yerr) == 0.:
return ny
else:
yerr = yerr / _np.average(new_y)
return ny, yerr
def EWcalc(vels, flux, vw=1000):
"""
    Assumes the flux is already normalized and the vectors are sorted.
    Returns the EW value.
"""
idx = _np.where(_np.abs(vels) <= vw)
outvels = vels[idx]
normflux = flux[idx]
ew = 0.
if len(outvels) < 3:
# normflux = _np.ones(len(outvels))
return ew
for i in range(len(outvels) - 1):
dl = outvels[i + 1] - outvels[i]
# print(dl)
ew += (1. - (normflux[i + 1] + normflux[i]) / 2.) * dl
return ew
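# Minimal usage sketch for EWcalc (hypothetical spectrum): assuming `wl` and
# `rawflux` were read with loadfits(),
# >>> vels, nflx = lineProf(wl, rawflux, lbc=6562.8, hwidth=1000.)
# >>> ew = EWcalc(vels, nflx, vw=1000.)
# The result is the equivalent width in km/s (negative for net emission).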
def absLineCalc(vels, flux, vw=1000, ssize=0.05):
r"""
Calculate the line flux (input velocity vector). The `flux` is
NON-normalized.
``ssize`` parameter controns the size of flux that will be evaluated at the
extreme of the input flux array to determine the continuum level.
``vels = (wv - lbc) / lbc * phc.c.cgs * 1e-5 # km/s``
Output in the same flux units times :math:`\Delta v` (both flux and *v*
input units).
"""
idx = _np.where(_np.abs(vels) <= vw)
vels = vels[idx]
flux = flux[idx]
if ssize < 0 or ssize > .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(flux))
if ssize == 0:
ssize = 1
medx0, medx1 = _np.average(vels[:ssize]), _np.average(vels[-ssize:])
if ssize > 9:
medy0, medy1 = _np.median(flux[:ssize]), _np.median(flux[-ssize:])
else:
medy0, medy1 = _np.average(flux[:ssize]), _np.average(flux[-ssize:])
new_y = medy0 + (medy1 - medy0) * (vels - medx0) / (medx1 - medx0)
base = _np.trapz(new_y, vels)
line = _np.trapz(flux, vels)
return line - base
def gauss_fit(x, y, a0=None, x0=None, sig0=None, emission=True, ssize=0.05):
""" Return the area of a fitting Gaussian.
"""
if ssize <= 0 or ssize >= .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(y))
if ssize == 0:
ssize = 1
medx0, medx1 = _np.average(x[:ssize]), _np.average(x[-ssize:])
if ssize > 6:
medy0, medy1 = _np.median(y[:ssize]), _np.median(y[-ssize:])
else:
medy0, medy1 = _np.average(y[:ssize]), _np.average(y[-ssize:])
new_y = medy0 + (medy1 - medy0) * (x - medx0) / (medx1 - medx0)
q = 95
func = _np.max
if not emission:
func = _np.min
q = 5
if a0 is None:
a0 = _np.abs(_np.percentile(y-new_y, q)) - _np.median(y-new_y)
if x0 is None:
x0 = x[_np.where(y-new_y == func(y-new_y))]
if sig0 is None:
sig0 = (_np.max(x)-_np.min(x))/10.
g_init = _models.Gaussian1D(amplitude=a0, mean=x0, stddev=sig0)
g_init.bounds['amplitude'] = (0, 2*a0)
# g_init.verblevel = 0
fit_g = _fitting.LevMarLSQFitter()
# print(a0, x0, sig0, _np.shape(a0), _np.shape(x0), _np.shape(sig0),
# _np.shape(x), _np.shape(y))
g = fit_g(g_init, x, y-new_y)
# print(g.parameters[0])
return g, new_y
def absLineDeb(wv, flux, lb0, lb1, vw=1000, ssize=0.05, a0=None, sig0=None,
allout=False):
""" Return the area of a fitting Gaussian with debblending.
"""
lbc = _np.average([lb0, lb1])
vels = (wv - lbc) / lbc * _phc.c.cgs * 1e-5
idx = _np.where(_np.abs(vels) <= vw*(1+ssize))
x = wv[idx]
y = flux[idx]
if ssize <= 0 or ssize >= .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(y))
if ssize == 0:
ssize = 1
nfxsig = _np.std(y)
emission = True
if _np.percentile(y, 5) + nfxsig < 1:
emission = False
if _np.percentile(y, 95) - 1.5*nfxsig > 1:
emission = True
medx0, medx1 = _np.average(x[:ssize]), _np.average(x[-ssize:])
if ssize > 6:
medy0, medy1 = _np.median(y[:ssize]), _np.median(y[-ssize:])
else:
medy0, medy1 = _np.average(y[:ssize]), _np.average(y[-ssize:])
new_y = medy0 + (medy1 - medy0) * (x - medx0) / (medx1 - medx0)
q = 95
if not emission:
q = 5
if a0 is None:
a0 = _np.abs(_np.percentile(y-new_y, q)) - _np.median(y-new_y)
if sig0 is None:
sig0 = (_np.max(x)-_np.min(x))/10.
g1 = _models.Gaussian1D(a0, lb0, sig0)
g1.bounds['amplitude'] = (0, 2*a0)
g1.bounds['mean'] = (lb0*0.9, lb1*0.99)
g2 = _models.Gaussian1D(a0, lb1, sig0)
g1.bounds['amplitude'] = (0, a0)
g2.bounds['mean'] = (lb0*1.01, lb1*1.1)
gg_init = ( g1 + g2 )
# gg_init.verblevel = 0
fitter = _fitting.SLSQPLSQFitter()
gg = fitter(gg_init, x, y-new_y, verblevel=0)
# print(gg.parameters[0], gg.parameters[0+3])
if not allout:
return ( gg.parameters[0]*gg.parameters[2]*_np.sqrt(2*_np.pi),
gg.parameters[0+3]*gg.parameters[2+3]*_np.sqrt(2*_np.pi) )
else:
return gg, new_y, idx
def absLineCalcWave(wv, flux, lbc, vw=1000, ssize=0.05, gauss=False,
allout=False, spcas=None):
r"""
Calculate the line flux (input velocity vector). The `flux` is
NON-normalized.
``ssize`` parameter controns the size of flux that will be evaluated at the
extreme of the input flux array to determine the continuum level.
``vels = (wv - lbc) / lbc * phc.c.cgs * 1e-5 # km/s``
Output in the same flux units times :math:`\Delta v` (both flux and *v*
input units).
"""
vels = (wv - lbc) / lbc * _phc.c.cgs * 1e-5
idx = _np.where(_np.abs(vels) <= vw)
wv = wv[idx]
flux = flux[idx]
if not gauss:
if ssize <= 0 or ssize >= .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(flux))
if ssize == 0:
ssize = 1
medx0, medx1 = _np.average(wv[:ssize]), _np.average(wv[-ssize:])
if ssize > 6:
medy0, medy1 = _np.median(flux[:ssize]), _np.median(flux[-ssize:])
else:
medy0, medy1 = ( _np.average(flux[:ssize]),
_np.average(flux[-ssize:]) )
new_y = medy0 + (medy1 - medy0) * (wv - medx0) / (medx1 - medx0)
if spcas is not None:
if spcas == 0:
idx = _np.where(wv > 25.95*1e4)
flux[idx] = new_y[idx]
elif spcas == 1:
idx = _np.where(wv < 25.95*1e4)
# print(len(idx[0]))
flux[idx] = new_y[idx]
base = _np.trapz(new_y, wv)
line = _np.trapz(flux, wv)
if not allout:
return line - base
else:
return line, base, idx
else:
# nflx = linfit(wv, flux)
nflx = flux
nfxsig = _np.std(nflx)
emission = True
if _np.percentile(nflx, 5) + nfxsig < 1:
emission = False
if _np.percentile(nflx, 95) - 1.5*nfxsig > 1:
emission = True
g, newy = gauss_fit(wv, nflx, emission=emission, ssize=ssize)
if not allout:
return g.parameters[0]*g.parameters[2]*_np.sqrt(2*_np.pi)
else:
return g, newy, idx
def ECcalc(vels, flux, ssize=.05, gaussfit=False, doublegf=True):
"""
    Assumes the flux is already normalized and the vectors are sorted.
If `gaussfit=False`, the single maximum value is taken.
If `gaussfit=True`, then a single (`doublegf=False`) or a double
(`doublegf=True`) Gaussian fit is performed over the line profile to
determine its maximum.
    Computes the top of the line emission and returns the velocity at which
    it occurs.
"""
vels = _np.array(vels)
flux = _np.array(flux)
# if lncore > 0:
# idx = _np.where(_np.abs(vels) < lncore)
# vels = vels[idx]
# flux = flux[idx]
if len(flux) < 5:
return _np.NaN, 0.
if not gaussfit:
idx = _np.where(_np.max(flux) == flux)
if flux[idx][0] < 1:
return _np.NaN, 0.
if len(idx[0]) > 1:
idx = idx[0][0]
return flux[idx][0], vels[idx][0]
else:
# check if there is a peak
ssize = int(ssize * len(vels))
if ssize == 0:
ssize = 1
contmax = _np.max(_np.append(flux[:ssize], flux[-ssize:]))
fluxmax = _np.max(flux)
if fluxmax < 1.01 * contmax:
return _np.NaN, 0.
# Define model function to be used to fit to the data above
def gauss(x, *p):
A, mu, sigma = p
return A * _np.exp(-(x - mu)**2 / (2. * sigma**2)) + 1
#
ivc = _np.abs(vels - 0).argmin()
if doublegf:
i0 = _np.abs(flux[:ivc] - _np.max(flux[:ivc])).argmin()
i1 = _np.abs(flux[ivc:] - _np.max(flux[ivc:])).argmin() + ivc
try:
p0 = [1., vels[i0], 40.]
coeff0, tmp = _curve_fit(gauss, vels[:ivc], flux[:ivc], p0=p0)
p1 = [1., vels[i1], 40.]
coeff1, tmp = _curve_fit(gauss, vels[ivc:], flux[ivc:], p0=p1)
ECs = _np.array([coeff0[0] + 1., coeff1[0] + 1.])
EC = _np.max(ECs)
idx = _np.where(ECs == EC)[0]
# vel = _np.abs(coeff0[1] / 2) + _np.abs(coeff1[1] / 2)
if idx == 0:
vel = coeff0[1]
else:
vel = coeff1[1]
return EC, vel
except ValueError:
return _np.NaN, 0.
else:
try:
p0 = [1., 0, 40.]
coeff0, tmp = _curve_fit(gauss, vels, flux, p0=p0)
EC = coeff0[0] + 1.
return EC, coeff0[1]
except ValueError:
return _np.NaN, 0.
def VRcalc(vels, flux, vw=1000, gaussfit=False, ssize=0.05):
"""
    Computes the PEAK on both (blue/red) sides of the line, adjusting
    the rest velocity (TBD).
"""
    # compute and apply the rest velocity correction
vc = 0.
vels += vc
    # check the array size
if len(vels) < 5:
vw = 0
ew0, ew1 = (_np.NaN, _np.NaN)
return ew0, ew1, vc
    # cut at vw
idx = _np.where(_np.abs(vels) <= vw)
outvels = vels[idx]
normflux = flux[idx]
#
ivc = _np.abs(outvels - 0).argmin()
if not gaussfit:
V = _np.max(normflux[:ivc])
R = _np.max(normflux[ivc:])
else:
# check if there is a peak
ssize = int(ssize * len(vels))
if ssize == 0:
ssize = 1
contmax = _np.max(_np.append(flux[:ssize], flux[-ssize:]))
fluxmax = _np.max(flux)
if fluxmax < 1.01 * contmax:
# print('# Bad profile!')
return 0, 0, vc
# Define model function to be used to fit to the data above
def gauss(x, *p):
A, mu, sigma = p
return A * _np.exp(-(x - mu)**2 / (2. * sigma**2)) + 1.
#
ivc = _np.abs(vels - 0).argmin()
i0 = _np.abs(flux[:ivc] - _np.max(flux[:ivc])).argmin()
i1 = _np.abs(flux[ivc:] - _np.max(flux[ivc:])).argmin() + ivc
try:
p0 = [1., vels[i0], 40.]
coeff0, tmp = _curve_fit(gauss, vels[:ivc], flux[:ivc], p0=p0)
p1 = [1., vels[i1], 40.]
coeff1, tmp = _curve_fit(gauss, vels[ivc:], flux[ivc:], p0=p1)
V = coeff0[0] + 1.
R = coeff1[0] + 1.
except ValueError:
return 1., 1., vc
return V, R, vc
def PScalc(vels, flux, vc=0., ssize=.05, gaussfit=False):
"""
    Computes the peak separation.
    `gaussfit` = True, do a Gaussian fit before and after zero velocity;
    False, use the maximum (default).
"""
# check if there is a peak
ssize = int(ssize * len(vels))
if ssize == 0:
ssize = 1
contmax = _np.max(_np.append(flux[:ssize], flux[-ssize:]))
fluxmax = _np.max(flux)
if fluxmax < 1.01 * contmax:
return _np.NaN, _np.NaN
vels += vc
ivc = _np.abs(vels - 0).argmin()
i0 = _np.abs(flux[:ivc] - _np.max(flux[:ivc])).argmin()
i1 = _np.abs(flux[ivc:] - _np.max(flux[ivc:])).argmin() + ivc
if not gaussfit:
return vels[i0], vels[i1]
else:
# Define model function to be used to fit to the data above
def gauss(x, *p):
A, mu, sigma = p
return A * _np.exp(-(x - mu)**2 / (2. * sigma**2)) + 1.
#
try:
p0 = [1., vels[i0], 20.]
coeff0, tmp = _curve_fit(gauss, vels[:ivc], flux[:ivc], p0=p0)
p1 = [1., vels[i1], 20.]
coeff1, tmp = _curve_fit(gauss, vels[ivc:], flux[ivc:], p0=p1)
return coeff0[1], coeff1[1]
except ValueError:
print('# PScalc error...')
# print vels[i0], flux[i0], vels[i1], flux[i1]
return 0, 0
def FWHM(vels, flux, halfmax, vmax=350., flxincr=.01):
""" Calc. FWHM (Full-Width at Half Maximum) based on the value of the
Half Maximum
TODO: Gaussfit"""
if len(vels) < 5 or len(flux) < 5:
_warn.warn('# No valid line profile for FHWM')
return _np.NaN
vels = _np.array(vels)
flux = _np.array(flux)
# remove vels bigger than maxvel
idx = _np.where(_np.abs(vels) < vmax)
vels = vels[idx]
flux = flux[idx]
difflx = _np.abs(flux - halfmax)
# remove diff bigger than hmf*halfmax
i = 0
idx = _np.where(difflx < halfmax * flxincr*i)
while len(vels[idx]) < 2:
i += 1
idx = _np.where(difflx < halfmax * flxincr*i)
vels = vels[idx]
difflx = difflx[idx]
#
# difvels: ordered vels based on the flux difference
# idx = _np.argsort(difflx)
# difvels = vels[idx][:4]
#
# difvels: ordered vels closest to the 0 vel.
idx = _np.argsort(_np.abs(vels))
difvels = vels[idx][:2]
return _np.sum(_np.abs(difvels))
def DCcalc(vels, flux, vmax=None, vc=0., ssize=0.05):
"""
    Computes, in the presence of emission, the depth of the central reversal.
    If the maximum flux is below 1.01*continuum, the flux at the line center
    is returned for both output values.
TODO: gauss fit
Return flux at `vmax` (maximum considered velocity), and flux at `v0`.
Depth of the central reversal is `flux[ivmax] - flux[ivc]`.
"""
if len(flux) < 5:
return _np.NaN, _np.NaN
vels += vc
ivc = _np.abs(vels - 0).argmin()
# check if there is a peak
ssize = int(ssize * len(vels))
if ssize == 0:
ssize = 1
contmax = _np.max(_np.append(flux[:ssize], flux[-ssize:]))
fluxmax = _np.max(flux)
if fluxmax < 1.01 * contmax:
return flux[ivc], flux[ivc]
# if a vmax is not given...
    if not isinstance(vmax, (int, float)):
vmax = _np.abs(flux - _np.max(flux)).argmin()
vmax = vels[vmax]
ivmax = _np.abs(vels - vmax).argmin()
return flux[ivmax], flux[ivc]
def analline(lbd, flux, lbdc, hwidth=1000, verb=True, gaussfit=False,
doublegf=True):
"""
Return the analysis of a line.
Both lbd and flux need to be ordered (a normalization IS FORCED).
lbd,lbdc must have the same unit, and width in km/s is required.
    The line will be cut so that the total DeltaLambda will be 2*width
if `lbdc` <= 0, lbd array is assumed to be a velocity array (in km/s)!
| EXAMPLE: Using sed2data. lbc = 0.6565 (halpha), obs = 1 (width==1000)
| analline(lbd=sed2data[obs,:,2], flux=sed2data[obs,:,3], lbc=lbc)
The EW is the equivalent width in km/s,
EC is the Emission over Continuum ratio,
VR ratio,
peaksep in km/s,
FWHM is the Full-Width at Half Maximum (emission as maximum)
F0 is the depth of rest wavelength normalized to the continuum
OUTPUT: EW, EC, VR, peaksep, FWHM, F0
"""
if lbdc > 0:
vels = (lbd - lbdc) / lbdc * _phc.c.cgs * 1e-5
else:
vels = lbd
    # check if the file has the desired info.
if vels[0] > -hwidth * .95 or vels[-1] < hwidth * .95:
if verb:
_warn.warn('spec out of range (wavelength)! Check hwidth!')
return _np.NaN, _np.NaN, _np.NaN, _np.NaN, _np.NaN, _np.NaN
idx = _np.where(_np.abs(vels) <= hwidth)
vels = vels[idx]
flux = flux[idx]
# Normalization:
flux = linfit(vels, flux)
# Output:
EW = EWcalc(vels, flux, vw=hwidth)
EC, velEC = ECcalc(vels, flux, gaussfit=gaussfit, doublegf=doublegf)
ew0, ew1, vc = VRcalc(vels, flux, vw=hwidth, gaussfit=gaussfit)
if ew1 == 0 or EC is _np.NaN:
VR = 1
else:
VR = ew0 / ew1
if EC is _np.NaN:
peaksep = _np.NaN
else:
vel0, vel1 = PScalc(vels, flux, gaussfit=gaussfit)
peaksep = vel1 - vel0
if peaksep is _np.NaN:
EC = peaksep
VR = peaksep
EC2, F0 = DCcalc(vels, flux, vmax=velEC)
# depthcent = EC2 - F0
if EC2 < 1:
EC2 = 1.
fwhm = FWHM(vels, flux, (EC2 + F0) / 2., vmax=_np.abs(velEC))
else:
fwhm = FWHM(vels, flux, EC/2, vmax=hwidth)
return EW, EC, VR, peaksep, fwhm, F0
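# Minimal usage sketch for analline (hypothetical file name):
# >>> wl, flux, MJD, dateobs, datereduc, fname = loadfits('spec.cal.fits')
# >>> EW, EC, VR, peaksep, fwhm, F0 = analline(wl, flux, 6562.8, hwidth=1000.)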
def kurlog(file=None, output=None):
""" Generate a list of teff and logg present in a Kurucz file.
    The `output` argument is currently not used; the lists are returned. """
if file is None:
file = _os.path.join(_hdt.hdtpath(), 'refs', 'fp00k0.pck')
teffs = []
loggs = []
fp = open(file)
for i, line in enumerate(fp):
if line.find('TEFF') > -1:
teffs += [float(line.split()[1])]
loggs += [float(line.split()[3])]
fp.close()
return teffs, loggs
def kuruczflux(teff, logg, wavrange=None):
""" Return fluxes from a Kurucz model.
Fluxes are in ergs/cm**2/s/hz/ster and wavelength in nm (wavrange must be
in nm).
    The Kurucz tables are in erg/s/sr/cm2/Hz. Therefore, one has to multiply
    by 4pi to get the observed flux. Below, the conversion from the Kurucz
    units to the usual erg/s/cm2/A:
# erg/s/sr/cm2/Hz:
    lK15k, K15k, info = spt.kuruczflux(5777, 3., wavrange=[100, 1000])
lK15k*= 1e1 #Ang
K15k = 2.99792458E+18*K15k*(lK15k)**-2*4*np.pi #erg/s/cm2/A
OUTPUT: wv, flux, info"""
kurfile = _os.path.join(_hdt.hdtpath(), 'refs', 'fp00k0.pck')
kurwvlines = (174 - 22)
kurflxcol = 10
# wave
read = _phc.readrange(kurfile, 22, 22 + kurwvlines)
wave = _np.array([val for line in read for val in line.split()],
dtype=float)
# choose best
bestT = _np.inf
bestg = _np.inf
fp = open(kurfile)
for i, line in enumerate(fp):
if line.find('TEFF') > -1:
readT = float(line.split()[1])
if _np.abs(readT - teff) <= _np.abs(bestT - teff):
bestT = readT
readg = float(line.split()[3])
if _np.abs(readg - logg) <= _np.abs(bestg - logg):
bestg = readg
i0 = i + 1
fp.close()
best = [bestT, bestg]
# read best flux
read = _phc.readrange(kurfile, i0, i0 + kurwvlines)
flux = _np.array([val for line in read for val in
(line[i:i + kurflxcol] for i in range(0, len(line) - 1, kurflxcol))],
dtype=float)
# cut range
if wavrange is None:
return wave, flux, best
else:
idx = _np.where((wave > wavrange[0]) & (wave < wavrange[-1]))
return wave[idx], flux[idx], best
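# Usage sketch for `kuruczflux`, following the unit conversion quoted in the
# docstring (the Teff/logg values here are arbitrary examples):
# wv_nm, flx, best = kuruczflux(19000., 4.0, wavrange=[100, 1000])
# wv_A = wv_nm * 10.                                   # nm -> Angstrom
# flx_A = 2.99792458e18 * flx * wv_A**-2 * 4 * _np.pi  # erg/s/cm2/A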
def plot_all(fs2list, obsl=None, fmt=['png'], out=None, lbc=.6564,
hwidth=1000., solidfiles=True, xax=0, philist=[0], figname=None,
nolabels=False, obsidx=False):
r""" plot_all-like routine
    ``obsl`` list, in degrees. It will find the closest values. If it finds
    :math:`\Delta\theta > 3^\circ`, a warning message is displayed. The ``obs``
    index can be used instead if ``obsidx = True``.
    ``solidfiles`` keeps solid lines for the files (changing only the colors)
    and changes the line style between observers. If ``False``, do the
    opposite.
"""
if isinstance(fs2list, _strtypes):
fs2list = [fs2list]
if not isinstance(obsl, list) and obsl is not None:
_warn.warn('Wrong `obsl` format (None or list)', stacklevel=2)
return
fig = _plt.figure(figsize=(9, 9))
lins, cols = (3, 2)
gs = _gridspec.GridSpec(lins, cols)
gs.update(hspace=0.25)
axt = _plt.subplot(gs[0, 1])
ax0 = _plt.subplot(gs[1, 0])
ax1 = _plt.subplot(gs[1, 1])
ax2 = _plt.subplot(gs[2, 0])
ax3 = _plt.subplot(gs[2, 1])
xtitle = 'radial scale'
for f in fs2list:
m = _inp.HdustMod(f)
tfile = _os.path.join(m.proj, m.modn, m.modn+m.suf+'*avg.temp')
tfile = _glob(tfile)
if len(tfile) > 0:
npt, rplus, lev = (0, 0, 0)
tfile.sort()
tfile = tfile[-1]
ncr, ncmu, ncphi, nLTE, nNLTE, Rstar, Ra, beta, data, pcr, pcmu, \
pcphi = _hdt.readtemp(tfile)
for phiidx in range(0, len(philist)):
icphi = philist[phiidx]
x = data[0, :, 0, icphi]
if (xax == 0):
x = _np.log10(x / Rstar - 1.)
xtitle = r'$\log_{10}(r/R_*-1)$'
elif (xax == 1):
x = x / Rstar
xtitle = r'$r/R_*$'
elif (xax == 2):
x = 1. - Rstar / x
xtitle = r'$1-R_*/r$'
                y = data[3 + lev, :, ncmu // 2 + npt + rplus, icphi]
y = y / 1000.
axt.plot(x, y, 'o-')
fs2d = _hdt.readfullsed2(f)
iobs = range(len(fs2d))
if obsl is not None:
if not obsidx:
iobs = [_phc.find_nearest(_np.arccos(fs2d[:, 0, 0])*180/_np.pi,
ob, idx=True) for ob in obsl]
else:
iobs = obsl
for ob in iobs:
obfmt = r'{:.1f}$^\circ$, {:.1f}$^\circ$'.format(_np.arccos(
fs2d[ob, 0, 0])*180/_np.pi, _np.arccos(fs2d[ob, 0, 1]))
if solidfiles:
pdict = {'color': _phc.cycles(fs2list.index(f)),
'dashes': _phc.dashes(iobs.index(ob))}
else:
pdict = {'dashes': _phc.dashes(fs2list.index(f)),
'color': _phc.cycles(iobs.index(ob))}
ax0.plot(fs2d[ob, :, 2], fs2d[ob, :, 3],
label=_os.path.basename(f), **pdict)
ax1.plot(fs2d[ob, :, 2], fs2d[ob, :, 3],
label=obfmt, **pdict)
ax2.plot(fs2d[ob, :, 2], fs2d[ob, :, 7]*100, **pdict)
ax3.plot(*lineProf(fs2d[ob, :, 2], fs2d[ob, :, 3], lbc=lbc,
hwidth=hwidth), **pdict)
axt.set_xlabel(xtitle, labelpad=1)
axt.set_ylabel(r'Temperature (10$^3$ K)')
ax0.set_xlim([.37, 1.])
ax0.autoscale(axis='y', tight=True)
ax0.set_yscale('log')
ax0.set_xlabel(r'$\mu$m')
ax0.set_ylabel(r'$\lambda F_\lambda/F$')
ax1.set_xlim([1., 100.])
ax1.autoscale(axis='y', tight=True)
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlabel(r'$\mu$m', labelpad=1)
ax1.yaxis.tick_right()
ax1.yaxis.set_label_position("right")
ax1.yaxis.set_ticks_position('both')
ax1.set_ylabel(r'$\lambda F_\lambda/F$')
ax2.set_xlim([.37, .9])
ax2.autoscale(axis='y', tight=True)
ax2.set_xlabel(r'$\mu$m')
ax2.set_ylabel('P (%)')
ax3.set_xlim([-hwidth, hwidth])
ax3.set_xlabel(r'km/s')
ax3.yaxis.tick_right()
ax3.yaxis.set_label_position("right")
ax3.yaxis.set_ticks_position('both')
ax3.set_ylabel('Normalized Flux')
if not nolabels:
ax1.legend(loc='best', fancybox=True, framealpha=0.5, fontsize=9,
labelspacing=0.05)
if len(fs2list) > 1 and not nolabels:
ax0.legend(loc='best', fancybox=True, framealpha=0.5, fontsize=8,
labelspacing=0.05)
_phc.savefig(fig, fmt=fmt, figname=figname) # figname='outname')
return
def splitKurucz(filen, path=None):
"""
Split atmospheric Kurucz file (e.g., 'ap00k0.dat') into individual models.
    INPUT: filen, path (strings)
OUTPUT: *files written
"""
if path is None:
path = _os.getcwd()
allk = _np.loadtxt(filen, dtype=str, delimiter='\n')
src = _os.path.splitext(_os.path.split(filen)[1])[0]
if not _os.path.exists(src):
_os.mkdir(src)
src = _os.path.join(src, src)
for i in range(0, len(allk) - 1):
if 'EFF' in allk[i]:
iref = i
teff = int(allk[i].split()[1][:-1])
logg = float(allk[i].split()[3][:-3])
elif 'DECK6 72' in allk[i]:
allk[i] = allk[i].replace('DECK6 72', 'DECK6 71')
elif 'EFF' in allk[i + 1]:
_np.savetxt(src+'tef%05dg%.1f.dat' % (teff, logg),
allk[iref:i + 1], fmt='%s')
_np.savetxt(src+'tef%05dg%.1f.dat' % (teff, logg), allk[iref:], fmt='%s')
return
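# Usage sketch for `splitKurucz` (the file name below is a placeholder; one
# file per (Teff, logg) pair is written under a directory named after the
# input file):
# splitKurucz('ap00k0.dat')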
def writeFits(flx, lbd, extrahead=None, savename=None, verbose=False,
path=None, lbdc=None, externhd=None):
""" Write a 1D spectra FITS.
| INPUT: flux array, lbd array, extrahead flag+info, save name.
| - lbd array: if len(lbd)==2: lbd = [CRVAL1, CDELT1]
| else: CDELT1 = (lbd[-1]-lbd[0])/(len(lbd)-1)
| CRVAL1 = lbd[0]
| WARNING: lbd must be in ANGSTROMS (FITS default). It can also be
| velocities. In this case, it must be in km/s and lbdc is given in
| ANGSTROM.
| - extrahead: matrix (n,2). Example: [['OBJECT','Achernar'], ['COMMENT',
| 'value']]
`externhd` = copy the header from an external file.
OUTPUT: write FITS file.
"""
if path is None or path == '':
path = _os.getcwd()
    if path[-1] != '/':
path += '/'
if lbdc is not None:
lbd = (lbd / _phc.c.cgs * 1e5 + 1) * lbdc
hdu = _pyfits.PrimaryHDU(flx)
hdulist = _pyfits.HDUList([hdu])
if externhd is not None:
extf = _pyfits.open(externhd)
hdulist[0].header = extf[0].header
hdulist[0].header['BZERO'] = 0.
hdulist[0].header['CRVAL1'] = lbd[0]
if len(lbd) == 2:
hdulist[0].header['CDELT1'] = lbd[1]
else:
hdulist[0].header['CDELT1'] = (lbd[-1] - lbd[0]) / (len(lbd) - 1)
if extrahead is not None:
for e in extrahead:
hdulist[0].header[e[0]] = e[1]
if savename is None:
savename = 'spec_{0}'.format(_phc.dtflag())
if savename.find('.fit') == -1:
savename += '.fits'
hdu.writeto(path + savename, clobber=True)
if verbose:
print('# FITS file {0}{1} saved!'.format(path, savename))
return
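# Usage sketch for `writeFits` (hypothetical values; with a 2-element `lbd`
# the pair is interpreted as [CRVAL1, CDELT1], in Angstroms):
# flx = _np.ones(1000)
# writeFits(flx, [4000., 0.5], savename='synthetic.fits',
#     extrahead=[['OBJECT', 'MyStar'], ['COMMENT', 'synthetic test spectrum']])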
def averagespecs(speclist, n=999, path='', objname='OBJECT'):
""" Average specs taken in the same MJD, in groups of approx. `n`
elements.
OUTPUT: Files written. """
if len(path) > 0 and path[-1] != '/':
path += '/'
speclist = _np.array(speclist)
obsdates = []
for sp in speclist:
data = loadfits(sp)
obsdates.append(data[2])
obsdates = _np.array(obsdates)
# Sorting things
idx = _np.argsort(obsdates)
speclist = speclist[idx]
obsdates = obsdates[idx]
# Same day
iMJD = []
for m in obsdates:
iMJD.append(divmod(m, 1)[0])
idxMJD = _np.unique(iMJD)
# Do the avgs based on the MJD
for i in idxMJD:
idx = _np.where(iMJD == i)
N = len(speclist[idx])
for j in _phc.splitequal(N/n, N):
fidx = speclist[idx][j[0]:j[1]]
data = loadfits(fidx[0])
wl = data[0]
newdate = _np.average( obsdates[idx][j[0]:j[1]] )
MJD = int(divmod(newdate, 1)[0])
MJDfrac = int(round( divmod(newdate, 1)[1]*10000 ))
fluxes = _np.zeros(len(wl))
for f in fidx:
data = loadfits(f)
fluxes += _np.interp(wl, data[0], data[1])
flx = fluxes/len(fidx)
outname = 'alpEri_PUCHEROS_VIS_{0}_{1:04d}_avg.fits'.format(MJD,
MJDfrac)
writeFits( flx, wl, savename=outname, path=path, extrahead=[
['OBJECT', objname], ['Comment', 'Averaged from {0} spectra'.
format(len(fidx))], ['MJD-OBS', newdate] ] )
return
def cardelli(lbd, flux, ebv=0., Rv=3.1):
"""
Milky Way Extinction law from Cardelli et al. 1989
`lbd` must be in microns.
OUTPUT: Corrected flux.
"""
x = 1. / _np.array(lbd) # CCM x is 1/microns
a, b = _np.ndarray(x.shape, x.dtype), _np.ndarray(x.shape, x.dtype)
if any((x < 0.3) | (10 < x)):
raise ValueError('Some wavelengths outside CCM 89 extinction curve ' +
'range')
irs = (0.3 <= x) & (x <= 1.1)
opts = (1.1 <= x) & (x <= 3.3)
nuv1s = (3.3 <= x) & (x <= 5.9)
nuv2s = (5.9 <= x) & (x <= 8)
fuvs = (8 <= x) & (x <= 10)
# CCM Infrared
a[irs] = .574 * x[irs]**1.61
b[irs] = -0.527 * x[irs]**1.61
# CCM NIR/optical
a[opts] = _np.polyval((.32999, -.7753, .01979, .72085, -.02427, -.50447,
.17699, 1), x[opts] - 1.82)
b[opts] = _np.polyval((-2.09002, 5.3026, -.62251, -5.38434, 1.07233,
2.28305, 1.41338, 0), x[opts] - 1.82)
# CCM NUV
a[nuv1s] = 1.752 - .316 * x[nuv1s] - 0.104 / ((x[nuv1s] - 4.67)**2 + .341)
b[nuv1s] = -3.09 + 1.825 * x[nuv1s] + 1.206 / ((x[nuv1s] - 4.62)**2 + .263)
y = x[nuv2s] - 5.9
Fa = -.04473 * y**2 - .009779 * y**3
Fb = -.2130 * y**2 - .1207 * y**3
a[nuv2s] = 1.752 - .316 * x[nuv2s] - 0.104 / \
((x[nuv2s] - 4.67)**2 + .341) + Fa
b[nuv2s] = -3.09 + 1.825 * x[nuv2s] + \
1.206 / ((x[nuv2s] - 4.62)**2 + .263) + Fb
# CCM FUV
a[fuvs] = _np.polyval((-.070, .137, -.628, -1.073), x[fuvs] - 8)
b[fuvs] = _np.polyval((.374, -.42, 4.257, 13.67), x[fuvs] - 8)
AlbAv = a + b / Rv
return flux * 10**(-AlbAv * Rv * ebv / 2.5)
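# Usage sketch for `cardelli` (wavelengths in microns; note that, with the
# sign convention of this implementation, a positive `ebv` attenuates the
# flux):
# lbd_um = _np.linspace(0.35, 2.4, 100)
# flx_red = cardelli(lbd_um, _np.ones(100), ebv=0.1, Rv=3.1)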
def fitzpatrick(wave, flux, ebv, Rv=3.1, LMC2=False, AVGLMC=False):
"""
Deredden a flux vector using the Fitzpatrick (1999) parameterization
Parameters
----------
    wave : array
        Wavelength in microns (the original IDL routine expects Angstroms;
        this port uses x = 1/wave, i.e. inverse microns, directly).
flux : array
Calibrated flux vector, same number of elements as wave.
    ebv : float
        Color excess E(B-V). If a positive ebv is supplied,
        then fluxes will be dereddened rather than reddened.
    Rv : float, optional
        Ratio of total to selective extinction, A(V)/E(B-V).
        The default is 3.1.
AVGLMC : boolean
If True, then the default fit parameters c1,c2,c3,c4,gamma,x0
are set to the average values determined for reddening in the
general Large Magellanic Cloud (LMC) field by
Misselt et al. (1999, ApJ, 515, 128). The default is
False.
LMC2 : boolean
If True, the fit parameters are set to the values determined
for the LMC2 field (including 30 Dor) by Misselt et al.
Note that neither `AVGLMC` nor `LMC2` will alter the default value
of Rv, which is poorly known for the LMC.
Returns
-------
new_flux : array
Dereddened flux vector, same units and number of elements
as input flux.
Notes
-----
.. note::
This function was ported from the IDL Astronomy User's Library.
The following five parameters allow the user to customize
the adopted extinction curve. For example, see Clayton et al. (2003,
ApJ, 588, 871) for examples of these parameters in different
interstellar environments.
x0 - Centroid of 2200 A bump in microns (default = 4.596)
gamma - Width of 2200 A bump in microns (default =0.99)
c3 - Strength of the 2200 A bump (default = 3.23)
c4 - FUV curvature (default = 0.41)
c2 - Slope of the linear UV extinction component
(default = -0.824 + 4.717/R)
        c1 - Intercept of the linear UV extinction component
        (default = 2.030 - 3.007*c2)
"""
# x = 10000./ wave # Convert to inverse microns
x = 1. / wave # microns
curve = x * 0.
# Set some standard values:
x0 = 4.596
gamma = 0.99
c3 = 3.23
c4 = 0.41
c2 = -0.824 + 4.717 / Rv
c1 = 2.030 - 3.007 * c2
if LMC2:
x0 = 4.626
gamma = 1.05
c4 = 0.42
c3 = 1.92
c2 = 1.31
c1 = -2.16
elif AVGLMC:
x0 = 4.596
gamma = 0.91
c4 = 0.64
c3 = 2.73
c2 = 1.11
c1 = -1.28
# Compute UV portion of A(lambda)/E(B-V) curve using FM fitting function
# and R-dependent coefficients
xcutuv = _np.array([10000.0 / 2700.0])
xspluv = 10000.0 / _np.array([2700.0, 2600.0])
iuv = _np.where(x >= xcutuv)[0]
N_UV = len(iuv)
iopir = _np.where(x < xcutuv)[0]
Nopir = len(iopir)
if (N_UV > 0):
xuv = _np.concatenate((xspluv, x[iuv]))
else:
xuv = xspluv
yuv = c1 + c2 * xuv
yuv = yuv + c3 * xuv**2 / ((xuv**2 - x0**2)**2 + (xuv * gamma)**2)
yuv = yuv + c4 * (0.5392 * (_np.maximum(xuv, 5.9) - 5.9)**2 + 0.05644 * (
_np.maximum(xuv, 5.9) - 5.9)**3)
yuv = yuv + Rv
yspluv = yuv[0:2] # save spline points
if (N_UV > 0):
curve[iuv] = yuv[2::] # remove spline points
# Compute optical portion of A(lambda)/E(B-V) curve
# using cubic spline anchored in UV, optical, and IR
xsplopir = _np.concatenate(([0], 10000.0 / _np.array([26500.0, 12200.0,
6000.0, 5470.0, 4670.0, 4110.0])))
ysplir = _np.array([0.0, 0.26469, 0.82925]) * Rv / 3.1
ysplop = _np.array((_np.polyval([-4.22809e-01, 1.00270, 2.13572e-04][::-1],
Rv ), _np.polyval([-5.13540e-02, 1.00216, -7.35778e-05][::-1], Rv ),
_np.polyval([ 7.00127e-01, 1.00184, -3.32598e-05][::-1], Rv ),
_np.polyval([ 1.19456, 1.01707, -5.46959e-03, 7.97809e-04,
-4.45636e-05][::-1], Rv ) ))
ysplopir = _np.concatenate((ysplir, ysplop))
if (Nopir > 0):
tck = _interpolate.splrep(_np.concatenate((xsplopir, xspluv)),
_np.concatenate((ysplopir, yspluv)), s=0)
curve[iopir] = _interpolate.splev(x[iopir], tck)
# Now apply extinction correction to input flux vector
curve *= -ebv
return flux * 10.**(0.4 * curve)
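# Usage sketch for `fitzpatrick` (wavelengths in microns, as used by the
# x = 1/wave conversion above; a positive `ebv` dereddens the flux):
# wave_um = _np.linspace(0.35, 2.4, 100)
# obs_flux = _np.ones(100)
# flx_dered = fitzpatrick(wave_um, obs_flux, ebv=0.1, Rv=3.1)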
def sort_specs(specs, path=None):
""" Specs in an (N,2) array, where specs[:,0] are the files paths and
specs[:,1] the instrument name.
Return ordered_specs"""
if path is not None:
if path[-1] != '/':
path += '/'
else:
path = ''
nsp = _np.shape(specs)[0]
MJDs = _np.zeros(nsp)
specs = _np.array(specs)
lims = [_np.inf, -_np.inf]
for i in range(nsp):
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(path +
specs[i][0])
MJDs[i] = MJD
if MJDs[i] < lims[0]:
lims[0] = MJDs[i]
if MJDs[i] > lims[1]:
lims[1] = MJDs[i]
return specs[MJDs.argsort()], lims
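# Usage sketch for `sort_specs` (file names, instrument labels and the path
# below are placeholders):
# specs = [['night1/star_a.fits', 'FEROS'], ['night2/star_b.fits', 'MUSICOS']]
# ordered, (mjd0, mjd1) = sort_specs(specs, path='/data/spectra/')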
def convgaussFunc(wl, flx, lbc, hwidth=1000., convgauss=0., frac=0., ssize=.05,
wlout=False):
""" Do a Gaussian convolution of a given Line Profile with a Gaussian.
`wl`, `flx`, `lbc`: wavelenght and flux of the spectrum containing the
line, and its value.
`hwidth`, `ssize`: width to be keeped around the line (km/s), and the
region (in percentage) where the continuum level will be evaluted around
the selected region.
`convgauss`: if bigger then 0., do the convolution. Its values is the sigma
of the gaussian conv. profile (in km/s).
`frac`: controls the intensity of the convolution. `frac`=0 means pure
profile output and `frac`=1 a pure gaussian output with the same EW value.
`wlout`: returns a wavelength array instead of a velocity array (standar)
OUTPUT: vel/wl, flux (arrays)
"""
(x, yo) = lineProf(wl, flx, lbc=lbc, hwidth=hwidth + 3 * convgauss,
ssize=ssize)
y1 = yo
y2 = 0.
if convgauss > 0 and frac > 0:
step = _np.abs(_np.min([x[j + 1] - x[j] for j in range(len(x) - 1)]))
xn = _np.arange(-hwidth - 3 * convgauss,
hwidth + 3 * convgauss + step, step)
cf = _phc.normgauss(convgauss, x=xn)
yo = _np.interp(xn, x, yo)
x = xn
y1 = yo * (1 - frac)
y2 = _np.convolve(yo * frac, cf / _np.trapz(cf), 'same')
if wlout:
x = (x / _phc.c.cgs * 1e5 + 1) * lbc
return x, y1 + y2
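# Usage sketch for `convgaussFunc`: smooth an observed profile with a 30 km/s
# Gaussian while preserving its EW ('halpha.fits' is a placeholder; lbc in
# the same unit as wl):
# wl, flux = loadfits('halpha.fits')[0:2]
# vel, cflux = convgaussFunc(wl, flux, lbc=6562.8, hwidth=1000.,
#     convgauss=30., frac=1.)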
# def gaussfold(wl, flx, sig, lbc, hwidth=1000., ssize=.05):
# """Translation from gaussfold.pro"""
# (x, yo) = lineProf(wl, flx, lbc=lbc, hwidth=hwidth+3*sig, ssize=ssize)
# x = (x / _phc.c.cgs * 1e5 + 1) * lbc
# lammax = _np.max(x)
# lammin = _np.min(x)
# dlambda = sig / 17.
# interlam = lammin + dlambda * _np.arange( (lammax-lammin)/dlambda+1 )
# interflux = _np.interp( interlam, wl, flx )
# fwhm_pix = sig / dlambda
# window = fwhm_pix(17*fwhm_pix).astype(int)
# gauss = _phc.psf_gaussian(window, sig=fwhm_pix, norm=True, ndim=1)
# fold = _phc.convol
# fluxfold = _np.interp( lam, interlam, fold )
# _warn('# Function not implemented!!')
# return None
def cutpastrefspec(ivl, iflx, irefvl, ireflx, hwidth, ssize=.05):
""" Cut and paste a given line profile into a reference line profile.
    Both profiles (with any resolution) must be normalized and given in
    velocity space.
    It was designed to solve the problem of Achernar's Halpha line wings
    and works like this: given a reference profile (`irefvl`, `ireflx`),
    the selected profile is cut at the `hwidth` position and then pasted
    at the corresponding position (and intensity level) of the reference
    spectrum.
    OUTPUT: irefvl, reflx
"""
flx = _np.interp(irefvl, ivl, iflx)
i0 = _np.abs(irefvl + hwidth).argmin()
i1 = _np.abs(irefvl - hwidth).argmin()
ssize = int(ssize * len(flx))
if ssize == 0:
ssize = 1
    refav = _np.average(ireflx[i0 - ssize // 2:i0 + ssize // 2 + 1]) / 2. + \
        _np.average(ireflx[i1 - ssize // 2:i1 + ssize // 2 + 1]) / 2.
    av = _np.average(flx[i0 - ssize // 2:i0 + ssize // 2 + 1]) / 2. + \
        _np.average(flx[i1 - ssize // 2:i1 + ssize // 2 + 1]) / 2.
flx += refav - av
reflx = _np.array(ireflx).copy()
reflx[i0:i1 + 1] = flx[i0:i1 + 1]
return irefvl, reflx
def load_specs_fits(speclist, ref, lbc, lncore=None, hwidth=None,
gaussfit=False, plotcut=0):
""" Load a list of specs and do the *line core cut & paste*
`lncore`: cut and paste hwidth of the line center. It can be None, and
must be < hwidth. If hwidth is None, it is assumed to be 1000 km/s.
`speclist` : ['path+file.fits', ...]
`ref`: reference spectra to do the cut & paste
    `plotcut`: if plotcut > 0, save the cut spectra in steps of this
    variable.
OUTPUT: dtb_obs
"""
if hwidth is None:
hwidth = 1000.
# do core cut?
    docore = False
    if lncore is not None:
        docore = lncore < hwidth
# load ref
refwl, reflx = loadfits(ref[0])[0:2]
refvl, reflx = lineProf(refwl, reflx, lbc=lbc)
# load specs
dtb_obs = Spec(lbc=lbc, hwidth=hwidth, gaussfit=gaussfit)
for i in range(_np.shape(speclist)[0]):
print(speclist[i])
dtb_obs.loadspec(speclist[i])
vl, flx = lineProf(dtb_obs.wl, dtb_obs.flux, lbc=lbc)
if docore:
cuted = cutpastrefspec(vl, flx, refvl, reflx, lncore)
dtb_obs.flux = cuted[1]
dtb_obs.wl = (cuted[0]/_phc.c.cgs*1e5+1)*lbc
(dtb_obs.EW, dtb_obs.EC, dtb_obs.VR, dtb_obs.peaksep,
dtb_obs.depthcent, dtb_obs.F0) = analline(dtb_obs.wl,
dtb_obs.flux, dtb_obs.lbc, hwidth=lncore, verb=False,
gaussfit=dtb_obs.gaussfit)
else:
(dtb_obs.EW, dtb_obs.EC, dtb_obs.VR, dtb_obs.peaksep,
dtb_obs.depthcent, dtb_obs.F0) = analline(dtb_obs.wl,
dtb_obs.flux, dtb_obs.lbc, hwidth=hwidth, verb=False,
gaussfit=dtb_obs.gaussfit)
dtb_obs.addspec()
# complementary plot
if plotcut > 0 and docore:
fig0, ax = _plt.subplots()
for i in range(_np.shape(speclist)[0]):
dtb_obs.loadspec(speclist[i])
vl, flx = lineProf(dtb_obs.wl, dtb_obs.flux, lbc=lbc)
cuted = cutpastrefspec(vl, flx, refvl, reflx, lncore)
if i % plotcut == 0:
ax.plot(cuted[0], cuted[1])
_phc.savefig(fig0)
return dtb_obs
def plot_spec_info(speclist, dtb_obs, mAEW=False, mgray=None):
""" Standard plot of the Spec class (EW, E/C, V/R, peak-sep., FWHM, F0)
OUTPUT: figure (fig pyplot)
"""
if mAEW:
dtb_obs.data[:, 1] *= 1000*dtb_obs.lbc/_phc.c.cgs*1e5
# Legend, Markers and colors idx...
instm = list(_np.unique(speclist[:, 1]))
# coridx = [ phc.cycles(instm.index(i)) for i in speclist[:, 1]]
cores = _phc.gradColor(range(len(instm)), cmapn='inferno')
coridx = [ cores[instm.index(i)] for i in speclist[:, 1] ]
coridx = _np.array(coridx)
mkidx = [ _phc.cycles(instm.index(i), 'mk') for i in speclist[:, 1]]
mkidx = _np.array(mkidx)
# Plots
fig = _plt.figure()
lins, cols = (7, 1)
gssteps = [slice(0, 2), 2, 3, 4, 5, 6]
gs = _gridspec.GridSpec(lins, cols)
axs = [_plt.subplot(gs[g, :]) for g in gssteps]
# EW
axs[0].invert_yaxis()
axs[-1].set_xlabel('Julian date - 2400000.5')
ylabels = [u'EW (m\u00c5)', 'E/C', 'V/R', ('pk. sep.'+'\n'+'(km/s)'),
'FWHM'+'\n'+'(km/s)', r'F${\lambda 0}$']
for i, ax in enumerate(axs):
# binned
x, y = _phc.bindata(dtb_obs.data[:, 0], dtb_obs.data[:, i+1])
# yi = _savgol(y, 3, 1)
ax.plot(x, y, color='gray', zorder=0)
# points
for uniquem in set(mkidx):
idx = _np.where(mkidx == uniquem)
ax.plot(dtb_obs.data[:, 0][idx], dtb_obs.data[:, i+1][idx],
color=coridx[idx][0], marker=uniquem, ls='')
ax.set_ylabel(ylabels[i])
#
xlim = axs[0].get_xlim()
axs[2].plot(xlim, [1, 1], ls=":", color='k', zorder=1)
for i in range(1, len(axs)):
# ax.locator_params(axis='y', nbins=4)
axs[i].yaxis.set_major_locator(_MaxNLocator(nbins=4, prune='upper'))
if i in [1, 2, 3]:
axs[i].get_yticklabels()[-1].set_visible(False)
for ax in axs[:-1]:
ax.set_xticklabels([])
# Legend
for i in range(len(instm)):
# axs[0].plot([np.NaN], [np.NaN], label=instm[i], color=phc.cycles(i),
# marker=phc.cycles(i, 'mk'), ls='')
axs[0].plot([_np.NaN], [_np.NaN], label=instm[i], color=cores[i],
marker=_phc.cycles(i, 'mk'), ls='')
axs[0].legend(loc='best', fancybox=True, framealpha=0.5, fontsize=8,
labelspacing=0.05, ncol=2)
# bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.
fig.subplots_adjust(hspace=0.01)
# Gray
for ax in axs:
ax.set_xlim(xlim)
if mgray is not None:
ylim = ax.get_ylim()
rect = _mpatches.Rectangle([mgray[0], ylim[0]],
mgray[1]-mgray[0], ylim[1]-ylim[0], ec="gray", fc='gray',
alpha=0.5, zorder=1)
ax.add_patch(rect)
if len(mgray) == 4:
if mgray is not None:
ylim = ax.get_ylim()
rect = _mpatches.Rectangle([mgray[2], ylim[0]],
mgray[3]-mgray[2], ylim[1]-ylim[0], ec="gray", fc='gray',
alpha=0.5, zorder=1, hatch='//')
ax.add_patch(rect)
return fig
# TODO: Check if obsolete
def normalize_range(lb, spec, a, b):
"""This function is obsolete and must be removed.
Still here for compatibility issues.
"""
a2 = (spec[b] - spec[a]) / (lb[b] - lb[a])
a1 = spec[a] - a2 * lb[a]
return spec / (a1 + a2 * lb)
def normalize_spec(lb, flx, q=2, diff=0.03, perc=0, nlbp=50):
""" Normalize a spectrum using the non-parametric regression algorithm of
Local Polynomial Kernel (order=``q``).
If perc > 0, a "percentile filter" is applyed to the spectrum (divided in
nlbp bins).
INPUT: lb, flx
OUTPUT: norm_flx
"""
def linear_model(x, *coef):
result = 0
for i in range(len(coef)):
result += coef[i]*x**i
return result
if perc <= 0:
Initial_guess = [0.,0.]
coef1, cov1 = _curve_fit(linear_model, lb, flx, Initial_guess)
idx0 = _np.where(flx != 0)
ilb = lb[idx0]
iflx = flx[idx0]
idxi = _np.where(_np.abs(linear_model(ilb, *coef1)/iflx-1) < diff)
xsi = ilb[idxi]
ysi = iflx[idxi]
else:
xsi, ysi = _phc.bindata(lb, flx, nbins=nlbp, perc=perc)
xsi = xsi.reshape(-1,1)
Initial_guess = _np.zeros(q+1)
coef2, cov2 = _curve_fit(linear_model, xsi, ysi, Initial_guess)
k2 = linear_model(lb, *coef2)
return flx/k2
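# Usage sketch for `normalize_spec` (a cubic continuum fit with a percentile
# pre-filter; the values and the input spectrum are illustrative only):
# wl, flux = loadfits('halpha.fits')[0:2]
# nflx = normalize_spec(wl, flux, q=3, perc=90, nlbp=40)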
def renorm(vl, y):
""" Renormalize ``y`` so that the equivalent width is preserved when the
continuum is shifted to 1.
"""
ext = _np.mean([y[0], y[-1]])
a0 = _np.trapz(y, vl)
A = ((a0-_np.trapz(_np.tile(1, len(vl)), vl))/
(a0-_np.trapz(_np.tile(ext, len(vl)), vl)))
B = 1-A*ext
return A*y+B
def normEW(vl, y, area=None):
""" Normalize ``y`` curve to have a specific area. If ``area is None``,
then the normalized equivalent width is preserved.
"""
if area is None:
area = _np.trapz(linfit(vl, y), vl)
y0 = linfit(vl, y)-1
a1 = _np.trapz(y0, vl)
a0 = _np.trapz(_np.tile([1], len(vl)), vl)
f = (area-a0)/a1
return f*y0+1
def checksubdirs(path, star, lbc, hwidth=1000, showleg=True, plots=False):
"""
    Scan the night subdirectories of `path` for calibrated spectra of `star`
    and plot their line profiles around `lbc`.
"""
if not _os.path.exists('{0}/{1}'.format(path, star)):
_os.system('mkdir {0}/{1}'.format(path, star))
nights = [o for o in _os.listdir(path) if _os.path.isdir('{0}/{1}'.
format(path, o))]
fig = _plt.figure()
ax = fig.add_subplot(111)
spdtb = Spec()
spdtb.lbc = lbc
    spdtb.hwidth = hwidth
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
for cal in scal:
spdtb.loadspec(cal)
spdtb.addspec()
if not _np.isnan(spdtb.EW):
if plots:
spdtb.plotspec()
vels = (spdtb.wl - lbc) / lbc * _phc.c.cgs * 1e-5
idx = _np.where(_np.abs(vels) <= hwidth)
flux = linfit(vels[idx], spdtb.flux[idx])
vels = vels[idx]
leg = spdtb.MJD
ax.plot(vels, flux, label=leg, alpha=0.7,
color=_phc.colors[_np.mod(spdtb.count,
len(_phc.colors))])
else:
print('# Data not reduced for %s at %s!' % (star, night))
ax.set_xlim([-hwidth, hwidth])
ax.set_ylim([-1, 5])
if showleg:
legend = _plt.legend(loc=(0.75, .05), labelspacing=0.1)
_plt.setp(legend.get_texts(), fontsize='small')
_plt.savefig('{0}/{1}_at_{2}.png'.format(_outfold, star, lbc))
_plt.close()
spdtb.savedata(datafile='{0}/{1}.txt'.format(_outfold, star),
metafile='{0}/meta_{1}.txt'.format(_outfold, star))
return
def VREWcalc(vels, flux, vw=1000):
"""
    Assumes the flux is already normalized and the vectors are ordered.
    Computes the EW for the two sides (blue/red) of the line, adjusting
    the rest velocity (TBD).
"""
    # compute and apply the rest-velocity correction
    vc = 0.
    vels += vc
    # cut at vw and check the array size
if len(vels) < 5:
vw = 0
if vw > 0:
idx = _np.where(_np.abs(vels) <= vw)
outvels = vels[idx]
normflux = flux[idx]
else:
ew0 = 0.
ew1 = 0.
return ew0, ew1, vc
#
ivc = _np.abs(outvels - 0).argmin()
ew0 = 0.
for i in range(0, ivc):
dl = outvels[i + 1] - outvels[i]
ew0 += (1. - (normflux[i + 1] + normflux[i]) / 2.) * dl
ew1 = 0.
for i in range(ivc, len(outvels) - 1):
dl = outvels[i + 1] - outvels[i]
ew1 += (1. - (normflux[i + 1] + normflux[i]) / 2.) * dl
return ew0, ew1, vc
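# Usage sketch for `VREWcalc` on an already normalized, velocity-space
# profile (here built with `lineProf`; the input spectrum is a placeholder):
# wl, flux = loadfits('halpha.fits')[0:2]
# vels, nflux = lineProf(wl, flux, lbc=6562.8, hwidth=1000.)
# ew_blue, ew_red, vc = VREWcalc(vels, nflux, vw=1000.)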
def normcontinuum_std(flux, ssize=.05):
"""
Assumes that the `flux` vector is normalized.
`ssize` is the percentage of the flux vector to be sampled as continuum
(0-1.); default=0.05.
It returns the standard deviation of the normalized continuum (around 1.0).
"""
# averaging borders
ny = _np.array(flux)[:]
if ssize < 0 or ssize > .5:
_warn.warn('Invalid ssize value...', stacklevel=2)
ssize = 0
ssize = int(ssize * len(ny))
if ssize == 0:
ssize = 1
if ssize > 1:
continuum = _np.concatenate( (ny[:ssize], ny[-ssize:]) )
if _np.abs(1-_np.average(continuum)) < 0.05:
return _stt.mad( continuum )
# Whole averaging
mp = ssize*100
pp = ssize*100
p50 = _pos(ny, 1.)
    if p50 > 100 - pp:
        _warn.warn('The continuum of this spec is too low (<1)! '
            'Is it normalized?')
        pp = 100 - p50
    elif p50 < mp:
        _warn.warn('The continuum of this spec is too high (>1)! '
            'Is it normalized?')
        mp = p50
p45 = _np.percentile(ny, p50-mp)
p55 = _np.percentile(ny, p50+pp)
continuum = ny[_np.where((ny > p45) & (ny < p55))]
return _stt.mad(continuum)
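# Usage sketch for `normcontinuum_std`: estimate the continuum noise of a
# normalized profile, sampling 5% of the points at each border:
# sigma_cont = normcontinuum_std(nflux, ssize=.05)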
def plotSpecData(dtb, limits=None, civcfg=[1, 'm', 2013, 1, 1],
fmt=['png'], ident=None, lims=None, setylim=False, addsuf=''):
""" Plot spec class database `vs` MJD e civil date
Plot originally done to London, Canada, 2014.
INPUT: civcfg = [step, 'd'/'m'/'y', starting year, month, day]
`lims` sequence: 'EW', 'E/C', 'V/R', 'Pk. sep. (km/s)', 'E-F0', 'F0'
`lims` = [[-2,4+2,2],[1.,1.4+.1,0.1],[.6,1.4+.2,.2],[0,400+100,100],
[.30,.45+.05,.05],[0.6,1.20+.2,.2]]
If `lims` is defined, `setylim` can be set to True.
OUTPUT: Written image."""
if isinstance(dtb, _strtypes):
print('# Loading dtb {0}'.format(dtb))
dtb = _np.loadtxt(dtb)
if ident is not None:
idref = _np.unique(ident)
ylabels = ['EW', 'E/C', 'V/R', 'Pk. sep. (km/s)', 'E-F0', 'F0']
fig, ax = _plt.subplots(6, 1, sharex=True, figsize=(9.6, 8))
icolor = 'blue'
for i in range(1, len(ylabels) + 1):
ax[i - 1].plot(*_phc.bindata(dtb[:, 0], dtb[:, i], 20))
for j in range(len(dtb[:, 0])):
if ident is not None:
idx = _np.where(ident[j] == idref)[0]
icolor = _phc.colors[idx]
ax[i - 1].plot(dtb[j, 0], dtb[j, i], 'o', color=icolor)
ax[i - 1].set_ylabel(ylabels[i - 1])
if lims is not None:
if lims[i - 1][-1] != 0:
ax[i - 1].set_yticks(_np.arange(*lims[i - 1]))
if setylim:
ax[i - 1].set_ylim([ lims[i - 1][0], lims[i - 1][1] ])
if ident is not None:
for id in idref:
idx = _np.where(id == idref)[0]
icolor = _phc.colors[idx]
ax[0].plot([], [], 'o', color=icolor, label=id)
ax[0].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
prop={'size': 6})
if limits is None:
# limits = ax[0].get_xlim()
limits = [dtb[0, 0], dtb[-1, 0]]
else:
ax[0].set_xlim(limits)
mjd0, mjd1 = limits
ax[5].set_xlabel('MJD')
ticks = _phc.gentkdates(mjd0, mjd1, civcfg[0], civcfg[1],
dtstart=_dt.datetime(civcfg[2], civcfg[3], civcfg[4]).date())
mjdticks = [_jdcal.gcal2jd(date.year, date.month, date.day)[1] for date in
ticks]
# ticks = [dt.datetime(*jdcal.jd2gcal(jdcal.MJD_0, date)[:3]).date() for \
# date in ax[0].get_xticks()]
# mjdticks = ax[0].get_xticks()
for i in range(1, 6 + 1):
ax2 = ax[i - 1].twiny()
ax2.set_xlim(limits)
ax2.set_xticks(mjdticks)
ax2.set_xticklabels(['' for date in ticks])
if i == 1:
ax2.set_xlabel('Civil date')
ax2.set_xticklabels([date.strftime("%d %b %y") for date in ticks])
_plt.setp( ax2.xaxis.get_majorticklabels(), rotation=45 )
_plt.subplots_adjust(left=0.13, right=0.8, top=0.88, bottom=0.06,
hspace=.15)
for f in fmt:
print ('SpecQ{1}.{0}'.format(f, addsuf))
_plt.savefig('SpecQ{1}.{0}'.format(f, addsuf), transparent=True)
_plt.close()
return
def din_spec(metadata, lbc=6562.86, hwidth=1500., res=50, interv=None,
fmt=['png'], outname='din_spec', pxsize=8, vmin=None, vmax=None, avg=True,
cmapn='inferno', refspec=None, figsize=None):
""" Plot dynamical specs. from metadata table of the Spec class.
`interv` controls the interval between specs (in days).
`res` is the resolution in km/s.
    By default (`avg`=True), the average of the spectra in each bin is shown.
    If `avg`=False, the spectrum nearest to the bin center (in time) is shown.
    If `refspec` is not None, then a difference spectrum is shown.
"""
# Define MJD and bins
dates = _np.array(metadata[:, 0], dtype=float)
t0 = _np.min(dates)
tf = _np.max(dates)
if interv is None:
interv = _np.linspace(t0, tf, 21)
else:
interv = _np.arange(t0, tf + interv, interv)
dt = interv[1] - interv[0]
# Select specs
wl0 = _np.arange(-hwidth, hwidth + res, res)
# Load refspec, if required
baselevel = 1.
if refspec is not None:
wl, flux, tmp, tmp, tmp, tmp = loadfits(refspec)
wl, flux = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
refflx = _np.interp(wl0, wl, flux)
baselevel = 0
fluxes = _np.zeros(( len(wl0), len(interv) )) + baselevel
for i in range(len(interv)):
# method 1
if not avg:
date = _phc.find_nearest(dates, interv[i])
if date < interv[i] + dt / 2 and date > interv[i] - dt / 2:
j = list(dates).index(date)
wl, flux, tmp, tmp, tmp, tmp = loadfits(metadata[j, 3])
wl, flux = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
if refspec is None:
fluxes[:, i] = _np.interp(wl0, wl, flux)
else:
flux = _np.interp(wl0, wl, flux)
fluxes[:, i] = flux - refflx
# method 2
else:
k = 0
for j in range(len(dates)):
if dates[j] < interv[i] + dt / 2 and dates[j] > interv[i] - \
dt / 2:
wl, flux, tmp, tmp, tmp, tmp = loadfits(metadata[j, 3])
wl, flux = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
fluxes[:, i] += _np.interp(wl0, wl, flux)
k += 1
if k > 0:
# fluxes[:,i]/= k
wl = vel2wl(wl0, lbc)
tmp, fluxes[:, i] = lineProf(wl, fluxes[:, i], lbc=lbc,
hwidth=hwidth)
if refspec is not None:
fluxes[:, i] = fluxes[:, i] - refflx
if all(fluxes[:, i] == baselevel):
fluxes[:, i] = _np.NaN
# Create image
img = _np.empty((pxsize * len(interv), len(wl0)))
for i in range(len(interv)):
img[i * pxsize:(i + 1) * pxsize] = _np.tile(fluxes[:, i], pxsize).\
reshape(pxsize, len(wl0))
# Save image
if figsize is None:
fig, ax = _plt.subplots(figsize=(len(wl0) / 16, pxsize *
len(interv) / 16), dpi=80)
else:
fig, ax = _plt.subplots(figsize=figsize)
# _plt.figure(figsize=(len(wl0) / 16, pxsize * len(interv) / 16), dpi=80)
# print _np.min(img), _np.max(img)
cmapn = _plt.get_cmap(cmapn)
cmapn.set_bad('k', 1.)
ax.imshow(img, vmin=vmin, vmax=vmax, cmap=cmapn, origin='lower')
ax.set_xlabel(r'Velocity (km s$^{-1}$)')
ax.set_ylabel(r'Julian Day - 2400000.5')
# ax.set_xlim([-hwidth, hwidth])
ax.set_yticks(_np.linspace(pxsize*len(interv)*.1, pxsize*len(interv)*.9,
8))
ax.set_yticklabels([int(round((tf-t0)*t/(pxsize*len(interv))+t0))
for t in ax.get_yticks()], rotation='vertical')
ax.set_xticklabels([int(round(t*2.*hwidth/(len(wl0)-1)-hwidth)) for
t in ax.get_xticks()]) # , rotation='vertical')
# fig.tight_layout()
ax.xaxis.set_tick_params(color='gray', width=1.1)
ax.yaxis.set_tick_params(color='gray', width=1.1)
fig.gca().invert_yaxis()
_phc.savefig(fig, fmt=fmt, figname=outname)
return
def plot_line_str(fig, ax, lbc='', ylabel='', fs=14, xlim=None, dlim=None,
cmapn='gnuplot', lfs=10, ylim=None):
""" Line plotting structure """
    if lbc != '':
        ax.set_title(r'$\lambda_c$ = {0:.1f} $\AA$'.format(lbc), size=fs)
    if ylabel != '':
        ax.set_ylabel(ylabel, size=fs)
if xlim is not None:
ax.xlims = ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
ax.set_xlabel(r'Velocity (km s$^{-1}$)', size=fs)
# reverse to keep order consistent
ax.legend()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], loc='upper right', labelspacing=0.1,
fancybox=True, framealpha=0.5, fontsize=lfs) # loc=(1.05, .01)
rect = _mpatches.Rectangle([0.835, 0.01], 0.15, 0.44, ec="black",
fc='white', transform=ax.transAxes, zorder=10, alpha=0.5)
ax.add_patch(rect)
ax3 = fig.add_axes([0.82, 0.12, 0.025, 0.35])
# ax3.set_axis_bgcolor('white')
cmap = _plt.get_cmap(cmapn)
norm = _mpl.colors.Normalize(vmin=dlim[0], vmax=dlim[1])
cb = _mpl.colorbar.ColorbarBase(ax3, cmap=cmap, norm=norm,
orientation='vertical')
cb.set_label('MJD', size=fs)
fig.subplots_adjust(left=0.1, right=0.95, top=0.94, bottom=0.1)
# , hspace=0.3, wspace=.3)
return fig, ax
def spec_time(speclist, lbc=6562.8, ref_spec=("/data/Dropbox/work/"
"sci_16-15aeri/alpEri_FEROS_2000AVE.mt"), mod_lbc=.656461, MJDref=None,
mod_ref=("/data/Dropbox/work/sci_16-15aeri/"
"fullsed_mod03_VDDn0_1p4e12_Be_aeri2014.sed2"),
fmt=['png', 'pdf'], outname=None, cmapn='inferno', hwidth=1000.,
outpath='', figsize=(5, 7), ysh=0.01):
""" Plot specs over time as suggested by Rivi.
    ``speclist`` is an array of strings with the paths to the `*.fits` files.
    ``ref_spec`` is a reference `*.fits` file and ``mod_ref`` an hdust
    reference model. They are ignored if the path is not found.
    ``ysh`` controls the vertical separation of the profiles.
"""
    if outname is None or outname == '':
outname = _phc.dtflag()
MJDs = [_np.inf, 0]
for sp in speclist:
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(sp)
if MJD < MJDs[0]:
MJDs[0] = MJD
if MJD > MJDs[1]:
MJDs[1] = MJD
if MJDref is None:
MJDref = MJDs[0]
elif MJDs[0] > MJDref:
MJDs[0] = MJDref
# Plot
extrem = [_np.inf, 0]
fig, ax = _plt.subplots()
for sp in speclist:
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(sp)
vel, flux = lineProf(wl, flux, lbc, hwidth=hwidth)
if len(flux) == 0:
raise NameError('Wrong lbc in spt.spe')
if cmapn is not None:
cor = _phc.gradColor([MJD], min=MJDs[0], max=(MJDs[1]+
0.1*(MJDs[1]-MJDs[0])), cmapn=cmapn)[0]
else:
cor = 'k'
# print(MJD, MJDs, extrem, ysh, (MJD-MJDs[0])*ysh, flux, sp)
ax.plot(vel, flux+(MJD-MJDs[0])*ysh, color=cor)
if _np.max(flux+(MJD-MJDs[0])*ysh) > extrem[1]:
extrem[1] = _np.max(flux+(MJD-MJDs[0])*ysh)
if _np.min(flux+(MJD-MJDs[0])*ysh) < extrem[0]:
extrem[0] = _np.min(flux+(MJD-MJDs[0])*ysh)
# print(extrem)
if _os.path.exists(ref_spec):
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(ref_spec)
vel, flux = lineProf(wl, flux, lbc, hwidth=hwidth)
# ax.text(650., 0.8, 'Reference', horizontalalignment='center',
ax.text(800., 0.8, 'Reference', horizontalalignment='center',
verticalalignment='center') # , transform=ax.transAxes)
ax.plot(vel, flux+(MJDref-MJDs[0])*ysh, color='k', ls=':')
# print(MJDref, MJDs, ysh, extrem, _np.min(flux), _np.max(flux))
if _np.min(flux+(MJDref-MJDs[0])*ysh) < extrem[0]:
extrem[0] = _np.min(flux+(MJDref-MJDs[0])*ysh)
ax.plot(vel+5, flux+(57655-MJDs[0])*ysh, color='k', ls='--')
ax.text(800, 1.06+(57655-MJDs[0])*ysh, 'Reference',
horizontalalignment='center', verticalalignment='center')
print('A!')
if _np.max(flux+(57655-MJDs[0])*ysh) > extrem[1]:
print('B!')
extrem[1] = _np.max(flux+(57655-MJDs[0])*ysh)
if _os.path.exists(mod_ref):
s2d = _hdt.readfullsed2(mod_ref)
vel, flux = lineProf(s2d[4, :, 2], s2d[4, :, 3], mod_lbc,
hwidth=hwidth)
ax.plot(vel, flux+(56910-MJDs[0])*ysh, color='k', ls='--')
ax.text(800, 1.06+(56910-MJDs[0])*ysh, 'model',
horizontalalignment='center', verticalalignment='center')
ax.set_xlabel(r'Velocity (km s$^{-1}$)')
ax.set_ylabel(r'Julian Day - 2400000.5')
ax.set_ylim(extrem)
ax.set_xlim([-hwidth, hwidth])
# ax.set_yticks(_np.arange(56300, 57000+100, 100))
yref = [1., 1+_np.diff(MJDs)*ysh]
# yMJDs = _np.arange(56300, 57100, 100)
yMJDs = _np.arange(MJDs[0], MJDs[1], 100)
ax.set_yticks(list(_phc.renormvals(yMJDs, MJDs, yref)))
ax.set_yticklabels(yMJDs, rotation='vertical')
fig.set_size_inches(figsize)
fig.subplots_adjust(left=0.1, right=0.94, top=0.99, bottom=0.04)
ax.minorticks_on()
ax3 = ax.twinx()
ax3.set_yticks(list(_phc.renormvals(yMJDs, MJDs, yref)))
ax3.set_yticklabels([])
ax3.minorticks_on()
ax2 = ax.twinx()
ax2.spines['right'].set_position(('axes', 1.05))
ax2.set_ylabel('Civil date')
# dtminticks = _phc.gentkdates(56201., 57023., 1, 'm')
dtminticks = _phc.gentkdates(MJDs[0], MJDs[1], 1, 'm')
i = 1
# dtticks = _phc.gentkdates(56201., 57023., 3, 'm')
dtticks = _phc.gentkdates(MJDs[0], MJDs[1], 3, 'm')
mjdticks = [_jdcal.gcal2jd(date.year, date.month, date.day)[1] for date in
dtticks]
while dtticks[0] not in dtminticks:
dtminticks = _phc.gentkdates(yMJDs[0]+i, yMJDs[-1], 1, 'm')
i += 1
minjdticks = [_jdcal.gcal2jd(date.year, date.month, date.day)[1] for date
in dtminticks]
ax2.set_yticks(list(_phc.renormvals(mjdticks, MJDs, yref)))
ax2.set_yticks(list(_phc.renormvals(minjdticks, MJDs, yref)), minor=True)
xlabs = [date.strftime('%Y-%m-%d') for date in dtticks]
# xlabs[1::2] = ['']*len(xlabs[1::2])
ax2.set_yticklabels(xlabs, rotation='vertical')
ax2.set_ylim(extrem)
ax3.set_ylim(extrem)
ax.xaxis.set_tick_params(length=8, width=1.5)
ax.xaxis.set_tick_params(length=6, which='minor')
ax.yaxis.set_tick_params(length=4, which='minor')
ax.yaxis.set_tick_params(length=8, width=1.5)
ax2.yaxis.set_tick_params(length=4, which='minor')
ax2.yaxis.set_tick_params(length=8, width=1.5)
ax3.yaxis.set_tick_params(length=4, which='minor')
ax3.yaxis.set_tick_params(length=8, width=1.5)
# , fontsize=10)
_phc.savefig(fig, figname=outpath+outname, fmt=fmt)
return
def spec_time_Achernar(speclist, lbc=6562.8, fmt=['png', 'pdf'], outname=None,
cmapn='inferno', hwidth=1000., outpath='', figsize=(5, 15), ysh=0.01):
""" Plot specs over time as suggested by Rivi """
    if outname is None or outname == '':
outname = _phc.dtflag()
MJDs = [_np.inf, 0]
for sp in speclist:
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(sp)
if MJD < MJDs[0]:
MJDs[0] = MJD
if MJD > MJDs[1]:
MJDs[1] = MJD
MJDref = 56245
if MJDs[0] > MJDref:
MJDs[0] = MJDref
# Plot
extrem = [_np.inf, 0]
fig, ax = _plt.subplots()
for sp in speclist:
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(sp)
vel, flux = lineProf(wl, flux, lbc, hwidth=hwidth)
if len(flux) == 0:
raise NameError('Wrong lbc in spt.spe')
if cmapn is not None:
cor = _phc.gradColor([MJD], min=MJDs[0], max=(MJDs[1]+
0.1*(MJDs[1]-MJDs[0])), cmapn=cmapn)[0]
else:
cor = 'k'
# print(MJD, MJDs, extrem, ysh, (MJD-MJDs[0])*ysh, flux, sp)
ax.plot(vel, flux+(MJD-MJDs[0])*ysh, color=cor)
if _np.max(flux+(MJD-MJDs[0])*ysh) > extrem[1]:
extrem[1] = _np.max(flux+(MJD-MJDs[0])*ysh)
if _np.min(flux+(MJD-MJDs[0])*ysh) < extrem[0]:
extrem[0] = _np.min(flux+(MJD-MJDs[0])*ysh)
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits('/data/Dropbox/work'
'/sci_16-15aeri/alpEri_FEROS_2000AVE.mt')
vel, flux = lineProf(wl, flux, 6561.8, hwidth=hwidth)
ax.text(650., 0.8, 'photospheric ref.', horizontalalignment='center',
verticalalignment='center') # , transform=ax.transAxes)
ax.plot(vel, flux+(MJDref-MJDs[0])*ysh, color='k', ls=':')
if _np.min(flux+(MJDref-MJDs[0])*ysh) < extrem[0]:
extrem[0] = _np.min(flux+(MJDref-MJDs[0])*ysh)
s2d = _hdt.readfullsed2('/data/Dropbox/work/sci_16-15aeri/'
'fullsed_mod03_VDDn0_1p4e12_Be_aeri2014.sed2')
vel, flux = lineProf(s2d[4, :, 2], s2d[4, :, 3], .656461, hwidth=hwidth)
ax.plot(vel, flux+(56910-MJDs[0])*ysh, color='k', ls='--')
ax.text(800, 1.06+(56910-MJDs[0])*ysh, 'model',
horizontalalignment='center', verticalalignment='center')
ax.set_xlabel(r'Velocity (km s$^{-1}$)')
ax.set_ylabel(r'Julian Day - 2400000.5')
ax.set_ylim(extrem)
ax.set_xlim([-hwidth, hwidth])
# ax.set_yticks(_np.arange(56300, 57000+100, 100))
yref = [1., 1+_np.diff(MJDs)*ysh]
yMJDs = _np.arange(56300, 57100, 100)
ax.set_yticks(list(_phc.renormvals(yMJDs, MJDs, yref)))
ax.set_yticklabels(yMJDs, rotation='vertical')
fig.set_size_inches(figsize)
fig.subplots_adjust(left=0.1, right=0.94, top=0.99, bottom=0.04)
ax.minorticks_on()
ax3 = ax.twinx()
ax3.set_yticks(list(_phc.renormvals(yMJDs, MJDs, yref)))
ax3.set_yticklabels([])
ax3.minorticks_on()
ax2 = ax.twinx()
ax2.spines['right'].set_position(('axes', 1.05))
ax2.set_ylabel('Civil date')
dtminticks = _phc.gentkdates(56201., 57023., 1, 'm')
i = 1
dtticks = _phc.gentkdates(56201., 57023., 3, 'm')
mjdticks = [_jdcal.gcal2jd(date.year, date.month, date.day)[1] for date in
dtticks]
while dtticks[0] not in dtminticks:
dtminticks = _phc.gentkdates(yMJDs[0]+i, yMJDs[-1], 1, 'm')
i += 1
minjdticks = [_jdcal.gcal2jd(date.year, date.month, date.day)[1] for date
in dtminticks]
ax2.set_yticks(list(_phc.renormvals(mjdticks, MJDs, yref)))
ax2.set_yticks(list(_phc.renormvals(minjdticks, MJDs, yref)), minor=True)
xlabs = [date.strftime('%Y-%m-%d') for date in dtticks]
# xlabs[1::2] = ['']*len(xlabs[1::2])
ax2.set_yticklabels(xlabs, rotation='vertical')
ax2.set_ylim(extrem)
ax3.set_ylim(extrem)
ax.xaxis.set_tick_params(length=8, width=1.5)
ax.xaxis.set_tick_params(length=6, which='minor')
ax.yaxis.set_tick_params(length=4, which='minor')
ax.yaxis.set_tick_params(length=8, width=1.5)
ax2.yaxis.set_tick_params(length=4, which='minor')
ax2.yaxis.set_tick_params(length=8, width=1.5)
ax3.yaxis.set_tick_params(length=4, which='minor')
ax3.yaxis.set_tick_params(length=8, width=1.5)
# , fontsize=10)
_phc.savefig(fig, figname=outpath+outname, fmt=fmt)
return
def extractfromsplot(file, splot):
"""Ce = center; Co = core
#LcCe, LcCo, lcGW, lcEW, lvCe, lcCo, lvEW, lrCe, LrCo, lrEW
"""
out = _np.array(10 * [_np.NaN])
readflag = False
for line in splot:
if line.find(']:') > 0 and readflag:
readflag = False
if line.find(file) > 0:
readflag = True
if readflag:
info = line.split()
# if _re.match("^\d+?\.\d+?$", info[0]) is not None:
try:
float(info[0])
info = _np.array(info, dtype=float)
if info[0] > 6556 and info[0] < 6556 + 4.33:
if len(info) == 4:
out[6] = float(info[3])
elif len(info) == 7:
out[4] = float(info[0])
out[5] = float(info[4])
elif info[0] > 6556 + 4.33 and info[0] < 6556 + 2 * 4.33:
if len(info) == 4:
out[3] = float(info[3])
elif len(info) == 7:
out[0] = float(info[0])
out[1] = float(info[4])
out[2] = float(info[5])
elif info[0] > 6556 + 2 * 4.33 and info[0] < 6556 + 3 * 4.33:
if len(info) == 4:
out[9] = float(info[3])
elif len(info) == 7:
out[7] = float(info[0])
out[8] = float(info[4])
except:
pass
return out
def check_dtobs(dtobs):
""" Check if the dtobs fits the float format. Required for MJD calc. """
if 'T' in dtobs:
dtobs = dtobs.replace('.', '')
tobs, dtobs = dtobs.split('T')
if len(tobs) == 10:
dtobs, tobs = tobs, dtobs
tobs = tobs.split(':')
tobs = float(tobs[0]) * 3600 + float(tobs[1]) * 60 + float(tobs[2])
tobs /= (24 * 3600)
else:
tobs = 0.
if dtobs[4] == '-':
dtobs = dtobs.split('-')
elif dtobs[2] == '/':
dtobs = dtobs.split('/')[::-1]
else:
_warn.warn('Wrong "DATE-OBS" in header! {0}'.format(dtobs))
raise SystemExit(1)
dtobs = _np.array(dtobs, dtype='int32')
return dtobs, tobs
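# Usage sketch for `check_dtobs` combined with `_jdcal` to build an MJD
# (the date string is an arbitrary example without fractional seconds):
# ymd, dayfrac = check_dtobs('2008-06-13T04:10:00')
# mjd = _jdcal.gcal2jd(*ymd)[1] + dayfrac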
# TODO: Check if obsolete
def overplotsubdirs(path, star, limits=(6540, 6600), showleg=True):
"""
    Plot the spectra of the star `star` found within the directory `path`.
    Currently, the plot is made between the `limits` values (Angstroms).
    Generates the files `path/star/star.log` and `path/star/star_specs.png`.
"""
# path = _os.getcwd()
# star = _phc.user_input('Type the star name: ')
# ref0 = 6540
# ref1 = 6600
ref0, ref1 = limits
if not _os.path.exists('{0}/{1}'.format(path, star)):
_os.system('mkdir {0}/{1}'.format(path, star))
f0 = open('{0}/{1}/{1}.log'.format(path, star), 'w')
nights = [o for o in _os.listdir(path) if _os.path.isdir('{0}/{1}'.
format(path, o))]
i = 0
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
srv = _glob('%s/%s/%s/*.rv.fits' % (path, night, target))
if len(srv) != len(scal):
print('# Specs without dopcor at %s!' % night)
srv = scal
# legendl += (night,)
for cal in scal:
imfits = _pyfits.open(cal)
spec = imfits[0].data
lbda = _np.arange(len(spec)) * \
imfits[0].header['CDELT1'] + \
imfits[0].header['CRVAL1']
# a = _phc.user_input('type to continue: ')
if lbda[-1] > 6560: # and flag == '1':
min_dif = min(abs(lbda - ref0))
a0 = _np.where(abs(lbda - ref0) == min_dif)[0][0]
min_dif = min(abs(lbda - ref1))
a1 = _np.where(abs(lbda - ref1) == min_dif)[0][0]
spec = normalize_range(lbda, spec, a0, a1)
msg = '{0}, {1}, {2}'.format((0.1 * i), night, cal)
print(msg)
f0.writelines(msg + '\n')
try:
leg = imfits[0].header['DATE-OBS']
except:
leg = imfits[0].header['FRAME']
_plt.plot(lbda, spec, label=leg, alpha=0.7,
color=_phc.colors[_np.mod(i,
len(_phc.colors))])
i += 1
else:
print('# Data not reduced for %s at %s!' % (star, night))
msg = '{0}, {1}, {2}'.format('NC', night, 'None')
f0.writelines(msg + '\n')
if showleg:
legend = _plt.legend(loc=(0.75, .05), labelspacing=0.1)
_plt.setp(legend.get_texts(), fontsize='small')
_plt.xlim([ref0, ref1])
_plt.ylim([-1, 5])
# _plt.xlabel('vel. (km/s)')
_plt.savefig('{0}/{1}/{1}_specs.png'.format(path, star))
_plt.close()
f0.close()
#
# Ha = False # False do HeI 6678
#
# for i in range(len(ifits)):
# imfits = _pyfits.open(ifits[i])
# print imfits[0].header[3]
# specs[i][:len(imfits[0].data)] = imfits[0].data
# lbds[i] = _np.arange(len(specs[i]))*imfits[0].header['CDELT1']+
# imfits[0].header['CRVAL1']
# if Ha:
# if i == 0:
# lbds[i] = (lbds[i]-6561.5)/6561.5*3e5
# else:
# lbds[i] = (lbds[i]-6562.8)/6562.8*3e5
# else:
# if i == 0:
# lbds[i] = (lbds[i]-6676.8)/6676.8*3e5
# else:
# lbds[i] = (lbds[i]-6678.)/6678.*3e5
#
# a = _np.where( abs(lbds[i]+1000) == min(abs(lbds[i]+1000)) )
# b = _np.where( abs(lbds[i]-1000) == min(abs(lbds[i]-1000)) )
#
# specs[i] = normalize_range(lbds[i],specs[i],a,b)
#
# legendl += [imfits[0].header['DATE-OBS']]
#
# figure(2)
# for i in range(len(specs)):
# plot(lbds[i], specs[i], label=legendl[i])
#
# legend(legendl, 'lower right')
# legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) #'lower right'
# xlim([-1000,1000])
# if Ha:
# title('Halpha profile from LNA-Janot for Achernar')
# ylim([.65,1.1])
# else:
# title('HeI 6678 profile from LNA-Janot for Achernar')
# ylim([.9,1.05])
#
# legend = _plt.legend(legendl, loc=(0.75, .05), labelspacing=0.1)
# _plt.setp(legend.get_texts(), fontsize='small')
#
# xlabel('vel. (km/s)')
print('# Plot done!')
return
def diffplotsubdirs(path, star, limits=(6540, 6600)):
"""
    Plot the spectra of the star `star` found within the directory `path`.
    Currently, the plot is made between the `limits` values (Angstroms).
    Generates the files `path/star/star.log` and
    `path/star/star_specs_dif.png`.
"""
ref0, ref1 = limits
if not _os.path.exists('{0}/{1}'.format(path, star)):
_os.system('mkdir {0}/{1}'.format(path, star))
# f0 = open('{0}/{1}/{1}.log'.format(path, star), 'w')
nights = [o for o in _os.listdir(path) if _os.path.isdir('{0}/{1}'.
format(path, o))]
i = 0
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
srv = _glob('%s/%s/%s/*.rv.fits' % (path, night, target))
if len(srv) != len(scal):
                        print('# Specs without dopcor at %s!' % night)
srv = scal
# legendl += (night,)
for cal in scal:
imfits = _pyfits.open(cal)
spec = imfits[0].data
lbda = _np.arange(len(spec)) * imfits[0].\
header['CDELT1'] + imfits[0].header['CRVAL1']
# a = _phc.user_input('type to continue: ')
if lbda[0] > 5500: # and flag == '1':
min_dif = min(abs(lbda - ref0))
a0 = _np.where(abs(lbda - ref0) == min_dif)[0][0]
min_dif = min(abs(lbda - ref1))
a1 = _np.where(abs(lbda - ref1) == min_dif)[0][0]
spec = normalize_range(lbda, spec, a0, a1) + \
(0.1 * i)
# print (0.1 * i)
try:
leg = imfits[0].header['DATE-OBS']
except:
leg = imfits[0].header['FRAME']
_plt.plot([ref0, ref1], [1 + 0.1 * i, 1 + 0.1 * i],
'k--', alpha=0.5)
_plt.plot(lbda, spec, label=leg,
color=_phc.colors[i])
i += 1
else:
print('# Data not reduced for %s at %s!' % (star, night))
legend = _plt.legend(loc=(0.75, .05), labelspacing=0.1)
_plt.setp(legend.get_texts(), fontsize='small')
_plt.xlim([ref0, ref1])
# _plt.xlabel('vel. (km/s)')
_plt.savefig('{0}/{1}/{1}_specs_dif.png'.format(path, star))
#
# Ha = False # False do HeI 6678
#
# for i in range(len(ifits)):
# imfits = _pyfits.open(ifits[i])
# print imfits[0].header[3]
# specs[i][:len(imfits[0].data)] = imfits[0].data
# lbds[i] = _np.arange(len(specs[i]))*imfits[0].header['CDELT1']+
# imfits[0].header['CRVAL1']
# if Ha:
# if i == 0:
# lbds[i] = (lbds[i]-6561.5)/6561.5*3e5
# else:
# lbds[i] = (lbds[i]-6562.8)/6562.8*3e5
# else:
# if i == 0:
# lbds[i] = (lbds[i]-6676.8)/6676.8*3e5
# else:
# lbds[i] = (lbds[i]-6678.)/6678.*3e5
#
# a = _np.where( abs(lbds[i]+1000) == min(abs(lbds[i]+1000)) )
# b = _np.where( abs(lbds[i]-1000) == min(abs(lbds[i]-1000)) )
#
# specs[i] = normalize_range(lbds[i],specs[i],a,b)
#
# legendl += [imfits[0].header['DATE-OBS']]
#
# figure(2)
# for i in range(len(specs)):
# plot(lbds[i], specs[i], label=legendl[i])
#
# legend(legendl, 'lower right')
# legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) #'lower right'
# xlim([-1000,1000])
# if Ha:
# title('Halpha profile from LNA-Janot for Achernar')
# ylim([.65,1.1])
# else:
# title('HeI 6678 profile from LNA-Janot for Achernar')
# ylim([.9,1.05])
#
# legend = _plt.legend(legendl, loc=(0.75, .05), labelspacing=0.1)
# _plt.setp(legend.get_texts(), fontsize='small')
#
# xlabel('vel. (km/s)')
print('# Plot done!')
return
def refplotsubdirs(path, star, limits=(6540, 6600)):
"""
    Plot the spectra of the star `star` found within the directory `path`.
    Currently, the plot is made between the `limits` values (Angstroms).
    Generates the files `path/star/star.log` and
    `path/star/star_specs_<ref date>.png`.
"""
ref0, ref1 = limits
if not _os.path.exists('{0}/{1}'.format(path, star)):
_os.system('mkdir {0}/{1}'.format(path, star))
f0 = open('{0}/{1}/{1}.log'.format(path, star), 'w')
nights = [o for o in _os.listdir(path) if
_os.path.isdir('{0}/{1}'.format(path, o))]
i = 0
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
srv = _glob('%s/%s/%s/*.rv.fits' % (path, night, target))
if len(srv) != len(scal):
srv = scal
for cal in scal:
imfits = _pyfits.open(cal)
spec = imfits[0].data
lbda = _np.arange(len(spec)) * imfits[0].\
header['CDELT1'] + imfits[0].header['CRVAL1']
# a = _phc.user_input('type to continue: ')
if lbda[0] > 5500: # and flag == '1':
min_dif = min(abs(lbda - ref0))
a0 = _np.where(abs(lbda - ref0) == min_dif)[0][0]
min_dif = min(abs(lbda - ref1))
a1 = _np.where(abs(lbda - ref1) == min_dif)[0][0]
spec = normalize_range(lbda, spec, a0, a1)
# print (0.1 * i)
leg = imfits[0].header['DATE-OBS']
refleg = '2012-11-20T23:51:37.392'
refleg = '2008-06-13'
if leg == refleg:
f0 = open('{0}/{1}/ref.txt'.format(path, star),
'w')
f0.writelines([str(x) + '\t' for x in lbda])
f0.writelines('\n')
f0.writelines([str(x) + '\t' for x in spec])
f0.writelines('\n')
f0.close()
i += 1
else:
print('# Data not reduced for %s at %s!' % (star, night))
f0 = open('{0}/{1}/ref.txt'.format(path, star))
lines = f0.readlines()
f0.close()
specref = _np.array(lines[1].split(), dtype=float)
lbdaref = _np.array(lines[0].split(), dtype=float)
func = _interpolate.interp1d(lbdaref, specref) # , kind='cubic')
lbdaref = _np.linspace(ref0, ref1, 5000)
specref = func(lbdaref)
i = 0
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
srv = _glob('%s/%s/%s/*.rv.fits' % (path, night, target))
if len(srv) != len(scal):
print('# Specs without dopcor at %s!' % night)
srv = scal
# legendl += (night,)
for cal in scal:
imfits = _pyfits.open(cal)
spec = imfits[0].data
lbda = _np.arange(len(spec)) * imfits[0].\
header['CDELT1'] + imfits[0].header['CRVAL1']
# a = _phc.user_input('type to continue: ')
if lbda[0] > 5500: # and flag == '1':
min_dif = min(abs(lbda - ref0))
a0 = _np.where(abs(lbda - ref0) == min_dif)[0][0]
min_dif = min(abs(lbda - ref1))
a1 = _np.where(abs(lbda - ref1) == min_dif)[0][0]
spec = normalize_range(lbda, spec, a0, a1)
func = _interpolate.interp1d(lbda, spec)
# , kind='cubic')
                            # There were 'out-of-bounds' problems... one
                            # spectrum was out of order:
# print imfits[0].header['CDELT1'],
# imfits[0].header['CRVAL1'], cal
spec = func(lbdaref)
# print (0.1 * i)
try:
leg = imfits[0].header['DATE-OBS']
except:
leg = imfits[0].header['FRAME']
if i < 130:
_plt.plot(lbdaref, spec - specref, label=leg,
alpha=0.8, color=_phc.colors[i])
i += 1
else:
print('# Data not reduced for %s at %s!' % (star, night))
legend = _plt.legend(loc=(0.75, .05), labelspacing=0.1)
_plt.setp(legend.get_texts(), fontsize='small')
_plt.xlim([ref0, ref1])
_plt.title('Ref.= %s' % refleg)
# _plt.xlabel('vel. (km/s)')
_plt.savefig('{0}/{1}/{1}_specs_{2}.png'.format(path, star, refleg[:10]))
#
# Ha = False # False do HeI 6678
#
# for i in range(len(ifits)):
# imfits = _pyfits.open(ifits[i])
# print imfits[0].header[3]
# specs[i][:len(imfits[0].data)] = imfits[0].data
# lbds[i] = _np.arange(len(specs[i]))*imfits[0].header['CDELT1']+\
# imfits[0].header['CRVAL1']
# if Ha:
# if i == 0:
# lbds[i] = (lbds[i]-6561.5)/6561.5*3e5
# else:
# lbds[i] = (lbds[i]-6562.8)/6562.8*3e5
# else:
# if i == 0:
# lbds[i] = (lbds[i]-6676.8)/6676.8*3e5
# else:
# lbds[i] = (lbds[i]-6678.)/6678.*3e5
#
# a = _np.where( abs(lbds[i]+1000) == min(abs(lbds[i]+1000)) )
# b = _np.where( abs(lbds[i]-1000) == min(abs(lbds[i]-1000)) )
#
# specs[i] = normalize_range(lbds[i],specs[i],a,b)
#
# legendl += [imfits[0].header['DATE-OBS']]
#
# figure(2)
# for i in range(len(specs)):
# plot(lbds[i], specs[i], label=legendl[i])
#
# legend(legendl, 'lower right')
# legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) #'lower right'
# xlim([-1000,1000])
# if Ha:
# title('Halpha profile from LNA-Janot for Achernar')
# ylim([.65,1.1])
# else:
# title('HeI 6678 profile from LNA-Janot for Achernar')
# ylim([.9,1.05])
#
# legend = _plt.legend(legendl, loc=(0.75, .05), labelspacing=0.1)
# _plt.setp(legend.get_texts(), fontsize='small')
#
# xlabel('vel. (km/s)')
print('# Plot done!')
return
def overplotsubdirs2(path, star, limits=(6540, 6600)):
"""
    Plot the spectra of the star `star` found within the directory `path`.
    Currently, the plot is made between the `limits` values (Angstroms).
    There is a (rudimentary) criterion here for selecting the spectra.
    Generates the files `path/star/star.log` and `path/star/star_specs2.png`.
"""
ref0, ref1 = limits
if not _os.path.exists('{0}/{1}'.format(path, star)):
_os.system('mkdir {0}/{1}'.format(path, star))
f0 = open('{0}/{1}/{1}.log'.format(path, star), 'w')
nights = [o for o in _os.listdir(path) if _os.path.isdir('{0}/{1}'.
format(path, o))]
ax = _plt.figure()
i = 0
for night in nights:
targets = [o for o in _os.listdir('%s/%s' % (path, night)) if
_os.path.isdir('%s/%s/%s' % (path, night, o))]
for target in targets:
if target.find(star) > -1:
scal = _glob('%s/%s/%s/*.cal.fits' % (path, night, target))
if len(scal) > 0:
srv = _glob('%s/%s/%s/*.rv.fits' % (path, night, target))
if len(srv) != len(scal):
print('# Specs without dopcor at %s!' % night)
srv = scal
# legendl += (night,)
for cal in scal:
imfits = _pyfits.open(cal)
spec = imfits[0].data
lbda = _np.arange(len(spec)) * imfits[0].\
header['CDELT1'] + imfits[0].header['CRVAL1']
# a = _phc.user_input('type to continue: ')
if lbda[0] > 5500: # and flag == '1':
min_dif = min(abs(lbda - ref0))
a0 = _np.where(abs(lbda - ref0) == min_dif)[0][0]
min_dif = min(abs(lbda - ref1))
a1 = _np.where(abs(lbda - ref1) == min_dif)[0][0]
spec = normalize_range(lbda, spec, a0, a1)
# print (0.1 * i), night
prtcolor = _phc.colors[i]
try:
leg = imfits[0].header['DATE-OBS']
except:
leg = imfits[0].header['FRAME']
check = False
if leg.find('2012-11-20T23:51:37.392') != -1:
leg = '2012-11-20'
prtcolor = _phc.colors[0]
check = True
elif leg.find('22/01/2013') != -1:
leg = '2013-01-22'
check = True
# elif leg.find('03/07/2013') != -1:
# leg = '2013-07-03'
# check = True
elif leg.find('28/07/2013') != -1:
leg = '2013-07-28'
check = True
elif leg.find('2013-11-12T01:30:38.938') != -1:
leg = '2013-11-12'
check = True
else:
print(leg)
if check:
print(cal)
_plt.plot(lbda, spec, label=leg, alpha=0.7,
color=prtcolor)
i += 1
else:
msg = '# Data not reduced for %s at %s!' % (star, night)
print(msg)
f0.writelines(msg)
font = { 'size': 16, }
legend = _plt.legend(loc=(0.75, .05), labelspacing=0.1)
# _plt.setp(legend.get_texts(), fontsize='small')
_plt.xlim([ref0, ref1])
_plt.ylim([.58, 1.2])
_plt.xlabel(r'wavelength ($\AA$)', fontdict=font)
_plt.ylabel('Normalized flux', fontdict=font)
# _plt.xlabel('vel. (km/s)')
_plt.savefig('{0}/{1}/{1}_specs2.png'.format(path, star))
_plt.close()
f0.close()
#
# Ha = False # False do HeI 6678
#
# for i in range(len(ifits)):
# imfits = _pyfits.open(ifits[i])
# print imfits[0].header[3]
# specs[i][:len(imfits[0].data)] = imfits[0].data
# lbds[i] = _np.arange(len(specs[i]))*imfits[0].header['CDELT1']+
# imfits[0].header['CRVAL1']
# if Ha:
# if i == 0:
# lbds[i] = (lbds[i]-6561.5)/6561.5*3e5
# else:
# lbds[i] = (lbds[i]-6562.8)/6562.8*3e5
# else:
# if i == 0:
# lbds[i] = (lbds[i]-6676.8)/6676.8*3e5
# else:
# lbds[i] = (lbds[i]-6678.)/6678.*3e5
#
# a = _np.where( abs(lbds[i]+1000) == min(abs(lbds[i]+1000)) )
# b = _np.where( abs(lbds[i]-1000) == min(abs(lbds[i]-1000)) )
#
# specs[i] = normalize_range(lbds[i],specs[i],a,b)
#
# legendl += [imfits[0].header['DATE-OBS']]
#
# figure(2)
# for i in range(len(specs)):
# plot(lbds[i], specs[i], label=legendl[i])
#
# legend(legendl, 'lower right')
# legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) #'lower right'
# xlim([-1000,1000])
# if Ha:
# title('Halpha profile from LNA-Janot for Achernar')
# ylim([.65,1.1])
# else:
# title('HeI 6678 profile from LNA-Janot for Achernar')
# ylim([.9,1.05])
#
# legend = _plt.legend(legendl, loc=(0.75, .05), labelspacing=0.1)
# _plt.setp(legend.get_texts(), fontsize='small')
#
# xlabel('vel. (km/s)')
print('# Plot done!')
return
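# A minimal usage sketch (not part of the original routine above): it assumes
# the directory layout `path/night/target/*.cal.fits` that overplotsubdirs2()
# walks; the path and star name below are placeholders.
def _example_overplotsubdirs2():
    # Writes `path/star/star.log` and `path/star/star_specs2.png`; no return value.
    overplotsubdirs2('/data/lna2012', 'aeri', limits=(6540, 6600))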
def overPlotLineSeries(fullseds, obsers=[0], lbc=.6564606, fmt=['png'],
convgauss=0., frac=0., addsuf='', labels=None, hwidth=1000., ssize=.05,
outpath='', ylim=[.7, 2.2], cmapn='gnuplot'):
"""Generate overplot spec. line from a HDUST mod list, separated by
observers.
Observers config. must be the same between models in `fullseds` list.
If `convgauss` > 0, do a gaussian convolution.
"""
if labels is None:
labels = [''] * len(fullseds)
for obs in obsers:
fig, ax = _plt.subplots()
fig2, ax2 = _plt.subplots()
k = obsers.index(obs)
for file in fullseds:
i = fullseds.index(file)
sed2data = _hdt.readfullsed2(file)
obsdegs = (_np.arccos(sed2data[:, 0, 0]) * 180 / _np.pi)[obsers]
obsdegs = list(obsdegs)
(x, yo) = lineProf(sed2data[obs, :, 2], sed2data[obs, :, 3],
lbc=lbc, hwidth=hwidth + 3 * convgauss, ssize=ssize)
y1 = yo
y2 = 0.
if convgauss > 0:
step = _np.min([x[j + 1] - x[j] for j in range(len(x) - 1)])
xn = _np.arange(-hwidth-3*convgauss, hwidth+3*convgauss+step,
step)
cf = _phc.normgauss(convgauss, x=xn)
yo = _np.interp(xn, x, yo)
x = xn
y1 = yo * (1 - frac)
y2 = _np.convolve(yo * frac, cf / _np.trapz(cf), 'same')
ax2.plot(x, y1, color=_phc.colors[_np.mod(i,
len(_phc.colors))])
ax2.plot(x, y2, color=_phc.colors[_np.mod(i,
len(_phc.colors))])
y = y1 + y2
# y = linfit(x, y1+y2)
if file == fullseds[0]:
ax.plot(x, y, label='{0:02.1f} deg. {1}'.format(obsdegs[k],
labels[i]), color=_phc.colors[_np.mod(i, len(_phc.colors))])
# ew0 = EWcalc(x, y, vw=hwidth)
else:
ax.plot(x, y, color=_phc.colors[_np.mod(i, len(_phc.colors))],
label=labels[i])
# ewf = EWcalc(x, y, vw=hwidth)
plot_line_str(fig, ax, lbc=lbc, ylim=ylim, cmapn=cmapn, xlim=[-hwidth,
hwidth])
figname = outpath + 'modsover_lbc{1:.4f}_obs{0:02.1f}{2}'.\
format(obsdegs[k], lbc, addsuf)
_phc.savefig(fig, figname, fmt)
plot_line_str(fig2, ax2, lbc=lbc, ylim=ylim, cmapn=cmapn,
xlim=[-hwidth, hwidth])
figname = outpath + 'modsover_lbc{1:.4f}_obs{0:02.1f}{2}Extra'.\
format(obsdegs[k], lbc, addsuf)
        _phc.savefig(fig2, figname, fmt)
return
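# A minimal usage sketch, assuming a set of HDUST fullsed files matching the
# glob below; the inclination indices, 50 km/s convolution width and output
# path are illustrative placeholders.
def _example_overPlotLineSeries():
    fullseds = sorted(_glob('mod*/fullsed_mod*.sed2'))
    overPlotLineSeries(fullseds, obsers=[0, 2], lbc=.6564606, hwidth=1000.,
        convgauss=50., frac=0.3, labels=fullseds, outpath='figs/',
        ylim=[.7, 2.2])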
def overPlotLineFits(specs, lbc=.6564606, fmt=['png'], hwidth=1500.,
ylim=None, yzero=False, addsuf='', dlim=None, cmapn='jet', xlim=None,
outpath=''):
"""Generate overplot spec. line from a FITS file list.
"""
fig, ax = _plt.subplots()
for spec in specs:
i = specs.index(spec)
print("# Reading {0}...".format(_phc.trimpathname(spec)[1]))
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(spec)
(x, y) = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
if dateobs.find('-') > 0:
dateobs = dateobs[:10]
elif dateobs.find('/') > 0:
dtobs = dateobs.split('/')[::-1]
dateobs = "-".join(dtobs)
if dlim is None:
cor = _phc.colors[_np.mod(i, len(_phc.colors))]
else:
cor = _phc.gradColor([MJD], min=dlim[0], max=dlim[1],
cmapn=cmapn)[0]
ax.plot(x, y, label='{0}'.format(dateobs), color=cor)
ylabel = 'Overplotted spectra'
fig, ax = plot_line_str(fig, ax, lbc=lbc, ylabel=ylabel, xlim=xlim,
dlim=dlim, cmapn=cmapn, ylim=ylim)
figname = outpath + 'fitsover_lbc{1:.4f}{0}'.format(addsuf, lbc)
_phc.savefig(fig, figname, fmt)
return
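# A minimal usage sketch, assuming observed FITS spectra matching the glob
# below; the MJD limits in `dlim` are placeholders and `cmapn` can be any
# Matplotlib colormap name.
def _example_overPlotLineFits():
    specs = sorted(_glob('data/*_Ha.cal.fits'))
    overPlotLineFits(specs, lbc=.6564606, hwidth=1500., dlim=[56000, 57500],
        cmapn='inferno', outpath='figs/', addsuf='_Ha')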
def incrPlotLineSeries(fullseds, obsers=[0], lbc=.6564606, fmt=['png'],
addsuf='', outpath=''):
"""Generate incremented spec. line from a HDUST mod list, separated by
observers. The increment is 0.1 for each file in fullseds sequence.
Observers config. must be the same between models in `fullseds` list.
"""
for obs in obsers:
fig, ax = _plt.subplots()
k = obsers.index(obs)
for file in fullseds:
i = fullseds.index(file)
sed2data = _hdt.readfullsed2(file)
obsdegs = (_np.arccos(sed2data[:, 0, 0]) * 180 / _np.pi)[obsers]
obsdegs = list(obsdegs)
(x, y) = lineProf(sed2data[obs, :, 2], sed2data[obs, :, 3],
lbc=lbc)
if file == fullseds[0]:
ax.plot(x, y + 0.1 * i, label='{0:02.1f} deg.'.format(
obsdegs[k]), color=_phc.colors[_np.mod(i,
len(_phc.colors))])
else:
ax.plot(x, y + 0.1 * i, color=_phc.colors[_np.mod(i,
len(_phc.colors))])
ax.set_title(u'lbc = {0:.5f} $\mu$m'.format(lbc))
ax.legend(loc='best', fancybox=True, framealpha=0.5)
figname = outpath + 'modsincr_lbc{1:.4f}_obs{0:02.1f}{2}'.\
format(obsdegs[k], lbc, addsuf)
for f in fmt:
print('# Saved {1}.{0}'.format(f, figname))
fig.savefig(figname + '.{0}'.format(f), transparent=True)
_plt.close()
return
def incrPlotLineFits(specs, lbc=.6564606, fmt=['png'], hwidth=1500.,
yzero=False, addsuf='', dlim=None, cmapn='jet', xlim=None, outpath='',
ylim=None):
"""Generate incremented spec. line from FITS files list.
The increment is 0.1 for each file in sequence.
"""
fig, ax = _plt.subplots()
for spec in specs:
i = specs.index(spec)
print("# Reading {0}...".format(_phc.trimpathname(spec)[1]))
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(spec)
(x, y) = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
if dateobs.find('-') > 0:
dateobs = dateobs[:10]
elif dateobs.find('/') > 0:
dtobs = dateobs.split('/')[::-1]
dateobs = "-".join(dtobs)
if dlim is None:
cor = _phc.colors[_np.mod(i, len(_phc.colors))]
else:
cor = _phc.gradColor([MJD], min=dlim[0], max=dlim[1],
cmapn=cmapn)[0]
ax.plot(x, y + 0.1 * i, label='{0}'.format(dateobs), color=cor)
if yzero:
ylim = ax.get_ylim()
ax.plot([0, 0], ylim, ls='-', color='Gray')
ylabel = 'Spaced spectra'
fig, ax = plot_line_str(fig, ax, lbc=lbc, ylabel=ylabel, xlim=xlim,
dlim=dlim, cmapn=cmapn, ylim=ylim)
figname = outpath + 'fitsincr_lbc{1:.4f}{0}'.format(addsuf, lbc)
_phc.savefig(fig, figname, fmt)
return
def diffPlotLineSeries(fullseds, obsers=[0], lbc=.6564606, fmt=['png'],
rvel=None, rflx=None, hwidth=1000., outpath='', addsuf=''):
"""Generate overplot of DIFFERENCE spec. line from a HDUST mod list.
The model will be linearly interpolated
with the reference spec. If none is given as reference,
then it assumes the first of the list.
    It is recommended to run first (rvel, rflx) = lineProf(rvel, rflx,
lbc=lbc, hwidth=hwidth).
Observers config. must be the same between models in
`fullseds` list.
"""
for obs in obsers:
fig, ax = _plt.subplots()
k = obsers.index(obs)
for file in fullseds:
i = fullseds.index(file)
sed2data = _hdt.readfullsed2(file)
obsdegs = (_np.arccos(sed2data[:, 0, 0]) * 180 / _np.pi)[obsers]
obsdegs = list(obsdegs)
            (x, y) = lineProf(sed2data[obs, :, 2], sed2data[obs, :, 3],
                lbc=lbc, hwidth=hwidth)
if rvel is None or rflx is None:
refspec = _hdt.readfullsed2(fullseds[0])
(vel, flx) = lineProf(refspec[obs, :, 2], refspec[obs, :, 3],
lbc=lbc, hwidth=hwidth)
else:
flx = _np.interp(x, rvel, rflx)
if file == fullseds[0]:
ax.plot(x, y - flx, label='{0:02.1f} deg.'.format(obsdegs[k]),
color=_phc.colors[_np.mod(i, len(_phc.colors))])
else:
ax.plot(x, y - flx, color=_phc.colors[_np.mod(i,
len(_phc.colors))])
ax.set_title(u'lbc = {0:.5f} $\mu$m'.format(lbc))
ax.set_ylabel('Difference spectra (spec - ref.)')
ax.legend(fontsize=8, loc='best', fancybox=True, framealpha=0.5)
figname = outpath + 'modsdiff_lbc{1:.4f}_obs{0:02.1f}{2}'.\
format(obsdegs[k], lbc, addsuf)
        for f in fmt:
            print('# Saved {1}.{0}'.format(f, figname))
            fig.savefig(figname + '.{0}'.format(f), transparent=True)
        _plt.close()
return
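# A minimal usage sketch following the recommendation in the docstring above:
# the reference profile is extracted with lineProf() from the first model
# before computing the differences. File names are placeholders.
def _example_diffPlotLineSeries():
    fullseds = ['mod01/fullsed_mod01.sed2', 'mod02/fullsed_mod02.sed2']
    ref = _hdt.readfullsed2(fullseds[0])
    rvel, rflx = lineProf(ref[0, :, 2], ref[0, :, 3], lbc=.6564606,
        hwidth=1000.)
    diffPlotLineSeries(fullseds, obsers=[0, 1], lbc=.6564606, rvel=rvel,
        rflx=rflx, hwidth=1000., outpath='figs/')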
def diffPlotLineFits(specs, lbc=.6564606, fmt=['png'], xlim=None,
rvel=None, rflx=None, hwidth=1500., addsuf='', cmapn='jet', dlim=None,
outpath='', ylim=None):
"""Generate overplot of DIFFERENCE spec. line from a FITS files list.
The observations will be linearly interpolated
with the reference spec. If none is given as reference,
then it assumes the first of the list.
    It is recommended to run first (rvel, rflx) = lineProf(rvel, rflx,
lbc=lbc, hwidth=hwidth).
    If `cmapn` is None or empty, the phc.colors vector is read.
"""
fig, ax = _plt.subplots()
for spec in specs:
i = specs.index(spec)
print("# Reading {0}...".format(_phc.trimpathname(spec)[1]))
wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(spec)
(x, y) = lineProf(wl, flux, lbc=lbc, hwidth=hwidth)
if rvel is None or rflx is None:
# wl0, flux0, MJD, dateobs0, datereduc, fitsfile = \
# loadfits(specs[0])
# (rvel,flx) = lineProf(wl0, flux0, lbc=lbc, hwidth=hwidth)
# flx = _np.interp(x, rvel, rflx)
rvel = x
rflx = y
flx = y[:]
else:
flx = _np.interp(x, rvel, rflx)
# if spec == specs[0]:
# ax.plot(x, y-flx, label='{0}'.format(dateobs), \
# color= _phc.colors[_np.mod(i, len(_phc.colors))])
# else:
# ax.plot(x, y-flx, color= _phc.colors[_np.mod(i,
# len(_phc.colors))])
if dateobs.find('-') > 0:
dateobs = dateobs[:10]
elif dateobs.find('/') > 0:
dtobs = dateobs.split('/')[::-1]
dateobs = "-".join(dtobs)
if dlim is None:
cor = _phc.colors[_np.mod(i, len(_phc.colors))]
else:
cor = _phc.gradColor([MJD], min=dlim[0], max=dlim[1],
cmapn=cmapn)[0]
ax.plot(x, y - flx, label='{0}'.format(dateobs), color=cor)
ylabel = 'Difference spectra'
fig, ax = plot_line_str(fig, ax, lbc=lbc, ylabel=ylabel, xlim=xlim,
dlim=dlim, cmapn=cmapn, ylim=ylim)
figname = outpath + 'fitsdiff_lbc{1:.4f}{0}'.format(addsuf, lbc)
_phc.savefig(fig, figname, fmt)
return
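# A minimal usage sketch: difference spectra of observed FITS files against
# the first observation, used here as the reference. The glob pattern and the
# MJD limits are placeholders.
def _example_diffPlotLineFits():
    specs = sorted(_glob('data/*_Ha.cal.fits'))
    wl, flux, MJD, dateobs, datereduc, fitsfile = loadfits(specs[0])
    rvel, rflx = lineProf(wl, flux, lbc=.6564606, hwidth=1500.)
    diffPlotLineFits(specs, lbc=.6564606, rvel=rvel, rflx=rflx,
        dlim=[56000, 57500], cmapn='viridis', outpath='figs/')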
def diffPlotLineObs(fullseds, obsers=[0], lbc=.6564606, fmt=['png'],
rvel=None, rflx=None, hwidth=1000., addsuf='', outpath=''):
"""Generate overplot of DIFFERENCE spec. line from a HDUST OBSERVERS list.
The model will be linearly interpolated
with the reference spec. If none is given as reference,
then it assumes the first observer of the list.
    It is recommended to run first (rvel, rflx) = lineProf(rvel, rflx,
lbc=lbc, hwidth=hwidth).
Observers config. must be the same between models in
`fullseds` list.
"""
for file in fullseds:
fig, ax = _plt.subplots()
sed2data = _hdt.readfullsed2(file)
obsdegs = (_np.arccos(sed2data[:, 0, 0]) * 180 / _np.pi)[obsers]
obsdegs = list(obsdegs)
for obs in obsers:
i = obsers.index(obs)
(x, y) = lineProf(sed2data[obs, :, 2], sed2data[obs, :, 3],
lbc=lbc, hwidth=hwidth)
if rvel is None or rflx is None:
(vel, flx) = lineProf(sed2data[obsers[0], :, 2],
sed2data[obsers[0], :, 3], lbc=lbc, hwidth=hwidth)
else:
flx = _np.interp(x, rvel, rflx)
ax.plot(x, y - flx, label='{0:02.1f} deg.'.format(obsdegs[i]),
color=_phc.colors[_np.mod(i, len(_phc.colors))])
ax.set_title(u'lbc={0:.5f}$\mu$m, {1}'.format(lbc,
_phc.trimpathname(file)[1]))
ax.set_ylabel('Difference spectra (spec - ref.)')
ax.legend(fontsize=8, loc='best', fancybox=True, framealpha=0.5)
figname = outpath + 'modsdiff_lbc{1:.4f}{0}'.format(addsuf, lbc)
for f in fmt:
print('# Saved {1}.{0}'.format(f, figname))
fig.savefig(figname + '.{0}'.format(f), transparent=True)
_plt.close()
return
def max_func_pts(x, y, ws=0.01, avgbox=3):
""" `ws` window size where the maximum will be evaluated. Example: `ws=0.02`
corresponds to 2% of the length of the input. """
x, y = (_np.array(x), _np.array(y))
N = len(x)
parts = _phc.splitequal(N*ws, N)
n = len(parts)
xout, yout = (_np.zeros(n), _np.zeros(n))
for i in range(n):
p = parts[i]
Y = y[p[0]:p[1]]
X = x[p[0]:p[1]]
idx = _np.argsort(Y)
xout[i] = _np.average(X[idx][-avgbox:])
yout[i] = _np.average(Y[idx][-avgbox:])
return xout, yout
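# A minimal example, assuming the module-level `_np` alias used above: tracing
# the upper envelope of a noisy, roughly flat spectrum (a crude pseudo-continuum
# estimate). With ws=0.05 each window spans ~5% of the input, giving ~20 points.
def _example_max_func_pts():
    x = _np.linspace(6500., 6600., 2000)
    y = 1. + 0.02 * _np.random.randn(len(x))
    xout, yout = max_func_pts(x, y, ws=0.05, avgbox=3)
    return xout, yout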
def sum_ec(fwl, fflx):
    """ Interpolate the (wavelength, flux) pieces in `fwl`/`fflx` onto a common
    wavelength grid (with the finest step found) and sum the fluxes where the
    pieces overlap.
    """
dmin = _np.inf
wlmin = _np.inf
wlmax = 0
for f in fwl:
if _np.min(_np.diff(f)) < dmin:
dmin = _np.min(_np.diff(f))
if _np.min(f) < wlmin:
wlmin = _np.min(f)
if _np.max(f) > wlmax:
wlmax = _np.max(f)
swl = _np.arange(wlmin, wlmax, dmin)
sflx = _np.zeros(len(swl))
for i in range(len(fwl)):
idx = _np.where( (swl > _np.min(fwl[i])) & (swl < _np.max(fwl[i])) )
sflx[idx] += _np.interp(swl[idx], fwl[i], fflx[i])
return swl, sflx
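# A minimal example of sum_ec() merging two overlapping pieces (e.g. echelle
# orders). In the 6540-6560 overlap the summed flux is ~2, elsewhere ~1; a
# proper merge would still require weighting or renormalization.
def _example_sum_ec():
    wl1 = _np.linspace(6400., 6560., 800)
    wl2 = _np.linspace(6540., 6700., 800)
    swl, sflx = sum_ec([wl1, wl2], [_np.ones(len(wl1)), _np.ones(len(wl2))])
    return swl, sflx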
def lbdc2range(lbdc):
    """ Convert an array of (evenly spaced) bin centers `lbdc` into the
    corresponding array of bin edges (length ``len(lbdc) + 1``).
    """
dl = lbdc[1] - lbdc[0]
return _np.linspace(lbdc[0] - dl / 2, lbdc[-1] + dl / 2, len(lbdc) + 1)
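# A minimal example: lbdc2range() turns n evenly spaced bin centers into n+1
# bin edges.
def _example_lbdc2range():
    lbdc = _np.array([6560., 6562., 6564., 6566.])
    return lbdc2range(lbdc)   # -> array([6559., 6561., 6563., 6565., 6567.])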
def classify_specs(list_of_specs, starid, instrument, calib, comment=''):
"""Do useful things with a list of FITS specs to classify them.
It will (1) generate figures of the specs, with line info; (2) classify
the band of observation; (3) copy the file with a standard name.
"""
lines = [6562.79, 4861.35, 4340.472, 4101.734, 21655.2488]
lnames = ['Ha', 'Hb', 'Hc', 'Hd', "Brg"]
list_of_specs = list(list_of_specs)
list_of_specs.sort()
for s in list_of_specs:
print(s)
wl, flux, MJD, dateobs, datereduc, fitsfiles = loadfits(s)
fig, ax = _plt.subplots()
ax.plot(wl, flux, label=dateobs)
wlrange = [_np.min(wl), _np.max(wl)]
flxrange = [_np.min(flux), _np.max(flux)]
band = 'unknown'
# print(wlrange)
for l in lines:
if _phc.is_inside_ranges(l, wlrange):
ax.plot([l, l], flxrange, '--', color='gray')
if wlrange[0] > l*0.91 and wlrange[1] < l*1.09:
band = lnames[lines.index(l)]
if band == 'unknown':
if wlrange[1] > 9000 and wlrange[1] < 25000:
band = 'nIR'
if wlrange[0] > 5300 and wlrange[1] < 11000:
band = 'RI'
if wlrange[0] < 4100 and wlrange[1] < 6000:
band = 'BV'
if wlrange[0] < 3700 and wlrange[1] < 6000:
band = 'UV'
if wlrange[0] < 4700 and wlrange[1] > 6700:
band = 'Vis'
ax.set_title(s)
ax.legend()
figname = _os.path.splitext(s)
_phc.savefig(fig, figname=list(figname)[0])
expname = '{}_{}_{}'.format(starid, instrument, band)
if len(comment) > 0:
expname += '_' + comment
expname += "_{}_{:04d}".format( int(MJD), int(round(1e4*(MJD % 1))) )
expname += ".{}.fits".format(calib)
_copyfile(s, expname)
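# A minimal usage sketch; the glob pattern, star ID, instrument and calibration
# tag are placeholders. Each spectrum gets a diagnostic figure and a copy named
# `starid_instrument_band[_comment]_MJD_fracMJD.calib.fits`.
def _example_classify_specs():
    specs = _glob('raw/*.fits')
    classify_specs(specs, starid='achernar', instrument='MUSICOS',
        calib='wcal', comment='run1')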
def automatic_BeSS(RA, DEC, size='0.2', date_lower='1000-01-01', date_upper="3000-01-01", band_lower='6.4e-7', band_upper='6.7e-7'):
"""
    Download BeSS spectra directly from the BeSS database website, using
    VOTable parsing and pandas dataframes.
Parameters
----------
RA : str
        Right ascension [° J2000] as string
DEC : str
Declination [° J2000] as string
size: str
Radius of the cone search in degree as string
date_lower: str
Initial date in format yyyy-mm-dd as string
date_upper: str
Final date in format yyyy-mm-dd as string
    band_lower: str
Initial wavelength [meters] in scientific notation as string
band_upper: str
Final wavelength [meters] in scientific notation as string
Returns
-------
None, the routine downloads file in the script directory.
Example
-------
#Halpha for 25 Cyg from 2019-10-01 to 2020-03-27
>>> RA = "299.979"
>>> DEC = "37.04"
>>> date_lower = "2019-10-01"
>>> date_upper = "2020-03-27"
    >>> automatic_BeSS(RA, DEC, size='0.1', date_lower=date_lower, date_upper=date_upper, band_lower='6.4e-7', band_upper='6.7e-7')
#Data downloaded in the script directory
-------
#Download all Ha data of a star
>>> automatic_BeSS(RA="299.979", DEC="37.04")
Routine written by Pedro Ticiani dos Santos
IMPORTANT NOTE: When using this function, the downloaded files go to the script
directory. This is something still undergoing work.
"""
user_url = 'http://basebe.obspm.fr/cgi-bin/ssapBE.pl?POS={0},{1}&SIZE={2}&BAND={3}/{4}&TIME={5}/{6}'.format(RA, DEC, size, band_lower, band_upper, date_lower, date_upper)
r = _requests.get(url = user_url)
# xml parsed => dict
global_dict = _xmltodict.parse(r.text)
# Interesting data selection
entries_list = global_dict['VOTABLE']['RESOURCE']['TABLE']['DATA']['TABLEDATA']['TR']
# Dataframe init (eq. Table)
df01 = _pd.DataFrame()
# Browse through the entries and record it in the dataframe df01
for item in entries_list:
# Create a row for the dataframe
p01 = {'Fits URL': item['TD'][0],
'Target name': item['TD'][45],
"Target class": item['TD'][46],
"vo_format": item['TD'][1]}
# add row in progress in the dataframe
df01 = df01.append(p01, ignore_index=True)
# Dataframe init
df02 = _pd.DataFrame()
# Iteration on each row
for item in entries_list:
vo_url_fits = item['TD'][0]
try:
# Download of each file in progress with his url
file_bess = _wget.download(vo_url_fits)
# Opening FITS
fits_in_progress = _pyfits.open(file_bess)
# Retrieve header information for fits in progress
header_fits_ip = fits_in_progress[1].header
# catch potential errors
        except IOError:
            print("Error downloading fits file.")
            continue
# Create a row for the dataframe
# with VO Table value + Header infos
p02 = {'Fits URL': item['TD'][0],
'Target name': item['TD'][45],
"Target class": item['TD'][46],
"Resolution" : header_fits_ip['SPEC_RES'],
"Creation Date" : header_fits_ip['DATE']}
# add row in progress in the dataframe
df02 = df02.append(p02, ignore_index=True)
# if you want to download only the first file, change : to 1.
download = _pyfits.open(_wget.download(df02.iloc[0][:]))
# THE FILES DOWNLOADED ARE IN VOTABLE FORMAT. Some scripts must be changed
# in the .fits reading part when extracting wavelength and flux values.
# MAIN ###
if __name__ == "__main__":
pass
| gpl-3.0 |
AndreLamurias/IBRel | src/classification/ner/ensemble.py | 2 | 8188 | import logging
import math
import sys
import os
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn import svm, tree
from sklearn.externals import joblib
from classification.model import Model
from classification.results import ResultsNER
class EnsembleModel(Model):
def __init__(self, path, etype, **kwargs):
super(EnsembleModel, self).__init__(path, etype=etype, **kwargs)
self.basedir = "models/ensemble/"
self.goldstd = kwargs.get("goldstd")
self.data = {}
self.offsets = []
self.pipeline = Pipeline(
[
#('clf', SGDClassifier(loss='hinge', penalty='l1', alpha=0.0001, n_iter=5, random_state=42)),
#('clf', SGDClassifier())
# ('clf', svm.NuSVC(nu=0.01 ))
('clf', RandomForestClassifier(class_weight={False:1, True:1}, n_jobs=-1, criterion="entropy", warm_start=True))
# ('clf', tree.DecisionTreeClassifier(criterion="entropy")),
# ('clf', MultinomialNB())
# ('clf', GaussianNB())
#('clf', svm.SVC(kernel="rbf", degree=2, C=1)),
#('clf', svm.SVC(kernel="linear", C=2))
#('clf', DummyClassifier(strategy="constant", constant=True))
])
def train(self):
#train_data, labels, offsets = self.generate_data(self.etype)
print "training ensemble classifier..."
#print self.train_data, self.train_labels
pipeline = self.pipeline.fit(self.train_data, self.train_labels)
if not os.path.exists(self.basedir + self.path):
os.makedirs(self.basedir + self.path)
print "Training complete, saving to {}/{}/{}.pkl".format(self.basedir, self.path, self.path)
joblib.dump(pipeline, "{}/{}/{}.pkl".format(self.basedir, self.path, self.path))
def load_tagger(self):
self.pipeline = joblib.load("{}/{}/{}.pkl".format(self.basedir, self.path, self.path))
def test(self, corpus):
#train_data, labels, offsets = self.generate_data(self.etype, mode="test")
pred = self.pipeline.predict(self.train_data)
#print pred
#results = self.process_results(corpus)
results = ResultsNER(self.path)
results.corpus = corpus
for i, p in enumerate(pred):
if p:
sentence = corpus.get_sentence(self.offsets[i][0])
eid = sentence.tag_entity(self.offsets[i][1], self.offsets[i][2], self.etype, source=self.path, score=1)
results.entities[eid] = sentence.entities.get_entity(eid, self.path)
#else:
# print self.offsets[i]
return results
def load_data(self, corpus, flist, mode="train", doctype="all"):
"""
        Use scikit-learn to train a pipeline that classifies entities as correct
        or incorrect. The features consist of the scores of the classifiers that
        identified the entity.
:param modelname:
:return:
"""
features = set()
gs_labels = set()
# collect offsets from every model (except gold standard) and add classifier score
all_models = set()
# merge the results of this set with another set
for did in corpus.documents:
# logging.debug(did)
for sentence in corpus.documents[did].sentences:
#print sentence.entities.elist.keys()
if "goldstandard_{}".format(self.etype) in sentence.entities.elist:
sentence_eids = [e.eid for e in sentence.entities.elist["goldstandard_{}".format(self.etype)]]
# print sentence_eids, [e.eid for e in sentence.entities.elist["results/{}".format(self.goldstd)]]
else:
sentence_eids = []
# print "no gold standard", sentence.entities.elist.keys()
if "results/{}".format(self.goldstd) not in sentence.entities.elist:
print sentence.sid, "not entities", "results/{}".format(self.goldstd), sentence.entities.elist.keys()
continue
for e in sentence.entities.elist["results/{}".format(self.goldstd)]:
#print sentence_eids, e.eid
# logging.info("%s - %s" % (self.sid, s))
# use everything except what's already combined and gold standard
# if any([word in e.text for word in self.stopwords]):
# logging.info("ignored stopword %s" % e.text)
# continue
# eid_alt = e.sid + ":" + str(e.dstart) + ':' + str(e.dend)
#if not s.startswith("goldstandard") and s.endswith(etype):
# next_eid = "{0}.e{1}".format(e.sid, len(combined))
# eid_offset = Offset(e.dstart, e.dend, text=e.text, sid=e.sid, eid=next_eid)
# check for perfect overlaps only
offset = (sentence.sid, e.start, e.end)
if offset not in self.offsets:
self.offsets.append(offset)
self.data[offset] = {}
#print e.text.encode("utf8"),
for f in e.scores:
features.add(f)
#print f, ":", e.scores[f],
self.data[offset][f] = e.scores[f]
#print
if mode == "train" and e.eid in sentence_eids:
#for e in sentence.entities.elist[s]:
#offset = (sentence.sid, e.start, e.end)
gs_labels.add(offset)
#print gs_labels
# else:
# print mode, e.eid in sentence_eids, e.eid, sentence_eids
self.train_data = []
self.train_labels = []
features = sorted(list(features))
print "using these features...", features
# print gs_labels
for o in self.offsets:
of = []
for f in features:
if f in self.data[o]:
of.append(self.data[o][f])
else:
of.append(0)
self.train_data.append(of)
if mode == "train" and o in gs_labels:
self.train_labels.append(True)
else:
# print o, gs_labels
self.train_labels.append(False)
print "labels", set(self.train_labels)
# print features
# for i, l in enumerate(train_labels[:10]):
# print train_data[i], l
#return train_data, train_labels, offsets
def get_ensemble_results(self, ensemble, corpus, model):
"""
        Go through every entity in the corpus; if it was predicted true by the
        ensemble, keep it (saving it to self.entities), otherwise drop it.
"""
for did in corpus.documents:
for sentence in corpus.documents[did].sentences:
new_entities = []
for entity in sentence.entities.elist[model]:
sentence_type = "A"
if sentence.sid.endswith("s0"):
sentence_type = "T"
id = (did, "{0}:{1}:{2}".format(sentence_type, entity.dstart, entity.dend), "1")
if id not in ensemble.ids:
logging.debug("this is new! {0}".format(entity))
continue
predicted_index = ensemble.ids.index(id)
#logging.info(predicted_index)
if ensemble.predicted[predicted_index][1] > 0.5:
self.entities[entity.eid] = entity
#logging.info("good entity: {}".format(entity.text.encode("utf8")))
new_entities.append(entity)
#else:
# logging.info("bad entity: {}".format(entity.text.encode("utf8")))
sentence.entities.elist[self.name] = new_entities
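# A minimal usage sketch, not a definitive recipe: the model path, entity type
# ("chemical") and goldstd name are placeholders, and `flist` is accepted by
# load_data() but not used by it.
def _example_ensemble_flow(train_corpus, test_corpus):
    model = EnsembleModel("ensemble_chemical", "chemical", goldstd="banner")
    model.load_data(train_corpus, flist=[], mode="train")
    model.train()
    # use a fresh instance for tagging so offsets/data start empty
    tagger = EnsembleModel("ensemble_chemical", "chemical", goldstd="banner")
    tagger.load_tagger()
    tagger.load_data(test_corpus, flist=[], mode="test")
    return tagger.test(test_corpus)   # ResultsNER with the accepted entities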
self.corpus = corpus | mit |
joshua-cogliati-inl/moose | scripts/memory_logger.py | 4 | 49156 | #!/usr/bin/env python
from tempfile import TemporaryFile, SpooledTemporaryFile
import os, sys, re, socket, time, pickle, csv, uuid, subprocess, argparse, decimal, select, platform, signal
class Debugger:
"""
The Debugger class is the entry point to our stack tracing capabilities.
    It determines which debugger to inherit based on parsed arguments and
platform specs.
"""
def __init__(self, arguments):
if arguments.debugger == 'lldb':
self.debugger = lldbAPI(arguments)
else:
self.debugger = DebugInterpreter(arguments)
def getProcess(self, pid):
return self.debugger.getProcess(pid)
def getStackTrace(self, getProcess_tuple):
return self.debugger.getStackTrace(getProcess_tuple)
class lldbAPI:
def __init__(self, arguments):
self.debugger = lldb.SBDebugger.Create()
self.debugger.SetAsync(True)
def __del__(self):
lldb.SBDebugger.Destroy(self.debugger)
def getProcess(self, pid):
# Create and attach to the pid and return our debugger as a tuple
target = self.debugger.CreateTargetWithFileAndArch(None, None)
return target, pid
def getStackTrace(self, process_tuple):
target, pid = process_tuple
lldb_results = []
# reuse the process object if available
        if target.process.id != 0:
process = target.Attach(lldb.SBAttachInfo(target.process.id), lldb.SBError())
else:
process = target.Attach(lldb.SBAttachInfo(int(pid)), lldb.SBError())
# test if we succeeded at attaching to PID process
if process:
# grab thread information
lldb_results.append(process.GetThreadAtIndex(0).__str__())
# iterate through all frames and collect back trace information
for i in xrange(process.GetThreadAtIndex(0).GetNumFrames()):
lldb_results.append(process.GetThreadAtIndex(0).GetFrameAtIndex(i).__str__())
# Unfortunately we must detach each time we perform a stack
            # trace. This severely limits our sample rate. It _appears_ to
            # be a bug in LLDB's Python API. Otherwise we would be able to:
#
# process.Stop()
# ..collect back trace..
# process.Continue()
#
# instead we have to:
process.Detach()
return '\n'.join(lldb_results)
else:
return ''
class DebugInterpreter:
"""
Currently, interfacing with LLDB via subprocess is impossible. This is due to lldb not printing
to stdout, or stderr when displaying the prompt to the user (informing the user, the debugger
is ready to receive input). However, this class may someday be able to, which is why
the self.debugger variable is present.
"""
def __init__(self, arguments):
self.last_position = 0
self.debugger = arguments.debugger
def _parseStackTrace(self, gibberish):
not_gibberish = re.findall(r'\(' + self.debugger + '\) (#.*)\(' + self.debugger + '\)', gibberish, re.DOTALL)
if len(not_gibberish) != 0:
return not_gibberish[0]
else:
# Return a blank line, as to not pollute the log. Gibberish here
# usually indicates a bunch of warnings or information about
# loading symbols
return ''
def _waitForResponse(self, dbg_stdout):
# Allow a maximum of 5 seconds to obtain a debugger prompt position.
# Otherwise we can hang indefinitely
end_queue = time.time() + float(5)
while time.time() < end_queue:
dbg_stdout.seek(self.last_position)
for line in dbg_stdout:
if line == '(' + self.debugger + ') ':
self.last_position = dbg_stdout.tell()
return True
time.sleep(0.01)
return False
def getProcess(self, pid):
# Create a temporary file the debugger can write stdout/err to
dbg_stdout = SpooledTemporaryFile()
        # Create and attach to running process
process = subprocess.Popen([which(self.debugger)], stdin=subprocess.PIPE, stdout=dbg_stdout, stderr=dbg_stdout)
for command in [ 'attach ' + pid + '\n' ]:
if self._waitForResponse(dbg_stdout):
try:
process.stdin.write(command)
except:
return (False, self.debugger, 'quit unexpectedly')
else:
return (False, 'could not attach to process in allotted time')
return (process, dbg_stdout)
def getStackTrace(self, process_tuple):
process, dbg_stdout = process_tuple
# Store our current file position so we can return to it and read
# the eventual entire stack trace output
batch_position = dbg_stdout.tell()
# Loop through commands necessary to create a back trace
for command in ['ctrl-c', 'bt\n', 'c\n']:
if command == 'ctrl-c':
process.send_signal(signal.SIGINT)
else:
if self._waitForResponse(dbg_stdout):
process.stdin.write(command)
else:
dbg_stdout.seek(batch_position)
return self.detachProcess(process_tuple)
# Return to previous file position so that we can return the entire
# stack trace
dbg_stdout.seek(batch_position)
return self._parseStackTrace(dbg_stdout.read())
def detachProcess(self, process):
process, dbg_stdout = process
# Offset the position due to ctrl-c not generating a newline event
tmp_position = (dbg_stdout.tell() - 1)
for command in ['ctrl-c', 'quit\n', 'y\n']:
if command == 'ctrl-c':
process.send_signal(signal.SIGINT)
else:
# When these two variables are not equal, its a safe assumption the
# debugger is ready to receive input
if tmp_position != dbg_stdout.tell():
tmp_position = dbg_stdout.tell()
try:
process.stdin.write(command)
except:
# Because we are trying to detach and quit the debugger just pass
pass
# Always return True for a detach call. What would we do if it failed anyway?
# Why am I even leaving a comment about this?
return True
class Server:
def __init__(self, arguments):
self.arguments = arguments
self.arguments.cwd = os.getcwd()
# Test to see if we are starting as a server
if self.arguments.pbs == True:
if os.getenv('PBS_NODEFILE') != None:
# Initialize an agent, strictly for holding our stdout logs. Give it the UUID of 'server'
self.agent = Agent(self.arguments, 'server')
if self.arguments.recover:
self.logfile = WriteCSV(self.arguments.outfile[0], False)
else:
self.logfile = WriteCSV(self.arguments.outfile[0], True)
self.client_connections = []
self.startServer()
else:
print 'I could not find your PBS_NODEFILE. Is PBS loaded?'
sys.exit(1)
# If we are not a server, start the single client
else:
self.startClient()
def startServer(self):
# Setup the TCP socket
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.bind((socket.gethostname(), 0))
self.server_socket.listen(5)
(self.host, self.port) = self.server_socket.getsockname()
# We will store all connections (sockets objects) made to the server in a list
self.client_connections.append(self.server_socket)
# Launch the actual binary we want to track
self._launchJob()
# Now launch all pbs agents
self._launchClients()
# This is a try so we can handle a keyboard ctrl-c
try:
# Continue to listen and accept active connections from agents
# until all agents report a STOP command.
AGENTS_ACTIVE = True
while AGENTS_ACTIVE:
read_sockets, write_sockets, error_sockets = select.select(self.client_connections,[],[])
for sock in read_sockets:
if sock == self.server_socket:
                        # Accept an incoming connection
self.client_connections.append(self.server_socket.accept()[0])
else:
# Deal with the data being sent to the server by its agents
self.handleAgent()
# Check to see if _all_ agents are telling the server to stop
agent_count = len(self.agent.agent_data.keys())
current_count = 0
for agent in self.agent.agent_data.keys():
if self.agent.agent_data[agent]['STOP']:
current_count += 1
# if All Agents have reported a STOP command, begin to exit
if current_count == agent_count:
AGENTS_ACTIVE = False
# Gotta get out of the for loop somehow...
break
# Sleep a bit before reading additional data
time.sleep(self.arguments.repeat_rate[-1])
# Close the server socket
self.server_socket.close()
# Close the logfile as the server is about to exit
self.logfile.close()
# Cancel server operations if ctrl-c was pressed
except KeyboardInterrupt:
print 'Canceled by user. Wrote log:', self.arguments.outfile[0]
sys.exit(0)
# Normal exiting procedures
print '\n\nAll agents have stopped. Log file saved to:', self.arguments.outfile[0]
sys.exit(0)
def startClient(self):
Client(self.arguments)
def _launchClients(self):
# Read the environment PBS_NODEFILE
self._PBS_NODEFILE = open(os.getenv('PBS_NODEFILE'), 'r')
nodes = set(self._PBS_NODEFILE.read().split())
# Print some useful information about our setup
print 'Memory Logger running on Host:', self.host, 'Port:', self.port, \
'\nNodes:', ', '.join(nodes), \
'\nSample rate (including stdout):', self.arguments.repeat_rate[-1], 's (use --repeat-rate to adjust)', \
'\nRemote agents delaying', self.arguments.pbs_delay[-1], 'second/s before tracking. (use --pbs-delay to adjust)\n'
# Build our command list based on the PBS_NODEFILE
command = []
for node in nodes:
command.append([ 'ssh', node,
'bash --login -c "source /etc/profile && ' \
+ 'sleep ' + str(self.arguments.pbs_delay[-1]) + ' && ' \
+ os.path.abspath(__file__) \
+ ' --call-back-host ' \
+ self.host + ' ' + str(self.port) \
+ '"'])
# remote into each node and execute another copy of memory_logger.py
        # with a call back argument to receive further instructions
for pbs_node in command:
subprocess.Popen(pbs_node, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Launch the binary we intend to track
def _launchJob(self):
subprocess.Popen(self.arguments.run[-1].split(), stdout=self.agent.log, stderr=self.agent.log)
# A connection has been made from client to server
    # Capture that data, and determine what to do with it
def handleAgent(self):
# Loop through all client connections, and receive data if any
for agent_socket in self.client_connections:
# Completely ignore the server_socket object
if agent_socket == self.server_socket:
continue
# Assign an AgentConnector for the task of handling data between client and server
reporting_agent = AgentConnector(self.arguments, agent_socket)
# OK... get data from a client and begin
new_data = reporting_agent.readData()
if new_data != None:
# There should be only one dictionary key (were reading data from just one client at a time)
agent_uuid = new_data.keys()[0]
# Update our dictionary of an agents data
self.agent.agent_data[agent_uuid] = new_data[agent_uuid]
# Modify incoming Agents timestamp to match Server's time (because every node is a little bit off)
if self.arguments.recover:
self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now - self.agent.delta
else:
self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now
# update total usage for all known reporting agents
total_usage = 0
for one_agent in self.agent.agent_data.keys():
total_usage += self.agent.agent_data[one_agent]['MEMORY']
self.agent.agent_data[agent_uuid]['TOTAL'] = int(total_usage)
# Get any stdout thats happened thus far and apply it to what ever agent just sent us data
self.agent.agent_data[agent_uuid]['STDOUT'] = self.agent._getStdout()
# Write to our logfile
self.logfile.write(self.agent.agent_data[agent_uuid])
# Check for any agents sending a stop command. If we find one,
# set some zeroing values, and close that agent's socket.
if self.agent.agent_data[agent_uuid]['STOP']:
self.agent.agent_data[agent_uuid]['MEMORY'] = 0
agent_socket.close()
if agent_socket != self.server_socket:
self.client_connections.remove(agent_socket)
# Go ahead and set our server agent to STOP as well.
# The server will continue recording samples from agents
self.agent.agent_data['server']['STOP'] = True
# If an Agent has made a request for instructions, handle it here
update_client = False
if new_data[agent_uuid]['REQUEST'] != None:
for request in new_data[agent_uuid]['REQUEST'].iteritems():
if new_data[agent_uuid]['REQUEST'][request[0]] == '':
update_client = True
                    # We only support sending arguments supplied to the server back to the agent
for request_type in dir(self.arguments):
if request[0] == str(request_type):
self.agent.agent_data[agent_uuid]['REQUEST'][request[0]] = getattr(self.arguments, request[0])
# If an Agent needed additional instructions, go ahead and re-send those instructions
if update_client:
reporting_agent.sendData(self.agent.agent_data[agent_uuid])
class Client:
def __init__(self, arguments):
self.arguments = arguments
# Initialize an Agent with a UUID based on our hostname
self.my_agent = Agent(arguments, str(uuid.uuid3(uuid.NAMESPACE_DNS, socket.gethostname())))
# Initialize an AgentConnector
self.remote_server = AgentConnector(self.arguments)
# If client will talk to a server (PBS)
if self.arguments.call_back_host:
# We know by initializing an agent, agent_data contains the necessary message asking for further instructions
self.my_agent.agent_data[self.my_agent.my_uuid] = self.remote_server.sendData(self.my_agent.agent_data)
# Apply new instructions received from server (this basically updates our arguments)
for request in self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'].iteritems():
for request_type in dir(self.arguments):
if request[0] == str(request_type):
setattr(self.arguments, request[0], request[1])
# Requests have been satisfied, set to None
self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'] = None
# Change to the same directory as the server was when initiated (needed for PBS stuff)
os.chdir(self.arguments.cwd)
# Client will not be talking to a server, save data to a file instead
else:
# Deal with --recover
if self.arguments.recover:
# Do not overwrite the file
self.logfile = WriteCSV(self.arguments.outfile[0], False)
else:
# Overwrite the file
self.logfile = WriteCSV(self.arguments.outfile[0], True)
# Lets begin!
self.startProcess()
# This function handles the starting and stoping of the sampler process.
# We loop until an agent returns a stop command.
def startProcess(self):
AGENTS_ACTIVE = True
# If we know we are the only client, go ahead and start the process we want to track.
if self.arguments.call_back_host == None:
subprocess.Popen(self.arguments.run[-1].split(), stdout=self.my_agent.log, stderr=self.my_agent.log)
# Delay just a bit to keep from recording a possible zero memory usage as the binary starts up
time.sleep(self.arguments.sample_delay[0])
# This is a try so we can handle a keyboard ctrl-c
try:
# Continue to process data until an Agent reports a STOP command
while AGENTS_ACTIVE:
# Take a sample
current_data = self.my_agent.takeSample()
# Handle the data supplied by the Agent.
self._handleData(current_data)
# If an Agent reported a STOP command, go ahead and begin the shutdown phase
if current_data[current_data.keys()[0]]['STOP']:
AGENTS_ACTIVE = False
# Sleep just a bit between samples, as to not saturate the machine
time.sleep(self.arguments.repeat_rate[-1])
# An agent reported a stop command... so let everyone know where the log was saved, and exit!
if self.arguments.call_back_host == None:
print 'Binary has exited and a log file has been written. You can now attempt to view this file by running' \
'\nthe memory_logger with either the --plot or --read arguments:\n\n', sys.argv[0], '--plot', self.arguments.outfile[0], \
'\n\nSee --help for additional viewing options.'
# Cancel server operations if ctrl-c was pressed
except KeyboardInterrupt:
self.logfile.close()
print 'Canceled by user. Wrote log:', self.arguments.outfile[0]
sys.exit(0)
# Everything went smooth.
sys.exit(0)
# Figure out what to do with the sampled data
def _handleData(self, data):
# Sending the sampled data to a server
if self.arguments.call_back_host:
self.remote_server.sendData(data)
# Saving the sampled data to a file
else:
# Compute the TOTAL memory usage to be how much our one agent reported
# Because were the only client doing any work
data[self.my_agent.my_uuid]['TOTAL'] = data[self.my_agent.my_uuid]['MEMORY']
self.logfile.write(data[self.my_agent.my_uuid])
# If the agent has been told to stop, close the database file
if self.my_agent.agent_data[self.my_agent.my_uuid]['STOP'] == True:
self.logfile.close()
class AgentConnector:
"""
Functions used to communicate to and from Client and Server.
Both Client and Server classes use this object.
readData()
sendData('message', socket_connection=None)
if sendData's socket_connection is None, it will create a new connection to the server
based on supplied arguments
"""
def __init__(self, arguments, connection=None):
self.arguments = arguments
self.connection = connection
self.CREATED_CONNECTION = False
# If the connection is None, meaning this object was instanced by a client,
# we must create a connection to the server first
if self.connection == None and self.arguments.call_back_host != None:
self.CREATED_CONNECTION = True
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.connect((self.arguments.call_back_host[0], int(self.arguments.call_back_host[1])))
# read all data sent by an agent
def readData(self):
# Get how much data there is to receive
# The first eight bytes is our data length
data_width = int(self.connection.recv(8))
tmp_received = ''
        # We need to receive precisely the amount of data the
# client is trying to send us.
while len(tmp_received) < data_width:
if data_width - len(tmp_received) > 1024:
tmp_received += self.connection.recv(1024)
else:
tmp_received += self.connection.recv(data_width - (len(tmp_received)))
# unpickle the received message
return self._unpickleMessage(tmp_received)
# send data to an agent
def sendData(self, message):
# pickle the data up, and send the message
self.connection.sendall(self._pickleMessage(message))
# If we had to create the socket (connection was none), and this client/agent is requesting
# instructions, go ahead and read the data that _better be there_ sent to us by the server.
if self.CREATED_CONNECTION and message[message.keys()[0]]['REQUEST'] != None:
return self.readData()
# The following two functions pickle up the data for easy socket transport
def _pickleMessage(self, message):
t = TemporaryFile()
pickle.dump(message, t)
t.seek(0)
str_msg = t.read()
str_len = len(str_msg)
message = "%-8d" % (str_len,) + str_msg
return message
def _unpickleMessage(self, message):
t = TemporaryFile()
t.write(message)
t.seek(0)
try:
return pickle.load(t)
except KeyError:
print 'Socket data was not pickled data: ', message
except:
raise
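# A minimal standalone illustration of the wire format used by AgentConnector:
# an 8-byte, left-justified ASCII length header followed by the pickled
# payload. The Namespace below only needs call_back_host=None so no socket is
# opened; the demo payload is a placeholder.
def _example_frame_roundtrip():
    args = argparse.Namespace(call_back_host=None)
    conn = AgentConnector(args)              # no socket is created in this case
    framed = conn._pickleMessage({'demo': {'MEMORY': 1024}})
    width = int(framed[:8])                  # first eight bytes: payload length
    payload = framed[8:8 + width]
    return conn._unpickleMessage(payload)    # -> {'demo': {'MEMORY': 1024}}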
class WriteCSV:
def __init__(self, logfile, overwrite):
if overwrite:
self.file_object = open(logfile, 'w', 1)
else:
self.file_object = open(logfile, 'a', 1)
csv.field_size_limit(sys.maxsize)
self.log_file = csv.writer(self.file_object, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
# Close the logfile
def close(self):
self.file_object.close()
# Write a CSV row
def write(self, data):
formatted_string = self._formatString(data)
self.log_file.writerow(formatted_string)
# Format the CSV output
def _formatString(self, data):
# We will be saving this data in CSV format. Before we do, lets format it a bit here
format_order = ['TIMESTAMP', 'TOTAL', 'STDOUT', 'STACK', 'HOSTNAME', 'MEMORY']
formatted_text = []
for item in format_order:
# We have to handle python's way of formatting floats to strings specially
if item == 'TIMESTAMP':
formatted_text.append('%.6f' % data[item])
else:
formatted_text.append(data[item])
return formatted_text
class Agent:
"""
Each agent object contains its own sampled log data. The Agent class is responsible for
collecting and storing data. machine_id is used to identify the agent.
machine_id is supplied by the client class. This allows for multiple agents if desired
"""
def __init__(self, arguments, machine_id):
self.arguments = arguments
self.my_uuid = machine_id
self.track_process = ''
self.process = None
# This log object is for stdout purposes
self.log = TemporaryFile()
self.log_position = 0
# Discover if --recover is being used. If so, we need to obtain the
# timestamp of the last entry in the outfile log... a little bulky
# to do... and not a very good place to do it.
if self.arguments.recover:
if os.path.exists(self.arguments.outfile[-1]):
memory_list = []
history_file = open(self.arguments.outfile[-1], 'r')
csv.field_size_limit(sys.maxsize)
reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
# Get last item in list. Unfortunately, no way to do this until
# we have read the entire file...? Lucky for us, most memory log
# files are in the single digit megabytes
for row in reader:
memory_list.append(row)
history_file.close()
last_entry = float(memory_list[-1][0]) + self.arguments.repeat_rate[-1]
self.delta = (GetTime().now - last_entry)
else:
print 'Recovery options detected, but I could not find your previous memory log file.'
sys.exit(1)
else:
self.delta = 0
# Create the dictionary to which all sampled data will be stored
# NOTE: REQUEST dictionary items are instructions (arguments) we will
# ask the server to provide (if we are running with --pbs)
# Simply add them here. We _can not_ make the arguments match the
# server exactly, this would cause every agent launched to perform
# like a server... bad stuff
# Example: We added repeat_rate (see dictionary below). Now every
# agent would update their repeat_rate according to what the user
# supplied as an argument (--repeat_rate 0.02)
self.agent_data = { self.my_uuid :
{ 'HOSTNAME' : socket.gethostname(),
'STDOUT' : '',
'STACK' : '',
'MEMORY' : 0,
'TIMESTAMP' : GetTime().now - self.delta,
'REQUEST' : { 'run' : '',
'pstack' : '',
'repeat_rate' : '',
'cwd' : '',
'debugger' : ''},
'STOP' : False,
'TOTAL' : 0,
'DEBUG_LOG' : ''
}
}
# we need to create a place holder for our debugger because when
# memory_logger is run via --pbs, this Agent will not know what
# kind of debugger to use until it has made contact with the server
self.stack_trace = None
# NOTE: This is the only function that should be called in this class
def takeSample(self):
if self.arguments.pstack:
if self.stack_trace is None:
self.stack_trace = Debugger(self.arguments)
self.agent_data[self.my_uuid]['STACK'] = self._getStack()
# Always do the following
self.agent_data[self.my_uuid]['MEMORY'] = self._getMemory()
self.agent_data[self.my_uuid]['STDOUT'] = self._getStdout()
if self.arguments.recover:
self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now - self.delta
else:
self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now
# Return the data to whom ever asked for it
return self.agent_data
def _getStdout(self):
self.log.seek(self.log_position)
output = self.log.read()
self.log_position = self.log.tell()
sys.stdout.write(output)
return output
def _getMemory(self):
tmp_pids = self._getPIDs()
memory_usage = 0
if tmp_pids != {}:
for single_pid in tmp_pids.iteritems():
memory_usage += int(single_pid[1][0])
if memory_usage == 0:
# Memory usage hit zero? Then assume the binary being tracked has exited. So lets begin doing the same.
self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found the total memory usage of all my processes hit 0. Stopping'
self.agent_data[self.my_uuid]['STOP'] = True
return 0
return int(memory_usage)
        # No binary detected? Let's assume it exited, so we should begin doing the same.
self.agent_data[self.my_uuid]['STOP'] = True
self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found no processes running. Stopping'
return 0
def _getStack(self):
# Create a process object if none already exists. Reuse the old one if it does.
if self.process is None:
tmp_pids = self._getPIDs()
# Check if we actually found any running processes
if tmp_pids != {}:
# Obtain a single process id, any process id will do. This will be the process we attach to and perform stack traces
one_pid = tmp_pids.keys()[0]
self.process = self.stack_trace.getProcess(str(one_pid))
return self.stack_trace.getStackTrace(self.process)
else:
return ''
else:
return self.stack_trace.getStackTrace(self.process)
def _getPIDs(self):
pid_list = {}
        # Determine the binary to sample and store it. Doing the findCommand is a little expensive.
if self.track_process == '':
self.track_process = self._findCommand(''.join(self.arguments.run))
# If we are tracking a binary
if self.arguments.run:
command = [which('ps'), '-e', '-o', 'pid,rss,user,args']
tmp_proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
all_pids = tmp_proc.communicate()[0].split('\n')
# Figure out what we are allowed to track (strip away mpiexec, processes not owned by us, etc)
for single_pid in all_pids:
if single_pid.find(self.track_process) != -1 and \
single_pid.find(__file__) == -1 and \
single_pid.find('mpirun') == -1 and \
single_pid.find(os.getenv('USER')) != -1 and \
single_pid.find('mpiexec') == -1:
pid_list[int(single_pid.split()[0])] = []
pid_list[int(single_pid.split()[0])].extend([single_pid.split()[1], single_pid.split()[3]])
return pid_list
# Determine the command we are going to track
# A few things are happening here; first we strip off any MPI commands
# we then loop through the remaining items until we find a matching path
# exp: mpiexec -n 12 ../../../moose_test-opt -i simple_diffusion.i -r 6
# would first strip off mpiexec, check for the presence of -n in our
# current directory, then 12, then ../../../moose_test-opt <- found. It would
# stop and return the base name (moose_test-opt).
def _findCommand(self, command):
if command.find('mpiexec') == 0 or command.find('mpirun') == 0:
for binary in command.split():
if os.path.exists(binary):
return os.path.split(binary)[1]
elif os.path.exists(command.split()[0]):
return os.path.split(command.split()[0])[1]
class GetTime:
"""A simple formatted time object.
"""
def __init__(self, posix_time=None):
import datetime
if posix_time == None:
self.posix_time = datetime.datetime.now()
else:
self.posix_time = datetime.datetime.fromtimestamp(posix_time)
self.now = float(datetime.datetime.now().strftime('%s.%f'))
self.microsecond = self.posix_time.microsecond
self.second = self.posix_time.second
self.minute = self.posix_time.strftime('%M')
self.hour = self.posix_time.strftime('%H')
self.day = self.posix_time.strftime('%d')
self.month = self.posix_time.strftime('%m')
self.year = self.posix_time.year
self.dayname = self.posix_time.strftime('%a')
self.monthname = self.posix_time.strftime('%b')
class MemoryPlotter:
def __init__(self, arguments):
self.arguments = arguments
self.buildGraph()
def buildPlots(self):
plot_dictionary = {}
for log in self.arguments.plot:
memory_list = []
if os.path.exists(log):
log_file = open(log, 'r')
csv.field_size_limit(sys.maxsize)
reader = csv.reader(log_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
for row in reader:
memory_list.append(row)
log_file.close()
plot_dictionary[log.split('/')[-1:][0]] = memory_list
else:
print 'log not found:', log
sys.exit(1)
return plot_dictionary
def buildGraph(self):
try:
import matplotlib.pyplot as plt
except ImportError:
print 'Error importing matplotlib. Matplotlib not available on this system?'
sys.exit(1)
plot_dictionary = self.buildPlots()
fig = plt.figure()
plot_list = []
tmp_plot = []
tmp_legend = []
self.stdout_msgs = {}
self.pstack_msgs = {}
self.multiples = 1
self.memory_label = 'Memory in Bytes'
# Try and calculate memory sizes, so we can move annotations around a bit more accurately
largest_memory = []
for plot_name, value_list in plot_dictionary.iteritems():
for records in value_list:
largest_memory.append(int(records[1]))
largest_memory.sort()
# Determine the scale of the graph
suffixes = ["Terabytes", "Gigabytes", "Megabytes", "Kilobytes", "Bytes"]
multiplier = 1 << 40;
index = 0
while largest_memory[-1] < multiplier and multiplier >= 1:
multiplier = multiplier >> 10
index = index + 1
self.multiples = multiplier
self.memory_label = "Memory in " + suffixes[index-1]
# Loop through each log file
for plot_name, value_list in plot_dictionary.iteritems():
plot_list.append(fig.add_subplot(111))
tmp_memory = []
tmp_time = []
tmp_stdout_x = []
tmp_stdout_y = []
tmp_pstack_x = []
tmp_pstack_y = []
stdout_msg = []
pstack_msg = []
# Get the start time, and make this 0
try:
tmp_zero = decimal.Decimal(value_list[0][0])
except:
print 'Could not parse log file:', plot_name, 'is this a valid memory_logger file?'
sys.exit(1)
# Populate the graph
for records in value_list:
tmp_memory.append(decimal.Decimal(records[1]) / self.multiples)
tmp_time.append(str(decimal.Decimal(records[0]) - tmp_zero))
if len(records[2]) > 0 and self.arguments.stdout:
tmp_stdout_x.append(tmp_time[-1])
tmp_stdout_y.append(tmp_memory[-1])
stdout_msg.append(records[2])
if len(records[3]) > 0 and self.arguments.pstack:
tmp_pstack_x.append(tmp_time[-1])
tmp_pstack_y.append(tmp_memory[-1])
pstack_msg.append(records[3])
# Do the actual plotting:
f, = plot_list[-1].plot(tmp_time, tmp_memory)
tmp_plot.append(f)
tmp_legend.append(plot_name)
plot_list[-1].grid(True)
plot_list[-1].set_ylabel(self.memory_label)
plot_list[-1].set_xlabel('Time in Seconds')
# Enable dork mode
if self.arguments.darkmode:
fig.set_facecolor('0.1')
plot_list[-1].set_axis_bgcolor('0.1')
plot_list[-1].spines['bottom'].set_color('white')
plot_list[-1].spines['top'].set_color('white')
plot_list[-1].spines['right'].set_color('white')
plot_list[-1].spines['left'].set_color('white')
plot_list[-1].tick_params(axis='x', colors='white')
plot_list[-1].tick_params(axis='y', colors='white')
plot_list[-1].xaxis.label.set_color('white')
plot_list[-1].yaxis.label.set_color('white')
plot_list[-1].grid(color='0.6')
# Plot annotations
if self.arguments.stdout:
stdout_line, = plot_list[-1].plot(tmp_stdout_x, tmp_stdout_y, 'x', picker=10, color=f.get_color(), markeredgecolor='0.08', markeredgewidth=0.1)
next_index = str(len(plot_list))
stdout_line.set_gid('stdout' + next_index)
self.stdout_msgs[next_index] = stdout_msg
self.buildAnnotation(plot_list[-1], tmp_stdout_x, tmp_stdout_y, stdout_msg, f.get_color())
if self.arguments.pstack:
pstack_line, = plot_list[-1].plot(tmp_pstack_x, tmp_pstack_y, 'o', picker=10, color=f.get_color(), markeredgecolor='0.08', markeredgewidth=0.1)
next_index = str(len(plot_list))
pstack_line.set_gid('pstack' + next_index)
self.pstack_msgs[next_index] = pstack_msg
# Make points clickable
fig.canvas.mpl_connect('pick_event', self)
# Create legend
legend = plt.legend(tmp_plot, tmp_legend, loc = self.arguments.legend)
legend.get_frame().set_alpha(0.7)
# More dork mode settings
if self.arguments.darkmode:
legend.get_frame().set_facecolor('0.2')
for text in legend.get_texts():
text.set_color('0.8')
plt.show()
def __call__(self, event):
color_codes = {'RESET':'\033[0m', 'r':'\033[31m','g':'\033[32m','c':'\033[36m','y':'\033[33m', 'b':'\033[34m', 'm':'\033[35m', 'k':'\033[0m', 'w':'\033[0m' }
line = event.artist
ind = event.ind
name = line.get_gid()[:-1]
index = line.get_gid()[-1]
if self.arguments.stdout and name == 'stdout':
if self.arguments.no_color != False:
print color_codes[line.get_color()]
print "stdout -----------------------------------------------------\n"
for id in ind:
print self.stdout_msgs[index][id]
if self.arguments.no_color != False:
print color_codes['RESET']
if self.arguments.pstack and name == 'pstack':
if self.arguments.no_color != False:
print color_codes[line.get_color()]
print "pstack -----------------------------------------------------\n"
for id in ind:
print self.pstack_msgs[index][id]
if self.arguments.no_color != False:
print color_codes['RESET']
def buildAnnotation(self,fig,x,y,msg,c):
for i in range(len(x)):
fig.annotate(str(msg[i].split('\n')[0][:self.arguments.trim_text[-1]]),
xy=(x[i], y[i]),
rotation=self.arguments.rotate_text[-1],
xytext=(decimal.Decimal(x[i]) + decimal.Decimal(self.arguments.move_text[0]), decimal.Decimal(y[i]) + decimal.Decimal(self.arguments.move_text[1])),
color=c, horizontalalignment='center', verticalalignment='bottom',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=0.5",
color=c
)
)
class ReadLog:
"""Read a memory_logger log file, and display the results to stdout in an easy to read form.
"""
def __init__(self, arguments):
self.arguments = arguments
history_file = open(self.arguments.read[-1], 'r')
reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
self.memory_list = []
for row in reader:
self.memory_list.append(row)
history_file.close()
self.sorted_list = []
self.mem_list = []
self.use_nodes = False
self.printHistory()
def printHistory(self):
RESET = '\033[0m'
BOLD = '\033[1m'
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
CYAN = '\033[36m'
YELLOW = '\033[33m'
last_memory = 0.0
(terminal_width, terminal_height) = self.getTerminalSize()
for timestamp in self.memory_list:
to = GetTime(float(timestamp[0]))
total_memory = int(timestamp[1])
log = timestamp[2].split('\n')
pstack = timestamp[3].split('\n')
node_name = str(timestamp[4])
node_memory = int(timestamp[5])
self.mem_list.append(total_memory)
self.sorted_list.append([str(to.day) + ' ' + str(to.monthname) + ' ' + str(to.hour) + ':' + str(to.minute) + ':' + '{:02.0f}'.format(to.second) + '.' + '{:06.0f}'.format(to.microsecond), total_memory, log, pstack, node_name, node_memory])
largest_memory = decimal.Decimal(max(self.mem_list))
if len(set([x[4] for x in self.sorted_list])) > 1:
self.use_nodes = True
print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )'
for item in self.sorted_list:
tmp_str = ''
if decimal.Decimal(item[1]) == largest_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RESET, terminal_width)
elif item[1] > last_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RED, terminal_width)
elif item[1] == last_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], CYAN, terminal_width)
else:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], GREEN, terminal_width)
last_memory = item[1]
sys.stdout.write(tmp_str)
print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )'
def formatText(self, largest_memory, date, total_memory, node_memory, log, pstack, reporting_host, color_code, terminal_width):
RESET = '\033[0m'
if decimal.Decimal(total_memory) == largest_memory:
percent = '100'
elif (decimal.Decimal(total_memory) / largest_memory) == 0:
percent = '0'
else:
percent = str(decimal.Decimal(total_memory) / largest_memory)[2:4] + '.' + str(decimal.Decimal(total_memory) / largest_memory)[4:6]
header = len(date) + 18
footer = len(percent) + 6
additional_correction = 0
max_length = decimal.Decimal(terminal_width - header) / largest_memory
total_position = total_memory * decimal.Decimal(max_length)
node_position = node_memory * decimal.Decimal(max_length)
tmp_log = ''
if self.arguments.stdout:
for single_log in log:
if single_log != '':
tmp_log += ' '*(header - len(' stdout |')) + ' stdout | ' + single_log + '\n'
if self.arguments.pstack:
for single_pstack in pstack:
if single_pstack != '':
tmp_log += ' '*(header - len(' pstack |')) + ' pstack | ' + single_pstack + '\n'
if self.arguments.separate and self.use_nodes != False:
message = '< ' + RESET + reporting_host + ' - ' + '{:10,.0f}'.format(node_memory) + ' K' + color_code + ' >'
additional_correction = len(RESET) + len(color_code)
elif self.use_nodes:
message = '< >'
else:
node_position = 0
message = ''
return date + '{:15,.0f}'.format(total_memory) + ' K | ' + color_code + '-'*int(node_position) + message + '-'*(int(total_position) - (int(node_position) + ((len(message) - additional_correction) + footer))) + RESET + '| ' + percent + '%\n' + tmp_log
def getTerminalSize(self):
"""Quicky to get terminal window size"""
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (env['LINES'], env['COLUMNS'])
except:
cr = (25, 80)
return int(cr[1]), int(cr[0])
# A simple which function to return the path to a program
def which(program):
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
print 'I could not find the following binary:', program
sys.exit(1)
def verifyArgs(args):
possible_positions = [ 'center',
'center left',
'center right',
'upper center',
'lower center',
'best',
'right',
'left',
'upper right',
'lower right',
'upper left',
'lower left']
if args.legend not in possible_positions:
print 'Invalid legend position requested. Possible values are:\n\t', '\n\t'.join([x for x in possible_positions])
sys.exit(1)
option_count = 0
if args.read:
option_count += 1
if args.run:
option_count += 1
if args.plot:
option_count += 1
if option_count != 1 and args.pbs != True:
if args.call_back_host == None:
print 'You must use one of the following: run, read, or plot'
sys.exit(1)
args.cwd = os.getcwd()
# Work with --recover (a MOOSE application specific option)
args.recover = False
if args.run:
if args.run[0].find('--recover') != -1:
args.recover = True
if args.run[0].find('~') != -1:
print "You must use absolute paths. Python does not understand the '~' path discriptor.\nYou can use environment vairables (eg: $HOME) so long as they are absolute paths."
sys.exit(1)
if args.outfile == None and args.run:
# Attempt to build the output file based on input file
if re.findall(r'-i (\w+)', args.run[0]) != []:
args.outfile = [os.getcwd() + '/' + re.findall(r'-i (\w+)', args.run[0])[0] + '_memory.log']
else:
args.outfile = [os.getcwd() + '/' + args.run[0].replace('..', '').replace('/', '').replace(' ', '_') + '.log']
if args.pstack and (args.read is None and args.plot is None):
if args.debugger is not None:
if args.debugger == 'lldb':
if platform.platform().find('Darwin') != -1:
try:
import lldb
except ImportError:
lldbImportError()
sys.exit(1)
else:
results = which('lldb')
elif args.debugger == 'gdb':
results = which('gdb')
else:
print 'Invalid debugger selected. You must choose between gdb and lldb using the --debugger argument'
sys.exit(1)
return args
def parseArguments(args=None):
parser = argparse.ArgumentParser(description='Track and Display memory usage')
rungroup = parser.add_argument_group('Tracking', 'The following options control how the memory logger tracks memory usage')
rungroup.add_argument('--run', nargs=1, metavar='command', help='Run specified command using absolute paths. You must encapsulate the command in quotes.')
rungroup.add_argument('--pbs', dest='pbs', metavar='', action='store_const', const=True, default=False, help='Instruct memory logger to tally all launches on all nodes\n ')
rungroup.add_argument('--pbs-delay', dest='pbs_delay', metavar='float', nargs=1, type=float, default=[1.0], help='For larger jobs, you may need to increase the delay as to when the memory_logger will launch the tracking agents\n ')
rungroup.add_argument('--sample-delay', dest='sample_delay', metavar='float', nargs=1, type=float, default=[0.25], help='The time to delay before taking the first sample (when not using pbs)')
rungroup.add_argument('--repeat-rate', nargs=1, metavar='float', type=float, default=[0.25], help='Indicate the sleep delay in float seconds to check memory usage (default 0.25 seconds)\n ')
rungroup.add_argument('--outfile', nargs=1, metavar='file', help='Save log to specified file. (Defaults based on run command)\n ')
readgroup = parser.add_argument_group('Read / Display', 'Options to manipulate or read log files created by the memory_logger')
readgroup.add_argument('--read', nargs=1, metavar='file', help='Read a specified memory log file to stdout\n ')
readgroup.add_argument('--separate', dest='separate', action='store_const', const=True, default=False, help='Display individual node memory usage (read mode only)\n ')
readgroup.add_argument('--plot', nargs="+", metavar='file', help='Display a graphical representation of memory usage (Requires Matplotlib). Specify a single file or a list of files to plot\n ')
readgroup.add_argument('--legend', metavar='"lower left"', default='lower left', help='Place legend in one of the following locations (default --legend "lower left") "center", "center left", "center right", "upper center", "lower center", "best", "right", "left", "upper right", "lower right", "upper left", "lower left"\n ')
commongroup = parser.add_argument_group('Common Options', 'The following options can be used when displaying the results')
commongroup.add_argument('--pstack', dest='pstack', action='store_const', const=True, default=False, help='Display/Record stack trace information (if available)\n ')
commongroup.add_argument('--stdout', dest='stdout', action='store_const', const=True, default=False, help='Display stdout information\n ')
commongroup.add_argument('--debugger', dest='debugger', metavar='gdb | lldb', nargs='?', help='Specify the debugger to use. Possible values: gdb or lldb\n ')
plotgroup = parser.add_argument_group('Plot Options', 'Additional options when using --plot')
plotgroup.add_argument('--rotate-text', nargs=1, metavar='int', type=int, default=[30], help='Rotate stdout/pstack text by this amount (default 30)\n ')
plotgroup.add_argument('--move-text', nargs=2, metavar='int', default=['0', '0'], help='Move text X and Y by this amount (default 0 0)\n ')
plotgroup.add_argument('--trim-text', nargs=1, metavar='int', type=int, default=[15], help='Display this many characters in stdout/pstack (default 15)\n ')
plotgroup.add_argument('--no-color', dest='no_color', metavar='', action='store_const', const=False, help='When printing output to stdout do not use color codes\n ')
plotgroup.add_argument('--darkmode', dest='darkmode', metavar='', action='store_const', const=True, help='When you want to be cool\n ')
internalgroup = parser.add_argument_group('Internal PBS Options', 'The following options are used to control how memory_logger as a tracking agent connects back to the caller. These are set automatically when using PBS and can be ignored.')
internalgroup.add_argument('--call-back-host', nargs=2, help='Server hostname and port that launched memory_logger\n ')
return verifyArgs(parser.parse_args(args))
def lldbImportError():
print """
Unable to import lldb
The Python lldb API is now supplied by Xcode but not
automatically set in your PYTHONPATH. Please search
the internet for how to do this if you wish to use
--pstack on Mac OS X.
Note: If you installed Xcode to the default location of
/Applications, you should only have to perform the following:
export PYTHONPATH=/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python:$PYTHONPATH
###!! IMPORTANT !!###
It may also be necessary to unload the miniconda module.
If you receive a fatal Python error about PyThreadState
try using your system's version of Python instead.
"""
if __name__ == '__main__':
args = parseArguments()
if args.read:
ReadLog(args)
sys.exit(0)
if args.plot:
MemoryPlotter(args)
sys.exit(0)
Server(args)
| lgpl-2.1 |
bitemyapp/ggplot | ggplot/geoms/geom_bar.py | 11 | 3061 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import matplotlib.cbook as cbook
from .geom import geom
from ggplot.utils import is_string
from ggplot.utils import is_categorical
class geom_bar(geom):
DEFAULT_AES = {'alpha': None, 'color': None, 'fill': '#333333',
'linetype': 'solid', 'size': 1.0, 'weight': None, 'y': None, 'width' : None}
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'stat': 'bin', 'position': 'stack'}
_extra_requires = {'y', 'width'}
_aes_renames = {'linetype': 'linestyle', 'size': 'linewidth',
'fill': 'color', 'color': 'edgecolor'}
# NOTE: Currently, geom_bar does not support mapping
# to alpha and linestyle. TODO: raise exception
_units = {'edgecolor', 'color', 'alpha', 'linestyle', 'linewidth'}
def __init__(self, *args, **kwargs):
# TODO: Change self.__class__ to geom_bar
super(geom_bar, self).__init__(*args, **kwargs)
self.bottom = None
self.ax = None
def _plot_unit(self, pinfo, ax):
categorical = is_categorical(pinfo['x'])
pinfo.pop('weight')
x = pinfo.pop('x')
width_elem = pinfo.pop('width')
# If width is unspecified, default is an array of 1's
if width_elem is None:
width = np.ones(len(x))
else:
width = np.array(width_elem)
# Make sure bottom is initialized and get heights. If we are working on
# a new plot (using facet_wrap or grid), then reset bottom
_reset = self.bottom is None or (self.ax is not None and self.ax is not ax)
self.bottom = np.zeros(len(x)) if _reset else self.bottom
self.ax = ax
heights = np.array(pinfo.pop('y'))
# layout and spacing
#
# matplotlib needs the left of each bin and its width
# if x has numeric values then:
# - left = x - width/2
# otherwise x is categorical:
# - left = cumulative width of previous bins starting
# at zero for the first bin
#
# then add a uniform gap between each bin
# - the gap is a fraction of the width of the first bin
# and only applies when x is categorical
_left_gap = 0
_spacing_factor = 0 # of the bin width
if not categorical:
left = np.array([x[i]-width[i]/2 for i in range(len(x))])
else:
_left_gap = 0.2
_spacing_factor = 0.105 # of the bin width
_breaks = np.append([0], width)
left = np.cumsum(_breaks[:-1])
_sep = width[0] * _spacing_factor
left = left + _left_gap + [_sep * i for i in range(len(left))]
ax.bar(left, heights, width, bottom=self.bottom, **pinfo)
ax.autoscale()
if categorical:
ax.set_xticks(left+width/2)
ax.set_xticklabels(x)
# Update bottom positions
self.bottom = heights + self.bottom
| bsd-2-clause |
Myasuka/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
liganega/Gongsu-DataSci | previous/y2017/Wextra/GongSu26_Statistics_Hypothesis_Test_2.py | 2 | 7230 |
# coding: utf-8
# In[1]:
from __future__ import print_function, division
# #### Source note: the material covered here was produced with reference to the site below.
#
# https://github.com/rouseguy/intro2stats
# # Hypothesis testing
# ## Main topics
#
# Learn how to run a hypothesis test using the pandas module together with data on wholesale cannabis (plant) prices traded in the 51 US states.
# ## Main examples
#
# * Comparing the difference in wholesale cannabis (plant) prices traded in California in 2014 and 2015
# * Test methods
# * t-test
# * chi-square test
# ## The six steps of a hypothesis test
#
# A hypothesis test follows the six steps below.
#
# 1) Decide on the null hypothesis to be tested.
#
# 2) Choose the statistical method used to test the null hypothesis.
#
# 3) Set the rejection region.
# * Usually the top or bottom 5%
#
# 4) Find the p-value for the test statistic.
#
# 5) Check whether the sample result falls inside the rejection region.
#
# 6) Make a decision (a short illustrative sketch follows below).
# * If the p-value falls in the rejection region, reject the null hypothesis.
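# In[ ]:
# A minimal illustrative sketch (not part of the original notebook): steps 3-6
# usually come down to comparing the p-value with the chosen significance level.
# Both values below are hypothetical.
alpha = 0.05       # size of the rejection region chosen in step 3
p_value = 0.003    # hypothetical p-value obtained in step 4
print("Reject H0" if p_value < alpha else "Fail to reject H0")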
# ## Main modules
#
# In addition to numpy and pandas, we import stats, a module dedicated to statistics.
# In[2]:
import numpy as np
import pandas as pd
from scipy import stats
# ### Importing GongSu25
# Also import the entire contents of GongSu25.
# In[3]:
from GongSu25_Statistics_Sampling_Distribution import *
# #### Note
# Recall that the month and year columns have already been added to weed_pd.
# In[4]:
weed_pd.head()
# ### Example
#
# Examining the difference between the mean wholesale prices of high-quality cannabis (plant) traded in California in January 2014 and January 2015
# #### Extracting the January 2014 data
# In[5]:
ca_2014_01 = weed_pd[(weed_pd.State=="California") & (weed_pd.year==2014) & (weed_pd.month==1)].HighQ
# Convert the data to an array.
#
# **Reason:** the stats module works with arrays rather than dataframes.
# In[6]:
weed_ca_jan2014 = np.array(ca_2014_01)
# Mean trading price for January 2014
# In[7]:
weed_ca_jan2014.mean()
# #### Extracting the January 2015 data
# In[8]:
ca_2015_01 = weed_pd[(weed_pd.State=="California") & (weed_pd.year==2015) & (weed_pd.month==1)].HighQ
# In[9]:
weed_ca_jan2015 = np.array(ca_2015_01)
# Mean trading price for January 2015
# In[10]:
weed_ca_jan2015.mean()
# #### Difference between the January 2014 and January 2015 mean trading prices
# In[11]:
weed_ca_jan2014.mean() - weed_ca_jan2015.mean()
# ## t-test example
#
# As we saw above, the mean wholesale price of high-quality cannabis (plant) traded in California in January 2014 was about 4.84 dollars higher than in January 2015.
# Let us judge whether a difference of this size is significant.
# In other words, we need to decide whether the wholesale price in January 2015 dropped noticeably compared with a year earlier, or whether the difference lies within an acceptable margin of error.
#
# Here we use a t-test to judge the significance of the difference in mean wholesale prices.
# To do so, we set up the null hypothesis as follows:
#
# > H0: the difference between the mean wholesale prices of high-quality cannabis (plant) in January 2014 and January 2015 is not meaningful.
#
# Now we determine the p-value using a t-test.
#
# #### Note
# * A detailed explanation of the t-test is not covered here.
# * The `ttest_ind` function in the stats module makes it easy to obtain the p-value.
# In[12]:
stats.ttest_ind(weed_ca_jan2014, weed_ca_jan2015, equal_var=True)
# #### Conclusion
# According to the result above, the p-value is effectively 0.
# This means that a difference of this size between the wholesale prices traded in January 2014 and January 2015 is an event that could hardly ever occur under the null hypothesis.
# In other words, the null hypothesis can hardly be true.
# The conclusion is therefore that the change in wholesale price over the year is meaningful.
# ## Chi-square ($\chi$-square) test example
# #### Premise
# Assume that the number of cannabis (plant) transactions made across the US in 2014 represents the typical yearly record.
#
# #### Question
# Determine whether the number of cannabis (plant) transactions made across the US in 2015 is also in line with the typical yearly record.
# #### Getting the 2014 records
#
# Retrieve the HighQN, MedQN and LowQN information for 2014.
# In[13]:
weed_jan2014 = weed_pd[(weed_pd.year==2014) & (weed_pd.month==1)][["HighQN", "MedQN", "LowQN"]]
weed_jan2014.head()
# #### Getting the 2015 records
#
# Retrieve the HighQN, MedQN and LowQN information for 2015.
# In[14]:
weed_jan2015 = weed_pd[(weed_pd.year==2015) & (weed_pd.month==1)][["HighQN", "MedQN", "LowQN"]]
weed_jan2015.head()
# ### Evaluating the difference with a chi-square test
# The chi-square test is used to check the significance of differences in frequencies.
#
# #### Null hypothesis
# > H0: the total number of transactions in 2015 is similar to the typical yearly record.
#
# Now we use the chi-square test to decide whether to reject the null hypothesis.
#
# To apply the chi-square test, we first need the chi-square test statistic ($\chi^2$):
#
# $$ \chi^2 = \sum (O - E)^2/E $$
#
# Here E and O have the following meanings (a small worked sketch follows below).
#
# * Expected frequency (E): the 2014 transaction counts (the typical yearly record)
# * Observed frequency (O): the 2015 transaction counts
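# In[ ]:
# A minimal illustrative sketch (not part of the original notebook): the statistic
# can be computed by hand from hypothetical observed/expected counts and checked
# against stats.chisquare (numpy and stats are imported above).
O_demo = np.array([120, 90, 40])   # hypothetical observed frequencies
E_demo = np.array([100, 100, 50])  # hypothetical expected frequencies
chi2_by_hand = ((O_demo - E_demo) ** 2 / E_demo).sum()
print(chi2_by_hand)                # 7.0
print(stats.chisquare(O_demo, E_demo))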
# #### Total number of transactions in 2014
#
# #### Note
# * Uses the apply function
# * The axis keyword decides whether apply works column-wise or row-wise
# * axis=0 applies the function to each column
# * axis=1 applies the function to each row
#
# * The code below computes the total transaction count for each grade (HighQN, MedQN, LowQN).
# In[15]:
Expected = np.array(weed_jan2014.apply(sum, axis=0))
Expected
# #### Total number of transactions in 2015
#
# The same approach is used.
# In[16]:
Observed = np.array(weed_jan2015.apply(sum, axis=0))
Observed
# Using the 2014 data as the expected frequencies and the 2015 data as the observed frequencies, we can now
# compute the chi-square statistic $\chi^2$ and, from it, the p-value.
#
# However, rather than explaining how the p-value is derived, here we simply obtain it with the chisquare function from the stats module.
# In[17]:
stats.chisquare(Observed, Expected)
# #### Conclusion
#
# The p-value came out as 0, so the null hypothesis must be rejected. In other words, the total number of transactions in 2015 differs considerably from the typical yearly record.
# ## Exercises
# ### Exercise
#
# Using a t-test, check whether the wholesale prices of medium-quality (MedQ) cannabis (plant) traded in California in January and February 2015 differ significantly.
# ### Exercise
#
# Using a chi-square test, check whether the total numbers of medium-quality (MedQ) cannabis (plant) transactions made in New York State in 2014 and 2015 differ significantly.
| gpl-3.0 |
dud225/incubator-airflow | airflow/hooks/dbapi_hook.py | 2 | 7413 |
from builtins import str
from past.builtins import basestring
from datetime import datetime
import numpy
import logging
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_pandas_df(self, sql, parameters=None):
'''
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
'''
import pandas.io.sql as psql
conn = self.get_conn()
df = psql.read_sql(sql, con=conn, params=parameters)
conn.close()
return df
def get_records(self, sql, parameters=None):
'''
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
'''
conn = self.get_conn()
cur = self.get_cursor()
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
rows = cur.fetchall()
cur.close()
conn.close()
return rows
def get_first(self, sql, parameters=None):
'''
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
'''
conn = self.get_conn()
cur = conn.cursor()
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
rows = cur.fetchone()
cur.close()
conn.close()
return rows
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
conn = self.get_conn()
if isinstance(sql, basestring):
sql = [sql]
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
cur = conn.cursor()
for s in sql:
logging.info(s)
if parameters is not None:
cur.execute(s, parameters)
else:
cur.execute(s)
cur.close()
conn.commit()
conn.close()
def set_autocommit(self, conn, autocommit):
conn.autocommit = autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
the whole set of inserts is treated as one transaction
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
conn = self.get_conn()
cur = conn.cursor()
if self.supports_autocommit:
cur.execute('SET autocommit = 0')
conn.commit()
i = 0
for row in rows:
i += 1
l = []
for cell in row:
l.append(self._serialize_cell(cell))
values = tuple(l)
sql = "INSERT INTO {0} {1} VALUES ({2});".format(
table,
target_fields,
",".join(values))
cur.execute(sql)
if commit_every and i % commit_every == 0:
conn.commit()
logging.info(
"Loaded {i} into {table} rows so far".format(**locals()))
conn.commit()
cur.close()
conn.close()
logging.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
@staticmethod
def _serialize_cell(cell):
if isinstance(cell, basestring):
return "'" + str(cell).replace("'", "''") + "'"
elif cell is None:
return 'NULL'
elif isinstance(cell, numpy.datetime64):
return "'" + str(cell) + "'"
elif isinstance(cell, datetime):
return "'" + cell.isoformat() + "'"
else:
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
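# ------------------------------------------------------------------------
# A hedged usage sketch (illustration only; it is not part of the original
# airflow module). It assumes a hypothetical _SqliteDemoHook subclass backed
# by a temporary on-disk sqlite3 database, so every get_conn() call sees the
# same data without touching Airflow's connection registry.
if __name__ == "__main__":
    import os
    import sqlite3
    import tempfile

    _fd, _db_path = tempfile.mkstemp(suffix=".db")
    os.close(_fd)

    class _SqliteDemoHook(DbApiHook):
        # Minimal concrete hook: only the attributes DbApiHook requires.
        conn_name_attr = "sqlite_conn_id"
        default_conn_name = "sqlite_default"
        supports_autocommit = False

        def get_conn(self):
            # Bypass get_connection() and talk to sqlite directly.
            return sqlite3.connect(_db_path)

    hook = _SqliteDemoHook()
    hook.run("CREATE TABLE IF NOT EXISTS demo (name TEXT, value INTEGER)")
    hook.insert_rows("demo", [("a", 1), ("b", 2)], target_fields=("name", "value"))
    print(hook.get_records("SELECT name, value FROM demo ORDER BY name"))
    os.remove(_db_path)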
| apache-2.0 |
DSLituiev/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 55 | 19053 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
lesina/labs2016 | Laba06/exercise03.py | 1 | 2388 | import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def get_percentile(values, bucket_number):
step = 100 / bucket_number
return [np.percentile(values, i*step) for i in range(bucket_number)]
def get_percentile_number(value, percentiles):
i = 0
while value >= percentiles[i]:
i += 1
if i == len(percentiles):
break
if i > 0:
return i - 1
else:
return i
def value_equalization(value, percentiles, add_random=False):
step = 1/len(percentiles)
idx = get_percentile_number(value, percentiles)
if add_random:
return idx * step + random.uniform(0, step)
else:
return idx * step
def values_equalization(values, percentiles, add_random=False):
return [value_equalization(value, percentiles, add_random = add_random) for value in values]
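# A minimal illustration (not part of the original exercise): equalizing a small
# hand-made sample against its own quartiles maps every value into [0, 1).
toy = [1, 2, 3, 4, 100]
toy_percentiles = get_percentile(toy, 4)
print(values_equalization(toy, toy_percentiles))  # [0.0, 0.25, 0.5, 0.75, 0.75]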
random.seed(0)
# Read the data into a numpy array
data = pd.read_csv("img.txt", sep = " ", header = None)
data = np.array(data)
# Build 100 random rows in two different ways
top100 = random.choice(data)
random100 = np.array(random.sample(list(data), 100))
for i in range(99):
top100 = np.vstack([top100, random.choice(data)])
# Plot the sampled rows
plt.subplot(321)
plt.imshow(top100, cmap = plt.get_cmap('gray'))
plt.subplot(322)
plt.imshow(random100, cmap = plt.get_cmap('gray'))
# Plot the raw data
plt.subplot(323)
plt.imshow(data, cmap = plt.get_cmap('gray'))
plt.subplot(324)
plt.hist(data.flatten())
# Equalize the data
# for i in range(len(data)):
# percentiles = get_percentile(data[i], 4)
# if min(data[i]) > 0: # set the first percentile to 0 if all the numbers are positive
# percentiles[0] = 0.0
# data[i] = values_equalization(data[i], percentiles[1:], add_random = True)
percentiles = get_percentile(data.ravel(), 4)
percentiles[0] = 0.0
for i in range(len(data)):
data[i] = values_equalization(data[i], percentiles, add_random=True)
print(data)
plt.subplot(325)
plt.imshow(data[130:140, 110:120], cmap = plt.get_cmap('gray'))
plt.subplot(326)
plt.hist(data.flatten()) # the histogram looks fine to me
plt.show()
data = data.flatten()
print(data.mean()) | gpl-3.0 |
kevin-intel/scikit-learn | sklearn/feature_selection/_variance_threshold.py | 1 | 3505 | # Author: Lars Buitinck
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from ._base import SelectorMixin
from ..utils.sparsefuncs import mean_variance_axis, min_max_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(SelectorMixin, BaseEstimator):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, default=0
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Notes
-----
Allows NaN in the input.
Raises ValueError if no feature in X meets the variance threshold.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any, default=None
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = self._validate_data(X, accept_sparse=('csr', 'csc'),
dtype=np.float64,
force_all_finite='allow-nan')
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
if self.threshold == 0:
mins, maxes = min_max_axis(X, axis=0)
peak_to_peaks = maxes - mins
else:
self.variances_ = np.nanvar(X, axis=0)
if self.threshold == 0:
peak_to_peaks = np.ptp(X, axis=0)
if self.threshold == 0:
# Use peak-to-peak to avoid numeric precision issues
# for constant features
compare_arr = np.array([self.variances_, peak_to_peaks])
self.variances_ = np.nanmin(compare_arr, axis=0)
if np.all(~np.isfinite(self.variances_) |
(self.variances_ <= self.threshold)):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self)
return self.variances_ > self.threshold
def _more_tags(self):
return {'allow_nan': True}
| bsd-3-clause |
wkfwkf/statsmodels | statsmodels/sandbox/tests/test_predict_functional.py | 29 | 12873 | from statsmodels.sandbox.predict_functional import predict_functional
import numpy as np
import pandas as pd
import statsmodels.api as sm
from numpy.testing import dec
# If true, the output is written to a multi-page pdf file.
pdf_output = False
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
def pctl(q):
return lambda x : np.percentile(x, 100 *q)
class TestPredFunc(object):
@classmethod
def setup_class(cls):
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
cls.pdf = PdfPages("predict_functional.pdf")
@classmethod
def teardown_class(cls):
if pdf_output:
cls.pdf.close()
def close_or_save(self, fig):
if pdf_output:
self.pdf.savefig(fig)
else:
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_formula(self):
np.random.seed(542)
n = 500
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
x4 = np.random.randint(0, 5, size=n)
x4 = np.asarray(["ABCDE"[i] for i in x4])
x5 = np.random.normal(size=n)
y = 0.3*x2**2 + (x4 == "B") + 0.1*(x4 == "B")*x2**2 + x5 + np.random.normal(size=n)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3, "x4": x4, "x5": x5})
fml = "y ~ x1 + bs(x2, df=4) + x3 + x2*x3 + I(x1**2) + C(x4) + C(x4)*bs(x2, df=4) + x5"
model = sm.OLS.from_formula(fml, data=df)
result = model.fit()
summaries = {"x1": np.mean, "x3": pctl(0.75), "x5": np.mean}
values = {"x4": "B"}
pr1, ci1, fvals1 = predict_functional(result, "x2", summaries, values)
values = {"x4": "C"}
pr2, ci2, fvals2 = predict_functional(result, "x2", summaries, values)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x4=B')
plt.plot(fvals2, pr2, '-', label='x4=C')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x4=B')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals2, pr2, '-', label='x4=C')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_lm_contrast(self):
np.random.seed(542)
n = 200
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
y = x1 + 2*x2 + x3 - x1*x2 + x2*x3 + np.random.normal(size=n)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + x2 + x3 + x1*x2 + x2*x3"
model = sm.OLS.from_formula(fml, data=df)
result = model.fit()
values = {"x2": 1, "x3": 1} # y = 4
values2 = {"x2": 0, "x3": 0} # y = x1
pr, cb, fvals = predict_functional(result, "x1", values=values,
values2=values2, ci_method='scheffe')
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.67, 0.8])
plt.plot(fvals, pr, '-', label="Estimate", color='orange', lw=4)
plt.plot(fvals, 4 - fvals, '-', label="Truth", color='lime', lw=4)
plt.fill_between(fvals, cb[:, 0], cb[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Mean contrast", size=15)
plt.title("Linear model contrast")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_glm_formula_contrast(self):
np.random.seed(542)
n = 50
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
mn = 5 + 0.1*x1 + 0.1*x2 + 0.1*x3 - 0.1*x1*x2
y = np.random.poisson(np.exp(mn), size=len(mn))
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + x2 + x3 + x1*x2"
model = sm.GLM.from_formula(fml, data=df, family=sm.families.Poisson())
result = model.fit()
values = {"x2": 1, "x3": 1} # y = 5.2
values2 = {"x2": 0, "x3": 0} # y = 5 + 0.1*x1
pr, cb, fvals = predict_functional(result, "x1", values=values,
values2=values2, ci_method='simultaneous')
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.67, 0.8])
plt.plot(fvals, pr, '-', label="Estimate", color='orange', lw=4)
plt.plot(fvals, 0.2 - 0.1*fvals, '-', label="Truth", color='lime', lw=4)
plt.fill_between(fvals, cb[:, 0], cb[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Linear predictor contrast", size=15)
plt.title("Poisson regression contrast")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_scb(self):
np.random.seed(473)
n = 100
x = np.random.normal(size=(n,4))
x[:, 0] = 1
for fam_name in "poisson", "binomial", "gaussian":
if fam_name == "poisson":
y = np.random.poisson(20, size=n)
fam = sm.families.Poisson()
true_mean = 20
true_lp = np.log(20)
elif fam_name == "binomial":
y = 1 * (np.random.uniform(size=n) < 0.5)
fam = sm.families.Binomial()
true_mean = 0.5
true_lp = 0
elif fam_name == "gaussian":
y = np.random.normal(size=n)
fam = sm.families.Gaussian()
true_mean = 0
true_lp = 0
model = sm.GLM(y, x, family=fam)
result = model.fit()
# CB is for linear predictor or mean response
for linear in False, True:
true = true_lp if linear else true_mean
values = {'const': 1, "x2": 0}
summaries = {"x3": np.mean}
pred1, cb1, fvals1 = predict_functional(result, "x1",
values=values, summaries=summaries, linear=linear)
pred2, cb2, fvals2 = predict_functional(result, "x1",
values=values, summaries=summaries,
ci_method='simultaneous', linear=linear)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.58, 0.8])
plt.plot(fvals1, pred1, '-', color='black', label='Estimate')
plt.plot(fvals1, true * np.ones(len(pred1)), '-', color='purple',
label='Truth')
plt.plot(fvals1, cb1[:, 0], color='blue', label='Pointwise CB')
plt.plot(fvals1, cb1[:, 1], color='blue')
plt.plot(fvals2, cb2[:, 0], color='green', label='Simultaneous CB')
plt.plot(fvals2, cb2[:, 1], color='green')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Linear predictor", size=15)
else:
plt.ylabel("Fitted mean", size=15)
plt.title("%s family prediction" % fam_name.capitalize())
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_glm_formula(self):
np.random.seed(542)
n = 500
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.randint(0, 3, size=n)
x3 = np.asarray(["ABC"[i] for i in x3])
lin_pred = -1 + 0.5*x1**2 + (x3 == "B")
prob = 1 / (1 + np.exp(-lin_pred))
y = 1 * (np.random.uniform(size=n) < prob)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + I(x1**2) + x2 + C(x3)"
model = sm.GLM.from_formula(fml, family=sm.families.Binomial(), data=df)
result = model.fit()
summaries = {"x2": np.mean}
for linear in False, True:
values = {"x3": "B"}
pr1, ci1, fvals1 = predict_functional(result, "x1", summaries, values, linear=linear)
values = {"x3": "C"}
pr2, ci2, fvals2 = predict_functional(result, "x1", summaries, values, linear=linear)
exact1 = -1 + 0.5*fvals1**2 + 1
exact2 = -1 + 0.5*fvals2**2
if not linear:
exact1 = 1 / (1 + np.exp(-exact1))
exact2 = 1 / (1 + np.exp(-exact2))
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x3=B')
plt.plot(fvals2, pr2, '-', label='x3=C')
plt.plot(fvals1, exact1, '-', label='x3=B (exact)')
plt.plot(fvals2, exact2, '-', label='x3=C (exact)')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Fitted linear predictor", size=15)
else:
plt.ylabel("Fitted probability", size=15)
plt.title("Binomial GLM prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x3=B', color='orange')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals2, pr2, '-', label='x3=C', color='lime')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Fitted linear predictor", size=15)
else:
plt.ylabel("Fitted probability", size=15)
plt.title("Binomial GLM prediction")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_noformula_prediction(self):
np.random.seed(6434)
n = 200
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
y = x1 - x2 + np.random.normal(size=n)
exog = np.vstack((x1, x2, x3)).T
model = sm.OLS(y, exog)
result = model.fit()
summaries = {"x3": pctl(0.75)}
values = {"x2": 1}
pr1, ci1, fvals1 = predict_functional(result, "x1", summaries, values)
values = {"x2": -1}
pr2, ci2, fvals2 = predict_functional(result, "x1", summaries, values)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x2=1', lw=4, alpha=0.6, color='orange')
plt.plot(fvals2, pr2, '-', label='x2=-1', lw=4, alpha=0.6, color='lime')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x2=1', lw=4, alpha=0.6, color='orange')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals2, pr2, '-', label='x2=-1', lw=4, alpha=0.6, color='lime')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/generic/test_series.py | 2 | 8607 | from distutils.version import LooseVersion
from operator import methodcaller
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import MultiIndex, Series, date_range
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_series_equal
from .test_generic import Generic
try:
import xarray
_XARRAY_INSTALLED = True
except ImportError:
_XARRAY_INSTALLED = False
class TestSeries(Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setup_method(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = "ts"
self.series = tm.makeStringSeries()
self.series.name = "series"
def test_rename_mi(self):
s = Series(
[11, 21, 31],
index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]),
)
s.rename(str.lower)
def test_set_axis_name(self):
s = Series([1, 2, 3], index=["a", "b", "c"])
funcs = ["rename_axis", "_set_axis_name"]
name = "foo"
for func in funcs:
result = methodcaller(func, name)(s)
assert s.index.name is None
assert result.index.name == name
def test_set_axis_name_mi(self):
s = Series(
[11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]], names=["l1", "l2"]
),
)
funcs = ["rename_axis", "_set_axis_name"]
for func in funcs:
result = methodcaller(func, ["L1", "L2"])(s)
assert s.index.name is None
assert s.index.names == ["l1", "l2"]
assert result.index.name is None
assert result.index.names, ["L1", "L2"]
def test_set_axis_name_raises(self):
s = pd.Series([1])
with pytest.raises(ValueError):
s._set_axis_name(name="a", axis=1)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, "2", 3.0])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range("20130101", periods=3))
result = o._get_numeric_data()
expected = Series([], dtype="M8[ns]", index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
assert s.bool()
s = Series([False])
assert not s.bool()
msg = "The truth value of a Series is ambiguous"
# bool() raises for any single-element Series, including nan/NaT and bools
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]), Series([False])]:
with pytest.raises(ValueError, match=msg):
bool(s)
msg = "bool cannot act on a non-boolean single element Series"
for s in [Series([np.nan]), Series([pd.NaT])]:
with pytest.raises(ValueError, match=msg):
s.bool()
# multiple bool are still an error
msg = "The truth value of a Series is ambiguous"
for s in [Series([True, True]), Series([False, False])]:
with pytest.raises(ValueError, match=msg):
bool(s)
with pytest.raises(ValueError, match=msg):
s.bool()
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(["a"]), Series([0.0])]:
msg = "The truth value of a Series is ambiguous"
with pytest.raises(ValueError, match=msg):
bool(s)
msg = "bool cannot act on a non-boolean single element Series"
with pytest.raises(ValueError, match=msg):
s.bool()
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = "foo"
o2 = Series(range(3), range(3))
o2.name = "bar"
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(
np.random.rand(1000),
index=date_range("20130101", periods=1000, freq="s"),
name="foo",
)
result = ts.resample("1T").mean()
self.check_metadata(ts, result)
result = ts.resample("1T").min()
self.check_metadata(ts, result)
result = ts.resample("1T").apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ["name", "filename"]
o.filename = "foo"
o2.filename = "bar"
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == "concat" and name == "filename":
value = "+".join(
[getattr(o, name) for o in other.objs if getattr(o, name, None)]
)
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
assert result.filename == "foo+bar"
assert result.name is None
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
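# (clarifying note, not in the original test) Restoring Series._metadata and
# Series.__finalize__ matters because the patched attributes live on the class
# itself and would otherwise leak into every other test that builds a Series.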
@pytest.mark.skipif(
not _XARRAY_INSTALLED
or _XARRAY_INSTALLED
and LooseVersion(xarray.__version__) < LooseVersion("0.10.0"),
reason="xarray >= 0.10.0 required",
)
@pytest.mark.parametrize(
"index",
[
"FloatIndex",
"IntIndex",
"StringIndex",
"UnicodeIndex",
"DateIndex",
"PeriodIndex",
"TimedeltaIndex",
"CategoricalIndex",
],
)
def test_to_xarray_index_types(self, index):
from xarray import DataArray
index = getattr(tm, "make{}".format(index))
s = Series(range(6), index=index(6))
s.index.name = "foo"
result = s.to_xarray()
repr(result)
assert len(result) == 6
assert len(result.coords) == 1
assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, DataArray)
# idempotency
assert_series_equal(
result.to_series(), s, check_index_type=False, check_categorical=True
)
@td.skip_if_no("xarray", min_version="0.7.0")
def test_to_xarray(self):
from xarray import DataArray
s = Series([])
s.index.name = "foo"
result = s.to_xarray()
assert len(result) == 0
assert len(result.coords) == 1
assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, DataArray)
s = Series(range(6))
s.index.name = "foo"
s.index = pd.MultiIndex.from_product(
[["a", "b"], range(3)], names=["one", "two"]
)
result = s.to_xarray()
assert len(result) == 2
assert_almost_equal(list(result.coords.keys()), ["one", "two"])
assert isinstance(result, DataArray)
assert_series_equal(result.to_series(), s)
def test_valid_deprecated(self):
# GH18800
with tm.assert_produces_warning(FutureWarning):
pd.Series([]).valid()
@pytest.mark.parametrize(
"s",
[
Series([np.arange(5)]),
pd.date_range("1/1/2011", periods=24, freq="H"),
pd.Series(range(5), index=pd.date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, s, shift_size):
# GH22397
assert s.shift(shift_size) is not s
@pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1M")])
def test_datetime_shift_always_copy(self, move_by_freq):
# GH22397
s = pd.Series(range(5), index=pd.date_range("2017", periods=5))
assert s.shift(freq=move_by_freq) is not s
| apache-2.0 |
ChanChiChoi/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example uses an Elastic-Net regression model, and the performance is
measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0 # only the top 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
pratapvardhan/pandas | pandas/core/computation/align.py | 6 | 5618 | """Core eval alignment algorithms
"""
import warnings
from functools import partial, wraps
from pandas.compat import zip, range
import numpy as np
import pandas as pd
from pandas import compat
from pandas.errors import PerformanceWarning
import pandas.core.common as com
from pandas.core.computation.common import _result_type_many
def _align_core_single_unary_op(term):
if isinstance(term.value, np.ndarray):
typ = partial(np.asanyarray, dtype=term.value.dtype)
else:
typ = type(term.value)
ret = typ,
if not hasattr(term.value, 'axes'):
ret += None,
else:
ret += _zip_axes_from_type(typ, term.value.axes),
return ret
def _zip_axes_from_type(typ, new_axes):
axes = {}
for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES):
axes[ax_name] = new_axes[ax_ind]
return axes
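# Hypothetical illustration (not part of pandas): for a 2-D typ such as
# pd.DataFrame, _AXIS_NAMES maps {0: 'index', 1: 'columns'}, so this helper
# turns the positional new_axes list into {'index': new_axes[0],
# 'columns': new_axes[1]}, ready to be passed to the constructor as **axes.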
def _any_pandas_objects(terms):
"""Check a sequence of terms for instances of PandasObject."""
return any(isinstance(term.value, pd.core.generic.PandasObject)
for term in terms)
def _filter_special_cases(f):
@wraps(f)
def wrapper(terms):
# single unary operand
if len(terms) == 1:
return _align_core_single_unary_op(terms[0])
term_values = (term.value for term in terms)
# we don't have any pandas objects
if not _any_pandas_objects(terms):
return _result_type_many(*term_values), None
return f(terms)
return wrapper
@_filter_special_cases
def _align_core(terms):
term_index = [i for i, term in enumerate(terms)
if hasattr(term.value, 'axes')]
term_dims = [terms[i].value.ndim for i in term_index]
ndims = pd.Series(dict(zip(term_index, term_dims)))
# initial axes are the axes of the largest-axis'd term
biggest = terms[ndims.idxmax()].value
typ = biggest._constructor
axes = biggest.axes
naxes = len(axes)
gt_than_one_axis = naxes > 1
for value in (terms[i].value for i in term_index):
is_series = isinstance(value, pd.Series)
is_series_and_gt_one_axis = is_series and gt_than_one_axis
for axis, items in enumerate(value.axes):
if is_series_and_gt_one_axis:
ax, itm = naxes - 1, value.index
else:
ax, itm = axis, items
if not axes[ax].is_(itm):
axes[ax] = axes[ax].join(itm, how='outer')
for i, ndim in compat.iteritems(ndims):
for axis, items in zip(range(ndim), axes):
ti = terms[i].value
if hasattr(ti, 'reindex'):
transpose = isinstance(ti, pd.Series) and naxes > 1
reindexer = axes[naxes - 1] if transpose else items
term_axis_size = len(ti.axes[axis])
reindexer_size = len(reindexer)
ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))
if ordm >= 1 and reindexer_size >= 10000:
w = ('Alignment difference on axis {axis} is larger '
'than an order of magnitude on term {term!r}, by '
'more than {ordm:.4g}; performance may suffer'
).format(axis=axis, term=terms[i].name, ordm=ordm)
warnings.warn(w, category=PerformanceWarning, stacklevel=6)
f = partial(ti.reindex, reindexer, axis=axis, copy=False)
terms[i].update(f())
terms[i].update(terms[i].value.values)
return typ, _zip_axes_from_type(typ, axes)
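# Note on the PerformanceWarning above (added clarification): ordm is the
# base-10 order of magnitude of |len(reindexer) - len(term axis)|, so the
# warning fires only when a term must be reindexed onto a joined axis that
# differs by at least 10 labels (ordm >= 1) and the joined axis itself has at
# least 10000 labels.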
def _align(terms):
"""Align a set of terms"""
try:
# flatten the parse tree (a nested list, really)
terms = list(com.flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, pd.core.generic.NDFrame):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.is_scalar for term in terms):
return _result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes
def _reconstruct_object(typ, obj, axes, dtype):
"""Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if (not isinstance(typ, partial) and
issubclass(typ, pd.core.generic.PandasObject)):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
# The condition is to distinguish 0-dim array (returned in case of
# scalar) and 1 element array
# e.g. np.array(0) and np.array([0])
if len(obj.shape) == 1 and len(obj) == 1:
if not isinstance(ret_value, np.ndarray):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value
| bsd-3-clause |
anurag313/scikit-learn | sklearn/tree/tests/test_export.py | 130 | 9950 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
| bsd-3-clause |
WuShichao/computational-physics | 3/3_3/3_3.py | 1 | 1809 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 10:46:05 2016
Simple Pendulum - Euler method
@author: nightwing
"""
from math import cos,pi
import matplotlib.pyplot as plt
g = 9.8 #gravity acceleration (m/s2)
mass = 1.0 #mass of the pendulum (kg)
length = 1 #length of the string (m)
k = g / length #g/length
t_end = 10 #end time (s)
Time = [] #this list store the list "time"
Energy = [] #this list store the list "energy"
#-------------calculate (Euler method)------------
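# Added clarification: the loop below is the explicit Euler update for the
# linearized (small-angle) pendulum,
#   omega_{i+1} = omega_i - (g/length) * theta_i * dt
#   theta_{i+1} = theta_i + omega_i * dt,
# where both right-hand sides use the old values; this is why the total
# energy grows slowly with dt, which the plots illustrate. (Updating theta
# with the new omega instead would give the energy-conserving Euler-Cromer
# scheme.)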
for dt in [0.04, 0.02, 0.01, 0.005]:
t = 0 #initial time (s)
theta = 11.5 #initial angle (degrees)
theta *= (pi/180) #initial angle (radians)
angular_vel = 0 #initial angular velocity (rad/s)
angular_velocity = [] #this list store value of angular velocity
time = [] #this list store value of time
energy = [] #thie list store value of total energy
while t <= t_end:
E = 0.5*mass*(length*angular_vel)**2 + mass*g*length*(1-cos(theta))
energy.append(E)
angular_velocity.append(angular_vel)
time.append(t)
angular_vel -= k * theta * dt
theta += angular_velocity[-1] * dt
t += dt
Time.append(time)
Energy.append(energy)
#---------------graph----------------
plt.title("Simple Pendulum - Euler method")
plt.xlabel("time (s)")
plt.ylabel("total energy (J)")
plt.plot(Time[0],Energy[0],"k-",label="dt=0.040s")
plt.plot(Time[1],Energy[1],"k--",label="dt=0.020s")
plt.plot(Time[2],Energy[2],"k-.",label="dt=0.010s")
plt.plot(Time[3],Energy[3],"k:",label="dt=0.005s")
plt.legend(loc=2)
plt.show() | gpl-3.0 |
endangeredoxen/pywebify | setup.py | 1 | 4007 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join('pywebify', 'version.txt'), 'r') as input:
__version__ = input.readlines()[0]
setup(
name='pywebify',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__,
description='Browser-based html/image file report builder',
long_description='Browser-based html/image file report builder',
# The project's main homepage.
url='https://github.com/endangeredoxen/pywebify',
download_url = 'https://github.com/endangeredoxen/pywebify/archive/v%s.tar.gz' % __version__,
# Author details
author='Steve Nicholes',
author_email='[email protected]',
# Choose your license
license='GPLv3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
# classifiers=[
# # How mature is this project? Common values are
# # 3 - Alpha
# # 4 - Beta
# # 5 - Production/Stable
# 'Development Status :: 5',
# # Indicate who your project is intended for
# 'Intended Audience :: Engineers/Scientists',
# 'Topic :: Data Analysis',
# # Pick your license as you wish (should match "license" above)
# 'License :: GPL v3 License',
# # Specify the Python versions you support here. In particular, ensure
# # that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 3.6',
# ],
# What does your project relate to?
keywords=['data', 'web report'],
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
# dependency_links = [],
install_requires=['pandas','numpy','natsort','fivecentfileio'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'pywebify': ['config.ini', 'img/*', 'js/*', 'templates/css/*',
'templates/html/*', 'templates/jinja/*', 'setup.txt', 'version.txt'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('C:/my_data', ['pywebify/config.ini'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| gpl-2.0 |
JediKoder/coursera-ML | ex3/ex3_nn.py | 3 | 2955 | ## Machine Learning Online Class - Exercise 3 | Part 2: Neural Networks
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# linear exercise. You will need to complete the following functions
# in this exericse:
#
# lrCostFunction.m (logistic regression cost function)
# oneVsAll.m
# predictOneVsAll.m
# predict.m
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
#
from matplotlib import use
use('TkAgg')
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
from displayData import displayData
from predict import predict
## Setup the parameters you will use for this exercise
input_layer_size = 400 # 20x20 Input Images of Digits
hidden_layer_size = 25 # 25 hidden units
num_labels = 10 # 10 labels, from 1 to 10
# (note that we have mapped "0" to label 10)
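# i.e. an image of the digit "0" carries the label y == 10, while the
# digits 1-9 keep their own values (added clarification).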
## =========== Part 1: Loading and Visualizing Data =============
# We start the exercise by first loading and visualizing the dataset.
# You will be working with a dataset that contains handwritten digits.
#
# Load Training Data
print 'Loading and Visualizing Data ...'
data = scipy.io.loadmat('ex3data1.mat')
X = data['X']
y = data['y']
m, _ = X.shape
# Randomly select 100 data points to display
sel = np.random.permutation(range(m))
sel = sel[0:100]
displayData(X[sel,:])
raw_input("Program paused. Press Enter to continue...")
## ================ Part 2: Loading Pameters ================
# In this part of the exercise, we load some pre-initialized
# neural network parameters.
print 'Loading Saved Neural Network Parameters ...'
# Load the weights into variables Theta1 and Theta2
data = scipy.io.loadmat('ex3weights.mat')
Theta1 = data['Theta1']
Theta2 = data['Theta2']
## ================= Part 3: Implement Predict =================
# After training the neural network, we would like to use it to predict
# the labels. You will now implement the "predict" function to use the
# neural network to predict the labels of the training set. This lets
# you compute the training set accuracy.
pred = predict(Theta1, Theta2, X)
print 'Training Set Accuracy: %f\n' % (np.mean(np.double(pred == np.squeeze(y))) * 100)
raw_input("Program paused. Press Enter to continue...")
# To give you an idea of the network's output, you can also run
# through the examples one at the a time to see what it is predicting.
# Randomly permute examples
rp = np.random.permutation(range(m))
plt.figure()
for i in range(m):
# Display
X2 = X[rp[i],:]
print 'Displaying Example Image'
X2 = np.matrix(X[rp[i]])
displayData(X2)
pred = predict(Theta1, Theta2, X2.getA())
pred = np.squeeze(pred)
print 'Neural Network Prediction: %d (digit %d)\n' % (pred, np.mod(pred, 10))
raw_input("Program paused. Press Enter to continue...")
plt.close()
| mit |
dkriegner/xrayutilities | doc/source/example_xu_read_spec_easy.py | 1 | 3247 | """
Example script to show how to use xrayutilities to read and plot
reciprocal space map scans from a spec file created at the ESRF/ID10B
for details about the measurement see:
D Kriegner et al. Nanotechnology 22 425704 (2011)
http://dx.doi.org/10.1088/0957-4484/22/42/425704
"""
import os
import matplotlib.pyplot as plt
import numpy
import xrayutilities as xu
# global setting for the experiment
sample = "test" # sample name used also as file name for the data file
energy = 8042.5 # x-ray energy in eV
center_ch = 715.9 # center channel of the linear detector
chpdeg = 345.28 # channels per degree of the linear detector
roi = [100, 1340] # region of interest of the detector
nchannel = 1500 # number of channels of the detector
# intensity normalizer function responsible for count time and absorber
# correction
normalizer_detcorr = xu.IntensityNormalizer(
"MCA",
mon="Monitor",
time="Seconds",
absfun=lambda d: d["detcorr"] / d["psd2"].astype(numpy.float))
# substrate material used for Bragg peak calculation to correct for
# experimental offsets
InP = xu.materials.InP
# initialize experimental class to specify the reference directions of your
# crystal
# 11-2: inplane reference
# 111: surface normal
hxrd = xu.HXRD(InP.Q(1, 1, -2), InP.Q(1, 1, 1), en=energy)
# configure linear detector
# detector direction + parameters need to be given
# mounted along z direction, which corresponds to twotheta
hxrd.Ang2Q.init_linear('z-', center_ch, nchannel, chpdeg=chpdeg, roi=roi)
# read spec file and save to HDF5-file
# since reading is much faster from HDF5 once the data are transformed
h5file = os.path.join("data", sample + ".h5")
try:
s # try if spec file object already exist ("run -i" in ipython)
except NameError:
s = xu.io.SPECFile(sample + ".spec", path="data")
else:
s.Update()
s.Save2HDF5(h5file)
#################################
# InP (333) reciprocal space map
omalign = 43.0529 # experimental aligned values
ttalign = 86.0733
[omnominal, _, _, ttnominal] = hxrd.Q2Ang(
InP.Q(3, 3, 3)) # nominal values of the substrate peak
# read the data from the HDF5 file
# scan number:36, names of motors in spec file: omega= sample rocking, gamma =
# twotheta
[om, tt], MAP = xu.io.geth5_scan(h5file, 36, 'omega', 'gamma')
# normalize the intensity values (absorber and count time corrections)
psdraw = normalizer_detcorr(MAP)
# remove unusable detector channels/regions (no averaging of detector channels)
psd = xu.blockAveragePSD(psdraw, 1, roi=roi)
# convert angular coordinates to reciprocal space + correct for offsets
[qx, qy, qz] = hxrd.Ang2Q.linear(
om, tt,
delta=[omalign - omnominal, ttalign - ttnominal])
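# (added clarification) delta passes the experimental offsets (the aligned
# angles measured at the InP(333) substrate peak minus the nominal Bragg
# angles), so that the converted reciprocal-space map is anchored at the
# theoretical substrate position.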
# calculate data on a regular grid of 200x201 points
gridder = xu.Gridder2D(200, 201)
gridder(qy, qz, psd)
# maplog function limits the shown dynamic range to 8 orders of magnitude
# from the maxium
INT = xu.maplog(gridder.data.T, 8., 0)
# plot the intensity as contour plot using matplotlib
plt.figure()
cf = plt.contourf(gridder.xaxis, gridder.yaxis, INT, 100, extend='min')
plt.xlabel(r'$Q_{[11\bar2]}$ ($\mathrm{\AA}^{-1}$)')
plt.ylabel(r'$Q_{[\bar1\bar1\bar1]}$ ($\mathrm{\AA}^{-1}$)')
cb = plt.colorbar(cf)
cb.set_label(r"$\log($Int$)$ (cps)")
| gpl-2.0 |
tkoziara/parmec | tests/prescribe_vsweep.py | 1 | 1385 | # PARMEC test --> PRESCRIBE command test (velocity sweep)
from math import sin, cos, pi
matnum = MATERIAL (1E3, 1E9, 0.25)
nodes = [0, 0, 0,
1, 0, 0,
1, 1, 0,
0, 1, 0,
0, 0, 1,
1, 0, 1,
1, 1, 1,
0, 1, 1]
elements = [8, 0, 1, 2, 3, 4, 5, 6, 7, matnum]
colors = [1, 4, 0, 1, 2, 3, 2, 4, 4, 5, 6, 7, 3]
parnum = MESH (nodes, elements, matnum, colors)
stop = 5.0
def linvel(t):
amag = 1.0
lofq = 0.0
hifq = 5.0
# derivative of a = amag * sin (2.0*pi*(lofq+(hifq-lofq)*t/stop)*t) -->
v = amag * cos (2.0*pi*(lofq+(hifq-lofq)*t/stop)*t) * (2.0*pi*lofq + 4.0*pi*(hifq-lofq)*t/stop)
return (v, 0, 0)
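# Worked derivation for the expression above (added clarification, chain rule):
#   a(t) = amag * sin(phase(t)), phase(t) = 2*pi*lofq*t + 2*pi*(hifq-lofq)*t**2/stop
#   dphase/dt = 2*pi*lofq + 4*pi*(hifq-lofq)*t/stop
#   v(t) = da/dt = amag * cos(phase(t)) * dphase/dt
# which is exactly the expression returned by linvel.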
PRESCRIBE (parnum, linear = linvel)
t = HISTORY ('TIME')
vx1 = HISTORY ('VX', parnum)
dx = HISTORY ('DX', parnum)
DEM (stop, 0.001, (0.05, 0.01))
vx0 = []
for s in t: vx0.append(linvel(s)[0])
try:
import matplotlib.pyplot as plt
plt.clf ()
plt.plot (t, vx0, label = 'input', linestyle = '--', marker = '.')
plt.plot (t, vx1, label = 'output')
plt.xlim ((0, t[-1]))
plt.xlabel ('time $(s)$')
plt.ylabel ('vx $(m/s)$')
plt.legend()
plt.savefig ('tests/prescribe_vsweep_vx.png')
plt.clf ()
plt.plot (t, dx)
plt.xlim ((0, t[-1]))
plt.xlabel ('time $(s)$')
plt.ylabel ('dx $(m)$')
plt.savefig ('tests/prescribe_vsweep_dx.png')
except:
print 'time = ', t
print 'vx0 = ', vx0
print 'vx1 = ', vx1
print 'dx = ', dx
| mit |
zhenv5/scikit-learn | sklearn/decomposition/nmf.py | 35 | 39369 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Mathieu Blondel <[email protected]>
# Tom Dupre la Tour
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (Projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted, check_non_negative
from ..utils import deprecated
from ..utils import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
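# Clarifying note (not in the original source): trace_dot(X, Y) equals
# np.trace(np.dot(X, Y.T)) without forming the product, since
# sum_ij X_ij * Y_ij is the same quantity.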
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
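# Clarifying note (not in the original source): Hoyer's measure equals 1.0 for
# a maximally sparse vector (a single non-zero entry) and 0.0 for a constant
# vector, e.g. _sparseness(np.array([0., 0., 1.])) == 1.0 while
# _sparseness(np.ones(3)) == 0.0.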
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _safe_compute_error(X, W, H):
"""Frobenius norm between X and WH, safe for sparse array"""
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(norm_X + norm_WH - 2. * cross_prod)
return error
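# Added clarification: the sparse branch uses the expansion
#   ||X - WH||_F^2 = ||X||_F^2 + ||WH||_F^2 - 2 * <X, WH>_F
# (norm_X, norm_WH and cross_prod are the three terms, with the square root
# taken at the end), which avoids densifying X.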
def _check_string_param(sparseness, solver):
allowed_sparseness = (None, 'data', 'components')
if sparseness not in allowed_sparseness:
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, allowed_sparseness))
allowed_solver = ('pg', 'cd')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
eps : float
Truncate all values less than this in output to zero.
random_state : int seed, RandomState instance, or None (default)
Random number generator seed control, used in 'nndsvdar' and
'random' modes.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
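# Usage sketch (illustrative comment only, assuming a small non-negative X;
# not part of the original module):
#   X = np.abs(np.random.RandomState(0).randn(6, 5))
#   W, H = _initialize_nmf(X, n_components=3, init='nndsvd')
#   # W.shape == (6, 3), H.shape == (3, 5) and np.dot(W, H) approximates X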
def _nls_subproblem(V, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
V : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
def _update_projected_gradient_w(X, W, H, tolW, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = H.shape[0]
if sparseness is None:
Wt, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(beta) * np.ones((1,
n_components_))]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(eta) * np.eye(n_components_)]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return Wt.T, gradW.T, iterW
def _update_projected_gradient_h(X, W, H, tolH, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = W.shape[1]
if sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((n_components_, n_features))]),
safe_vstack([W,
np.sqrt(eta) * np.eye(n_components_)]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(beta)
* np.ones((1, n_components_))]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return H, gradH, iterH
def _fit_projected_gradient(X, W, H, tol, max_iter,
nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Compute Non-negative Matrix Factorization (NMF) with Projected Gradient
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with Sparseness Constraints.
Journal of Machine Learning Research 2004.
"""
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition
# as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
W, gradW, iterW = _update_projected_gradient_w(X, W, H, tolW,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _update_projected_gradient_h(X, W, H, tolH,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
W, _, _ = _update_projected_gradient_w(X, W, H, tol, nls_max_iter,
alpha, l1_ratio, sparseness,
beta, eta)
return W, H, n_iter
def _update_coordinate_descent(X, W, Ht, alpha, l1_ratio, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = fast_dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L1 and L2 regularizations
l1_reg = 1. * l1_ratio * alpha
l2_reg = (1. - l1_ratio) * alpha
# L2 regularization corresponds to increasing the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
# L1 regularization corresponds to decreasing each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
seed = random_state.randint(np.iinfo(np.int32).max)
return _update_cdnmf_fast(W, HHt, XHt, shuffle, seed)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, alpha=0.001,
l1_ratio=0., regularization=None, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If True, the samples will be taken in shuffled order during
coordinate descent.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
alpha_H = 0.
alpha_W = 0.
if regularization in ('both', 'components'):
alpha_H = float(alpha)
if regularization in ('both', 'transformation'):
alpha_W = float(alpha)
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, alpha_W,
l1_ratio, shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, alpha_H,
l1_ratio, shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
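# A minimal, hedged sketch of driving the coordinate-descent solver above
# directly. The demo shapes and the plain random initialisation of _W0/_H0
# are illustrative assumptions; normal users would go through
# non_negative_factorization or the NMF estimator defined below instead.
if __name__ == "__main__":  # pragma: no cover - illustrative sketch only
    import numpy as np
    _rng = np.random.RandomState(0)
    _X = np.abs(_rng.randn(20, 10))
    _W0 = np.abs(_rng.randn(20, 3))
    _H0 = np.abs(_rng.randn(3, 10))
    _W, _H, _n_iter = _fit_coordinate_descent(
        _X, _W0, _H0, tol=1e-4, max_iter=200, alpha=0.0, l1_ratio=0.0,
        regularization='both', update_H=True, random_state=0)
    print("CD solver stopped after %d iterations" % _n_iter)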
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False, nls_max_iter=2000,
sparseness=None, beta=1, eta=0.1):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'custom': use custom matrices W and H
update_H : boolean, default: True
        If True, both W and H will be estimated from initial guesses.
        If False, only W will be estimated.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean
If True, the samples will be taken in shuffled order during
coordinate descent.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
_check_string_param(sparseness, solver)
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, six.integer_types) or n_components <= 0:
raise ValueError("Number of components must be positive;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, numbers.Number) or max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom':
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
if solver == 'pg':
warnings.warn("'pg' solver will be removed in release 0.19."
" Use 'cd' solver instead.", DeprecationWarning)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
max_iter,
nls_max_iter,
alpha, l1_ratio,
sparseness,
beta, eta)
else: # transform
W, H, n_iter = _update_projected_gradient_w(X, W, H,
tol, nls_max_iter,
alpha, l1_ratio,
sparseness, beta,
eta)
elif solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
max_iter,
alpha, l1_ratio,
regularization,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
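# A short, hedged usage sketch of the function above. The data matrix and
# parameter values are illustrative assumptions, not recommendations.
if __name__ == "__main__":  # pragma: no cover - illustrative sketch only
    import numpy as np
    _X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
    _W, _H, _n_iter = non_negative_factorization(
        _X, n_components=2, init='random', random_state=0, solver='cd',
        tol=1e-4, max_iter=200)
    print(_W.shape, _H.shape, _n_iter)  # (6, 2), (2, 2), iteration count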
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvdar' if n_components < n_features, otherwise random.
Valid options::
'random': non-negative random matrices
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'custom': use custom matrices W and H, given in 'fit' method.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
shuffle : boolean
If True, the samples will be taken in shuffled order during
coordinate descent.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, init=None, solver='cd',
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0, shuffle=False,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
if sparseness is not None:
warnings.warn("Controlling regularization through the sparseness,"
" beta and eta arguments is only available"
" for 'pg' solver, which will be removed"
" in release 0.19. Use another solver with L1 or L2"
" regularization instead.", DeprecationWarning)
self.nls_max_iter = nls_max_iter
self.sparseness = sparseness
self.beta = beta
self.eta = eta
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components,
init=self.init, update_H=True, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
if self.solver == 'pg':
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
self.reconstruction_err_ = _safe_compute_error(X, W, H)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Attributes
----------
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
self.n_iter_ = n_iter_
return W
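# A hedged sketch of the usual estimator workflow with the NMF class above:
# fit_transform on training data, then transform new data against the learned
# components_. The demo matrices are illustrative assumptions only.
if __name__ == "__main__":  # pragma: no cover - illustrative sketch only
    import numpy as np
    _rng = np.random.RandomState(0)
    _X_train = np.abs(_rng.randn(6, 4))
    _X_new = np.abs(_rng.randn(3, 4))
    _nmf = NMF(n_components=2, init='random', random_state=0)
    _W_train = _nmf.fit_transform(_X_train)
    _W_new = _nmf.transform(_X_new)  # reuses the fitted components_
    print(_W_train.shape, _W_new.shape)  # (6, 2) (3, 2)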
@deprecated("It will be removed in release 0.19. Use NMF instead."
"'pg' solver is still available until release 0.19.")
class ProjectedGradientNMF(NMF):
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
super(ProjectedGradientNMF, self).__init__(
n_components=n_components, init=init, solver='pg', tol=tol,
max_iter=max_iter, random_state=random_state, alpha=alpha,
l1_ratio=l1_ratio, verbose=verbose, nls_max_iter=nls_max_iter,
sparseness=sparseness, beta=beta, eta=eta)
| bsd-3-clause |
vybstat/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
                # we got at most as many eigenvalues as we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
       using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
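# A hedged sketch of what get_inv_matvec returns: a callable that applies
# M^-1 to a vector via a sparse LU factorization. The tiny diagonal matrix is
# an illustrative assumption only.
if __name__ == "__main__":  # pragma: no cover - illustrative sketch only
    import scipy.sparse as sp
    _M = sp.csc_matrix(np.diag([2.0, 4.0, 8.0]))
    _inv_matvec = get_inv_matvec(_M, symmetric=True)
    print(_inv_matvec(np.ones(3)))  # expected to be close to [0.5, 0.25, 0.125]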
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
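# A hedged sketch of shift-invert mode with the wrapper above: once sigma is
# set, 'which' refers to the transformed eigenvalues, so 'LM' selects the
# eigenvalues of A closest to sigma. The diagonal test matrix and the sigma
# value are illustrative assumptions only.
if __name__ == "__main__":  # pragma: no cover - illustrative sketch only
    _A = np.diag(np.arange(1.0, 11.0))
    _vals, _vecs = _eigs(_A, k=3, sigma=4.2, which='LM')
    print(np.sort(_vals.real))  # expected to be close to [3., 4., 5.]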
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        The v[i] is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
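# ----------------------------------------------------------------------
# Editorial sketch (an addition, not part of the original backport): a
# minimal, guarded check of the shift-invert behaviour documented in the
# _eigsh docstring above -- with `sigma` set, the eigenvalues returned are
# the ones closest to `sigma`.
if __name__ == "__main__":
    _rng_demo = np.random.RandomState(0)
    _Q_demo, _ = np.linalg.qr(_rng_demo.randn(50, 50))
    # symmetric test matrix with known eigenvalues 1, 2, ..., 50
    _A_demo = _Q_demo.dot(np.diag(np.arange(1.0, 51.0))).dot(_Q_demo.T)
    _vals_demo, _ = _eigsh(_A_demo, k=3, sigma=25.2)
    print(np.sort(_vals_demo))  # expected to be approximately [24. 25. 26.]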
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
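# ----------------------------------------------------------------------
# Editorial sketch (an addition, not part of the original backport):
# illustrates the note in the _svds docstring -- the singular values of A
# are the square roots of the eigenvalues of A.H * A -- by comparing the
# backported _svds against numpy's dense SVD on a small random matrix.
if __name__ == "__main__":
    _rng_svd = np.random.RandomState(1)
    _A_svd = _rng_svd.randn(30, 8)
    _, _s_svd, _ = _svds(_A_svd, k=3)
    _s_ref = np.linalg.svd(_A_svd, compute_uv=False)[:3]
    # the k largest singular values should agree with the dense reference
    print(np.allclose(np.sort(_s_svd)[::-1], _s_ref, atol=1e-6))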
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 57 | 16523 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False) | bsd-3-clause |
sanketloke/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
valexandersaulys/airbnb_kaggle_contest | prototype_alpha/gradientBoost_take2.py | 1 | 1852 | """
Take 2 on the GradientBoost, predicting for country_destinations.
Use labels in confusion_matrix(y_true,y_preds,labels=[]) to order
the labels in the confusion matrix to see what's over-represented in the
target files for the Airbnb contest.
http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html
'NDF' is over-represented
"""
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
training = pd.read_csv("protoAlpha_training.csv")
testing = pd.read_csv("protoAlpha_testing.csv")
X = training.iloc[:,1:-1]
y = training['country_destination']
x_train,x_valid,y_train,y_valid = train_test_split(X,y,test_size=0.3,random_state=None)
labels_order = np.unique(y_train.values)
# Train classifier
from sklearn.ensemble import GradientBoostingClassifier
clf = GradientBoostingClassifier(n_estimators=10,verbose=100)
clf.fit(x_train,y_train)
# Run Predictions
from sklearn.metrics import confusion_matrix, accuracy_score
y_preds = clf.predict(x_valid)
yt_preds = clf.predict(x_train)
# Print Predictions
print(labels_order);
print( confusion_matrix(y_train,yt_preds,labels=labels_order) );
print( confusion_matrix(y_valid,y_preds,labels=labels_order) );
print( "Accuracy: %f" % (accuracy_score(y_valid,y_preds)) );
# Save metrics to text file
f = open('gradientBoost_take2.txt', 'w')
f.write( str(labels_order) );
f.write( str(confusion_matrix(y_valid,y_preds,labels=labels_order)) );
f.write( "\nAccuracy: %f" % (accuracy_score(y_valid,y_preds)) );
f.write( "\nclf = GradientBoostingClassifier(n_estimators=10,verbose=100)" );
# Now on to final submission
y_final = pd.DataFrame(clf.predict(testing.iloc[:,1:]).reshape([62096,]));
numbahs = testing['id']
df = pd.concat([numbahs,y_final],axis=1)
df.columns = ['id','country']
df.to_csv("gradientBoost_take2.csv",index=False)
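# ----------------------------------------------------------------------
# Editorial sketch (not part of the original script; the tiny label set
# below is made up): shows what the `labels` argument mentioned in the
# docstring does -- it fixes the row/column order of the confusion matrix,
# which is what makes the over-represented 'NDF' class easy to spot.
_demo_true = ['NDF', 'US', 'NDF', 'FR']
_demo_pred = ['NDF', 'NDF', 'NDF', 'FR']
print( confusion_matrix(_demo_true, _demo_pred, labels=['NDF', 'US', 'FR']) );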
| gpl-2.0 |
Extintor/piva | practica4/p4script6.py | 1 | 2421 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 7 18:01:14 2016
@author: paul
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
def cercle(s,radi):
y,x = np.ogrid[-s[0]/2: s[0]/2, -s[1]/2: s[1]/2]
mascara = (x**2 + y**2 < radi**2)
return mascara
def filtre_passaaltes(img):
radi = 80
mascara = ~cercle(img.shape,radi)
return img*mascara
def filtre_passabanda(img):
radi1 = 50
radi2 = 90
mascara1 = ~cercle(img.shape,radi1)
mascara2 = cercle(img.shape,radi2)
mascara = mascara1*mascara2
return img*mascara
def filtre_passabaixes(img):
radi = 80
mascara = cercle(img.shape,radi)
return img*mascara
def forma_imatge(img,red,green,blue):
newimg = np.zeros(img.shape,dtype="uint8")
newimg[:,:,0] = red
newimg[:,:,1] = green
newimg[:,:,2] = blue
return newimg
if __name__ == "__main__":
img = plt.imread('../Imatges/imatge17.jpg')
plt.figure()
plt.imshow(img)
fftimgred = np.fft.fft2(img[:,:,0])
fftimgred = np.fft.fftshift(fftimgred)
fftimggreen = np.fft.fft2(img[:,:,1])
fftimggreen = np.fft.fftshift(fftimggreen)
fftimgblue = np.fft.fft2(img[:,:,1])
fftimgblue = np.fft.fftshift(fftimgblue)
# plt.figure()
# plt.subplot(1,3,1)
# plt.imshow(np.absolute(np.log(fftimgred)))
# plt.subplot(1,3,2)
# plt.imshow(np.absolute(np.log(fftimggreen)))
# plt.subplot(1,3,3)
# plt.imshow(np.absolute(np.log(fftimgblue)))
fftimgred = filtre_passabaixes(fftimgred)
fftimggreen = filtre_passabanda(fftimggreen)
fftimgblue = filtre_passaaltes(fftimgblue)
fftimgred = np.fft.ifftshift(fftimgred)
red = np.fft.ifft2(fftimgred)
fftimggreen = np.fft.ifftshift(fftimggreen)
green = np.fft.ifft2(fftimggreen)
fftimgblue = np.fft.ifftshift(fftimgblue)
blue = np.fft.ifft2(fftimgblue)
red = np.absolute(red)
green = np.absolute(green)
blue= np.absolute(blue)
red = np.uint8((red/np.max(red))*255)
green = np.uint8((green/np.max(green))*255)
blue = np.uint8((blue/np.max(blue))*255)
plt.figure()
plt.subplot(1,3,1)
plt.imshow(red,cmap="gray")
plt.subplot(1,3,2)
plt.imshow(green,cmap="gray")
plt.subplot(1,3,3)
plt.imshow(blue,cmap="gray")
img = forma_imatge(img,red,green,blue)
plt.figure()
plt.imshow(img)
| gpl-3.0 |
Adai0808/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
f3r/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and therefore,
so are the corresponding Mahalanobis distances. One would do better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. Journal of the
    American Statistical Association, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
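# ----------------------------------------------------------------------
# Editorial sketch (not part of the original example): the docstring's
# formula d_{(mu, Sigma)}(x_i)^2 = (x_i - mu)' Sigma^{-1} (x_i - mu),
# written out with plain numpy (MLE estimates, hence ddof=0) for the
# first observation of the contaminated data set.
mu_hat = X.mean(axis=0)
precision_hat = np.linalg.inv(np.cov(X, rowvar=False, ddof=0))
d2_first = (X[0] - mu_hat).dot(precision_hat).dot(X[0] - mu_hat)
print("Squared Mahalanobis distance of X[0] under the MLE fit: %.3f" % d2_first)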
| bsd-3-clause |
robcarver17/pysystemtrade | sysbrokers/IB/ib_Fx_prices_data.py | 1 | 4592 | from collections import namedtuple
import pandas as pd
from sysbrokers.IB.client.ib_fx_client import ibFxClient
from sysbrokers.broker_fx_prices_data import brokerFxPricesData
from sysobjects.spot_fx_prices import fxPrices
from syslogdiag.log_to_screen import logtoscreen
from syscore.fileutils import get_filename_for_package
from syscore.objects import missing_instrument, missing_file, missing_data
IB_CCY_CONFIG_FILE = get_filename_for_package(
"sysbrokers.IB.ib_config_spot_FX.csv")
ibFXConfig = namedtuple("ibFXConfig", ["ccy1", "ccy2", "invert"])
class ibFxPricesData(brokerFxPricesData):
def __init__(self, ibconnection, log=logtoscreen("ibFxPricesData")):
self._ibconnection = ibconnection
super().__init__(log=log)
def __repr__(self):
return "IB FX price data"
@property
def ibconnection(self):
return self._ibconnection
@property
def ib_client(self) -> ibFxClient:
client = getattr(self, "_ib_client", None)
if client is None:
client = self._ib_client = ibFxClient(ibconnection=self.ibconnection,
log = self.log)
return client
def get_list_of_fxcodes(self) -> list:
config_data = self._get_ib_fx_config()
if config_data is missing_file:
self.log.warn(
"Can't get list of fxcodes for IB as config file missing")
return []
list_of_codes = list(config_data.CODE)
return list_of_codes
def _get_fx_prices_without_checking(self, currency_code: str) -> fxPrices:
ib_config_for_code = self._get_config_info_for_code(currency_code)
if ib_config_for_code is missing_instrument:
self.log.warn(
"Can't get prices as missing IB config for %s" %
currency_code, fx_code=currency_code)
return fxPrices.create_empty()
data = self._get_fx_prices_with_ib_config(currency_code, ib_config_for_code)
return data
    def _get_fx_prices_with_ib_config(self, currency_code: str, ib_config_for_code: ibFXConfig) -> fxPrices:
raw_fx_prices_as_series = self._get_raw_fx_prices(ib_config_for_code)
if len(raw_fx_prices_as_series) == 0:
self.log.warn("No available IB prices for %s %s" % (currency_code, str(ib_config_for_code))
, fx_code = currency_code )
return fxPrices.create_empty()
if ib_config_for_code.invert:
raw_fx_prices = 1.0 / raw_fx_prices_as_series
else:
raw_fx_prices = raw_fx_prices_as_series
# turn into a fxPrices
fx_prices = fxPrices(raw_fx_prices)
self.log.msg("Downloaded %d prices" % len(fx_prices), fx_code = currency_code)
return fx_prices
def _get_raw_fx_prices(self, ib_config_for_code: ibFXConfig) -> pd.Series:
raw_fx_prices = self.ib_client.broker_get_daily_fx_data(
ib_config_for_code.ccy1, ccy2=ib_config_for_code.ccy2
)
if raw_fx_prices is missing_data:
return pd.Series()
raw_fx_prices_as_series = raw_fx_prices["FINAL"]
return raw_fx_prices_as_series
def _get_config_info_for_code(self, currency_code: str) -> ibFXConfig:
new_log = self.log.setup(currency_code=currency_code)
config_data = self._get_ib_fx_config()
if config_data is missing_file:
new_log.warn(
"Can't get IB FX config for %s as config file missing" %
currency_code, fx_code = currency_code)
return missing_instrument
ccy1 = config_data[config_data.CODE == currency_code].CCY1.values[0]
ccy2 = config_data[config_data.CODE == currency_code].CCY2.values[0]
invert = (config_data[config_data.CODE ==
currency_code].INVERT.values[0] == "YES")
ib_config_for_code = ibFXConfig(ccy1, ccy2, invert)
return ib_config_for_code
# Configuration read in and cache
    def _get_ib_fx_config(self) -> pd.DataFrame:
config = getattr(self, "_config", None)
if config is None:
config = self._get_and_set_ib_config_from_file()
return config
    def _get_and_set_ib_config_from_file(self) -> pd.DataFrame:
try:
config_data = pd.read_csv(IB_CCY_CONFIG_FILE)
except BaseException:
self.log.warn("Can't read file %s" % IB_CCY_CONFIG_FILE)
config_data = missing_file
self._config = config_data
return config_data
| gpl-3.0 |
alexeyum/scikit-learn | sklearn/tests/test_isotonic.py | 9 | 14049 | import warnings
import numpy as np
import pickle
import copy
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [1, 1, 2, 3, 4, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_ymin_ymax():
# Test from @NelleV's issue:
# https://github.com/scikit-learn/scikit-learn/issues/6921
x = np.array([1.263, 1.318, -0.572, 0.307, -0.707, -0.176, -1.599, 1.059,
1.396, 1.906, 0.210, 0.028, -0.081, 0.444, 0.018, -0.377,
-0.896, -0.377, -1.327, 0.180])
y = isotonic_regression(x, y_min=0., y_max=0.1)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Also test decreasing case since the logic there is different
y = isotonic_regression(x, y_min=0., y_max=0.1, increasing=False)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Finally, test with only one bound
y = isotonic_regression(x, y_min=0., increasing=False)
assert(np.all(y >= 0))
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
def test_fast_predict():
# test that the faster prediction change doesn't
# affect out-of-sample predictions:
# https://github.com/scikit-learn/scikit-learn/pull/6206
rng = np.random.RandomState(123)
n_samples = 10 ** 3
# X values over the -10,10 range
X_train = 20.0 * rng.rand(n_samples) - 10
y_train = np.less(
rng.rand(n_samples),
1.0 / (1.0 + np.exp(-X_train))
).astype('int64')
weights = rng.rand(n_samples)
# we also want to test that everything still works when some weights are 0
weights[rng.rand(n_samples) < 0.1] = 0
slow_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
fast_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
# Build interpolation function with ALL input data, not just the
# non-redundant subset. The following 2 lines are taken from the
# .fit() method, without removing unnecessary points
X_train_fit, y_train_fit = slow_model._build_y(X_train, y_train,
sample_weight=weights,
trim_duplicates=False)
slow_model._build_f(X_train_fit, y_train_fit)
# fit with just the necessary data
fast_model.fit(X_train, y_train, sample_weight=weights)
X_test = 20.0 * rng.rand(n_samples) - 10
y_pred_slow = slow_model.predict(X_test)
y_pred_fast = fast_model.predict(X_test)
assert_array_equal(y_pred_slow, y_pred_fast)
def test_isotonic_copy_before_fit():
# https://github.com/scikit-learn/scikit-learn/issues/6628
ir = IsotonicRegression()
copy.copy(ir)
| bsd-3-clause |
PredictiveScienceLab/GPy | GPy/plotting/matplot_dep/variational_plots.py | 8 | 3973 | from matplotlib import pyplot as pb, numpy as np
def plot(parameterized, fignum=None, ax=None, colors=None, figsize=(12, 6)):
"""
Plot latent space X in 1D:
- if fig is given, create input_dim subplots in fig and plot in these
- if ax is given plot input_dim 1D latent space plots of X into each `axis`
- if neither fig nor ax is given create a figure with fignum and plot in there
colors:
colors of different latent space dimensions input_dim
"""
if ax is None:
fig = pb.figure(num=fignum, figsize=figsize)
if colors is None:
colors = pb.gca()._get_lines.color_cycle
pb.clf()
else:
colors = iter(colors)
lines = []
fills = []
bg_lines = []
means, variances = parameterized.mean.values, parameterized.variance.values
x = np.arange(means.shape[0])
for i in range(means.shape[1]):
if ax is None:
a = fig.add_subplot(means.shape[1], 1, i + 1)
elif isinstance(ax, (tuple, list)):
a = ax[i]
else:
raise ValueError("Need one ax per latent dimension input_dim")
bg_lines.append(a.plot(means, c='k', alpha=.3))
        lines.extend(a.plot(x, means.T[i], c=next(colors), label=r"$\mathbf{{X_{{{}}}}}$".format(i)))
fills.append(a.fill_between(x,
means.T[i] - 2 * np.sqrt(variances.T[i]),
means.T[i] + 2 * np.sqrt(variances.T[i]),
facecolor=lines[-1].get_color(),
alpha=.3))
a.legend(borderaxespad=0.)
a.set_xlim(x.min(), x.max())
if i < means.shape[1] - 1:
a.set_xticklabels('')
pb.draw()
a.figure.tight_layout(h_pad=.01) # , rect=(0, 0, 1, .95))
return dict(lines=lines, fills=fills, bg_lines=bg_lines)
def plot_SpikeSlab(parameterized, fignum=None, ax=None, colors=None, side_by_side=True):
"""
Plot latent space X in 1D:
- if fig is given, create input_dim subplots in fig and plot in these
- if ax is given plot input_dim 1D latent space plots of X into each `axis`
- if neither fig nor ax is given create a figure with fignum and plot in there
colors:
colors of different latent space dimensions input_dim
"""
if ax is None:
if side_by_side:
fig = pb.figure(num=fignum, figsize=(16, min(12, (2 * parameterized.mean.shape[1]))))
else:
fig = pb.figure(num=fignum, figsize=(8, min(12, (2 * parameterized.mean.shape[1]))))
if colors is None:
            # cycle through matplotlib's default property-cycle colors
            colors = cycle(pb.rcParams['axes.prop_cycle'].by_key()['color'])
pb.clf()
else:
colors = iter(colors)
plots = []
means, variances, gamma = parameterized.mean, parameterized.variance, parameterized.binary_prob
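    # gamma holds the per-point, per-dimension inclusion (spike-and-slab) probabilities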
x = np.arange(means.shape[0])
for i in range(means.shape[1]):
if side_by_side:
            sub1 = (means.shape[1], 2, 2 * i + 1)
            sub2 = (means.shape[1], 2, 2 * i + 2)
        else:
            sub1 = (means.shape[1] * 2, 1, 2 * i + 1)
            sub2 = (means.shape[1] * 2, 1, 2 * i + 2)
# mean and variance plot
a = fig.add_subplot(*sub1)
a.plot(means, c='k', alpha=.3)
        plots.extend(a.plot(x, means.T[i], c=next(colors), label=r"$\mathbf{{X_{{{}}}}}$".format(i)))
a.fill_between(x,
means.T[i] - 2 * np.sqrt(variances.T[i]),
means.T[i] + 2 * np.sqrt(variances.T[i]),
facecolor=plots[-1].get_color(),
alpha=.3)
a.legend(borderaxespad=0.)
a.set_xlim(x.min(), x.max())
if i < means.shape[1] - 1:
a.set_xticklabels('')
# binary prob plot
a = fig.add_subplot(*sub2)
        a.bar(x, gamma[:, i], bottom=0., linewidth=0, width=1.0, align='center')
a.set_xlim(x.min(), x.max())
a.set_ylim([0.,1.])
pb.draw()
fig.tight_layout(h_pad=.01) # , rect=(0, 0, 1, .95))
return fig
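

def _example_usage():
    # Illustrative sketch, not part of the original module: fit a small Bayesian
    # GPLVM (assumed API: GPy.models.BayesianGPLVM with a .X variational posterior
    # exposing .mean and .variance) and plot its latent space with plot() above.
    import GPy
    Y = np.random.randn(40, 5)
    m = GPy.models.BayesianGPLVM(Y, input_dim=2, num_inducing=10)
    m.optimize(max_iters=50)
    # one subplot per latent dimension: posterior mean +/- 2 standard deviations
    return plot(m.X)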
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/zh/svm/plot_oneclass.py | 95 | 2419 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
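# nu=0.1 is an upper bound on the fraction of training points allowed outside the
# frontier (and a lower bound on the fraction of support vectors); gamma is the RBF
# kernel coefficient, with larger values fitting the frontier more tightly.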
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
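# predict returns +1 for points inside the learned frontier and -1 outside, so the
# counts below are: training points flagged as outliers, regular novel points flagged
# as outliers, and abnormal points wrongly accepted as normal.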
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
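# decision_function is positive inside the learned region and negative outside;
# its zero level set, drawn below in dark red, is the frontier itself.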
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s, edgecolors='k')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s,
edgecolors='k')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s,
edgecolors='k')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| gpl-3.0 |