repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Srisai85/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
sbenthall/bigbang | bigbang/process.py | 1 | 6593 | from bigbang.parse import get_date
import pandas as pd
import datetime
import networkx as nx
import numpy as np
import email.utils
import re
import Levenshtein
from functools import partial
def consolidate_senders_activity(activity_df, to_consolidate):
"""
takes a DataFrame in the format returned by activity
takes a list of tuples of format ('from 1', 'from 2') to consolidate
returns the consolidated DataFrame (a copy, not in place)
"""
df = activity_df.copy(deep=True)
for consolidate in to_consolidate:
column_a, column_b = consolidate
if column_a in df.columns and column_b in df.columns:
df[column_a] = df[column_a] + df[column_b]
df.drop(column_b, inplace=True, axis=1) # delete the second column
return df
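# Illustrative usage sketch (hypothetical column names, not part of the
# original module): merging two spellings of the same sender.
#
# >>> act = pd.DataFrame({'jane@x.org (Jane)': [1, 0],
# ...                     'jane at x.org (Jane)': [0, 2]})
# >>> merged = consolidate_senders_activity(
# ...     act, [('jane@x.org (Jane)', 'jane at x.org (Jane)')])
# >>> list(merged.columns), merged['jane@x.org (Jane)'].tolist()
# (['jane@x.org (Jane)'], [1, 2])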
def matricize(series, func):
"""
create a matrix by applying func to pairwise combos of elements in a Series
returns a square matrix as a DataFrame
should return a symmetric matrix if func(a,b) == func(b,a)
should return the identity matrix if func == '=='
"""
matrix = pd.DataFrame(columns=series, index=series)
for index, element in enumerate(series):
for second_index, second_element in enumerate(series):
matrix.iloc[index, second_index] = func(element, second_element)
return matrix
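# Illustrative example (hypothetical data, not part of the original module):
# with an equality test the result is the identity matrix, as noted above.
#
# >>> m = matricize(pd.Series(['a', 'ab', 'abc']), lambda x, y: int(x == y))
# >>> m.values.tolist()
# [[1, 0, 0], [0, 1, 0], [0, 0, 1]]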
def minimum_but_not_self(column, dataframe):
minimum = 100
for index, value in dataframe[column].iteritems():
if index == column:
continue
if value < minimum:
minimum = value
return minimum
def sorted_matrix(from_dataframe,limit=None,sort_key=None):
if limit is None:
limit = len(from_dataframe.columns)
distancedf = matricize(from_dataframe.columns[:limit - 1], from_header_distance)
# specify that the values in the matrix are integers
df = distancedf.astype(int)
# unless otherwise specified, sort to minimize the integer values with rows other than yourself
sort_key = sort_key if sort_key is not None else partial(minimum_but_not_self, dataframe=df)
new_columns = sorted(df.columns[:limit - 1], key=sort_key)
new_df = df.reindex(index=new_columns, columns=new_columns)
return new_df
def resolve_sender_entities(act, lexical_distance=0):
"""
Given an Archive's activity matrix, return a dict of lists, each containing
message senders ('From' fields) that have been grouped as probably
being the same entity.
"""
# senders ordered by descending total activity
senders = act.sum(0).sort_values(ascending=False)
senders_act = senders.index
# senders in lexical order
senders_lex = act.columns.sort_values()
senders_lex_dict = dict([(p[1],p[0]) for p in enumerate(senders_lex)])
n = len(senders)
# binary matrix of similarity between entries
sim = np.zeros((n,n))
# find similarity
for i in range(n):
name = senders_act[i]
i = senders_lex_dict[name]
# checking only lexically close entries and
# in proportion to total activity
# is a performance hack.
for j in range(max(0, i - (n - i + 1) / 2), i + (n - i + 1) / 2):  # clamp to avoid wrapping to the end of the list
d = from_header_distance(senders_lex[i],senders_lex[j])
sim[i,j] = (d <= lexical_distance)
# An entity is a connected component of the resulting graph
G = nx.Graph(sim)
entities_list = [[senders_lex[j] for j in x] for x in nx.connected_components(G)]
# give each entity a label based on its most active 'member'
entities_dict = {}
for e in entities_list:
# TODO: tighten up this labeling function
label = sorted(e,key=lambda n:senders[n],reverse=True)[0]
entities_dict[label] = e
return entities_dict
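# Behavior sketch (hypothetical 'From' strings, not from the original tests):
# with the default lexical_distance=0, spellings that normalize to the same
# string are grouped into one entity, e.g. '"Jane Doe" <jdoe at example.com>'
# and 'Jane Doe <[email protected]>' both normalize to 'jane doe [email protected]'
# and so land in the same entity list (provided they fall inside the
# lexically-local window scanned above), labeled by whichever spelling has
# the higher total activity.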
ren = "([\w\+\.\-]+(\@| at )[\w+\.\-]*) \((.*)\)"
def from_header_distance(a, b,verbose=False):
"""
A distance measure specifically for the 'From' header of emails.
Normalizes based on common differences in client handling of email,
then computes Levenshtein distance between components of the field.
"""
# this translate table is one way you are supposed to
# delete characters from a unicode string
stop_characters = unicode('"<>')
stop_characters_map = dict((ord(char), None) for char in stop_characters)
a_normal = ""
b_normal = ""
try:
a_normal = unicode(a).lower().translate(stop_characters_map).replace(' at ','@')
except UnicodeDecodeError as e:
a_normal = a.decode("utf-8").lower().translate(stop_characters_map).replace(' at ','@')
try:
b_normal = unicode(b).lower().translate(stop_characters_map).replace(' at ','@')
except UnicodeDecodeError as e:
b_normal = b.decode("utf-8").lower().translate(stop_characters_map).replace(' at ','@')
ag = re.match(ren,a_normal)
bg = re.match(ren,b_normal)
dist = float("inf")
if ag is None or bg is None:
if verbose:
print "malformed pair:"
print a
print b
dist = Levenshtein.distance(a_normal, b_normal)
else:
dist = Levenshtein.distance(ag.groups()[0],bg.groups()[0]) \
+ Levenshtein.distance(ag.groups()[1],bg.groups()[1])
if len(ag.groups()[2]) > 5 and len(bg.groups()[2]) > 5:
dist = min(dist,Levenshtein.distance(ag.groups()[2],bg.groups()[2]))
return dist
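# Behavior sketch (hypothetical headers, not from the original tests): for
# two well-formed fields such as '[email protected] (Jane Doe)' and
# 'jdoe at example.com (Jane D.)' both addresses normalize to
# '[email protected]', so the address terms contribute 0; since both name
# parts are longer than 5 characters the distance may additionally be capped
# by the Levenshtein distance between the names. Headers that do not match
# the 'address (name)' pattern fall back to a plain Levenshtein distance over
# the whole normalized strings.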
def eij(m,parts,i,j):
total_edges = m.sum().sum()
part_i = parts[i]
part_j = parts[j]
edges_in_range = m[np.ix_(np.array(part_i),np.array(part_j))]
return edges_in_range.sum().sum() / float(total_edges)  # float() guards against Python 2 integer truncation
def ai(m,parts,i):
total = 0
for j in range(len(parts)):
total = total + eij(m,parts,i,j)
return total
def bi(m,parts,i):
total = 0
for j in range(len(parts)):
total = total + eij(m,parts,j,i) # note switched i,j
return total
def modularity(m,parts):
"""
Compute modularity of an adjacency matrix.
Use metric from:
Zanetti, M. and Schweitzer, F. 2012.
"A Network Perspective on Software Modularity"
ARCS Workshops 2012, pp. 175-186.
"""
expected = 0
actual = 0
for i in range(len(parts)):
expected = expected + ai(m,parts,i) * bi(m,parts,i)
actual = actual + eij(m,parts,i,i)
q = (actual - expected) / (1 - expected)
return q
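# Worked sketch (hypothetical toy graph, not part of the original module):
# two disconnected node pairs, partitioned exactly along those pairs, give
# maximal modularity. A float adjacency matrix is used so the edge-fraction
# ratios are not truncated by Python 2 integer division.
#
# >>> m = np.array([[0, 1, 0, 0],
# ...               [1, 0, 0, 0],
# ...               [0, 0, 0, 1],
# ...               [0, 0, 1, 0]], dtype=float)
# >>> modularity(m, [[0, 1], [2, 3]])
# 1.0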
def domain_name_from_email(name):
address = email.utils.parseaddr(name)[1]
if '@' in address: domain = address.split('@')[1]
else: domain = address.split(' at ')[1]
return domain.lower()
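# Quick sketch (hypothetical address): 'Jane Doe <[email protected]>' maps to
# 'example.com'; the ' at ' branch handles archives that obfuscate addresses
# as 'user at host'.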
| agpl-3.0 |
sudikrt/costproML | new Data/staticData/task.py | 2 | 2892 | import pandas as pd
from pandas.tseries.offsets import *
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import datetime
df = pd.read_csv ("outDataSingle.csv")
print df.head()
grouped = df.groupby ('job')
print "Job List : Engineer, Tester, Carpenter, Cook, Plumber, Mechanic";
prof = raw_input ("Enter a job :").lower()
if prof == "Engineer".lower():
group = list(grouped)[2][1]
elif prof == "Tester".lower():
group = list(grouped)[5][1]
elif prof == "Cook".lower():
group = list(grouped)[1][1]
elif prof == "Carpenter".lower():
group = list(grouped)[0][1]
elif prof == "Plumber".lower() :
group = list(grouped)[4][1]
else :
group = list(grouped)[3][1]
year = input ("Enter Year :")
ts_data = pd.TimeSeries(group.maxsal.values, index=pd.to_datetime(group.date))
fig, axes = plt.subplots(figsize=(10, 8), nrows=3)
ts_data.plot(ax=axes[0])
ts_log_data = np.log (ts_data)
ts_log_data.plot (ax=axes[1], style='b-', label='actual')
model = sm.tsa.ARMA (ts_log_data, order=(1,1)).fit()
y_pred = model.predict (ts_log_data.index[0].isoformat(), ts_log_data.index[-1].isoformat())
y_pred.plot(ax=axes[2], style='r--', label='in-sample fit')
start_date = (pd.to_datetime(str(year) + '-' + str(01) + '-' + str(01))).date()
da = start_date - ts_log_data.index[-1].date()
start_date = (ts_log_data.index[-1] + Day (1)).date()
end_date = (ts_log_data.index[-1] + Day (da.days)).date()
#start_date = (ts_log_data.index[-1] + Day (1)).date()
#end_date = (ts_log_data.index[-1] + Day (200)).date()
# start_date = (pd.to_datetime(str(year) + '-' + str(01) + '-' + str(01)))
# if (start_date < ts_log_data.index[-1]) :
# da = (ts_log_data.index[-1] - start_date)
# start_date = da
# end_date = (da + Day(1)).date()
# else :
# da = start_date - ts_log_data.index[-1].date()
# start_date = (ts_log_data.index[-1] + Day (1)).date()
# end_date = (ts_log_data.index[-1] + Day (da.days)).date()
y_forecast = model.predict(start_date.isoformat(), end_date.isoformat())
y_forecast.plot(ax=axes[2], style='r--', label='forecast')
#print(y_forecast)
maxSAl = np.exp(y_forecast)
#print str(np.array(maxSAl)[0])
#print(np.exp(y_forecast.tail(1)))
print "Min sal"
ts_data = pd.TimeSeries(group.minsal.values, index=pd.to_datetime(group.date))
#fig, axes = plt.subplots(figsize=(10, 8), nrows=3)
#ts_data.plot(ax=axes[0])
ts_log_data = np.log (ts_data)
#ts_log_data.plot (ax=axes[1], style='b-', label='actual')
model = sm.tsa.ARMA (ts_log_data, order=(1,1)).fit()
y_pred = model.predict (ts_log_data.index[0].isoformat(), ts_log_data.index[-1].isoformat())
#y_pred.plot(ax=axes[2], style='r--', label='in-sample fit')
y_forecast = model.predict(start_date.isoformat(), end_date.isoformat())
minSAl = np.exp(y_forecast.tail(1))
print "Max sal :" + str(maxSAl)
print "Min sal :" + str(minSAl)
plt.show()
| apache-2.0 |
louispotok/pandas | pandas/io/parquet.py | 4 | 10091 | """ parquet compat """
from warnings import catch_warnings
from distutils.version import LooseVersion
from pandas import DataFrame, RangeIndex, Int64Index, get_option
from pandas.compat import string_types
import pandas.core.common as com
from pandas.io.common import get_filepath_or_buffer, is_s3_url
def get_engine(engine):
""" return our implementation """
if engine == 'auto':
engine = get_option('io.parquet.engine')
if engine == 'auto':
# try engines in this order
try:
return PyArrowImpl()
except ImportError:
pass
try:
return FastParquetImpl()
except ImportError:
pass
raise ImportError("Unable to find a usable engine; "
"tried using: 'pyarrow', 'fastparquet'.\n"
"pyarrow or fastparquet is required for parquet "
"support")
if engine not in ['pyarrow', 'fastparquet']:
raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
if engine == 'pyarrow':
return PyArrowImpl()
elif engine == 'fastparquet':
return FastParquetImpl()
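# Behavior sketch: get_engine('auto') first consults the 'io.parquet.engine'
# option; if that is also 'auto' it tries pyarrow and then fastparquet,
# raising ImportError when neither is importable. Any explicit engine name
# other than 'pyarrow' or 'fastparquet' raises ValueError.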
class BaseImpl(object):
api = None # module
@staticmethod
def validate_dataframe(df):
if not isinstance(df, DataFrame):
raise ValueError("to_parquet only supports IO with DataFrames")
# must have value column names (strings only)
if df.columns.inferred_type not in {'string', 'unicode'}:
raise ValueError("parquet must have string column names")
# index level names must be strings
valid_names = all(
isinstance(name, string_types)
for name in df.index.names
if name is not None
)
if not valid_names:
raise ValueError("Index level names must be strings")
def write(self, df, path, compression, **kwargs):
raise com.AbstractMethodError(self)
def read(self, path, columns=None, **kwargs):
raise com.AbstractMethodError(self)
class PyArrowImpl(BaseImpl):
def __init__(self):
# since pandas is a dependency of pyarrow
# we need to import on first use
try:
import pyarrow
import pyarrow.parquet
except ImportError:
raise ImportError(
"pyarrow is required for parquet support\n\n"
"you can install via conda\n"
"conda install pyarrow -c conda-forge\n"
"\nor via pip\n"
"pip install -U pyarrow\n"
)
if LooseVersion(pyarrow.__version__) < '0.4.1':
raise ImportError(
"pyarrow >= 0.4.1 is required for parquet support\n\n"
"you can install via conda\n"
"conda install pyarrow -c conda-forge\n"
"\nor via pip\n"
"pip install -U pyarrow\n"
)
self._pyarrow_lt_060 = (
LooseVersion(pyarrow.__version__) < LooseVersion('0.6.0'))
self._pyarrow_lt_070 = (
LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'))
self.api = pyarrow
def write(self, df, path, compression='snappy',
coerce_timestamps='ms', **kwargs):
self.validate_dataframe(df)
if self._pyarrow_lt_070:
self._validate_write_lt_070(df)
path, _, _, _ = get_filepath_or_buffer(path, mode='wb')
if self._pyarrow_lt_060:
table = self.api.Table.from_pandas(df, timestamps_to_ms=True)
self.api.parquet.write_table(
table, path, compression=compression, **kwargs)
else:
table = self.api.Table.from_pandas(df)
self.api.parquet.write_table(
table, path, compression=compression,
coerce_timestamps=coerce_timestamps, **kwargs)
def read(self, path, columns=None, **kwargs):
path, _, _, should_close = get_filepath_or_buffer(path)
if self._pyarrow_lt_070:
result = self.api.parquet.read_pandas(path, columns=columns,
**kwargs).to_pandas()
else:
kwargs['use_pandas_metadata'] = True
result = self.api.parquet.read_table(path, columns=columns,
**kwargs).to_pandas()
if should_close:
try:
path.close()
except: # noqa: flake8
pass
return result
def _validate_write_lt_070(self, df):
# Compatibility shim for pyarrow < 0.7.0
# TODO: Remove in pandas 0.23.0
from pandas.core.indexes.multi import MultiIndex
if isinstance(df.index, MultiIndex):
msg = (
"Multi-index DataFrames are only supported "
"with pyarrow >= 0.7.0"
)
raise ValueError(msg)
# Validate index
if not isinstance(df.index, Int64Index):
msg = (
"pyarrow < 0.7.0 does not support serializing {} for the "
"index; you can .reset_index() to make the index into "
"column(s), or install the latest version of pyarrow or "
"fastparquet."
)
raise ValueError(msg.format(type(df.index)))
if not df.index.equals(RangeIndex(len(df))):
raise ValueError(
"pyarrow < 0.7.0 does not support serializing a non-default "
"index; you can .reset_index() to make the index into "
"column(s), or install the latest version of pyarrow or "
"fastparquet."
)
if df.index.name is not None:
raise ValueError(
"pyarrow < 0.7.0 does not serialize indexes with a name; you "
"can set the index.name to None or install the latest version "
"of pyarrow or fastparquet."
)
class FastParquetImpl(BaseImpl):
def __init__(self):
# since pandas is a dependency of fastparquet
# we need to import on first use
try:
import fastparquet
except ImportError:
raise ImportError(
"fastparquet is required for parquet support\n\n"
"you can install via conda\n"
"conda install fastparquet -c conda-forge\n"
"\nor via pip\n"
"pip install -U fastparquet"
)
if LooseVersion(fastparquet.__version__) < '0.1.0':
raise ImportError(
"fastparquet >= 0.1.0 is required for parquet "
"support\n\n"
"you can install via conda\n"
"conda install fastparquet -c conda-forge\n"
"\nor via pip\n"
"pip install -U fastparquet"
)
self.api = fastparquet
def write(self, df, path, compression='snappy', **kwargs):
self.validate_dataframe(df)
# thriftpy/protocol/compact.py:339:
# DeprecationWarning: tostring() is deprecated.
# Use tobytes() instead.
if is_s3_url(path):
# path is s3:// so we need to open the s3file in 'wb' mode.
# TODO: Support 'ab'
path, _, _, _ = get_filepath_or_buffer(path, mode='wb')
# And pass the opened s3file to the fastparquet internal impl.
kwargs['open_with'] = lambda path, _: path
else:
path, _, _, _ = get_filepath_or_buffer(path)
with catch_warnings(record=True):
self.api.write(path, df,
compression=compression, **kwargs)
def read(self, path, columns=None, **kwargs):
if is_s3_url(path):
# When path is s3:// an S3File is returned.
# We need to retain the original path(str) while also
# pass the S3File().open function to the fastparquet internal impl.
s3, _, _, should_close = get_filepath_or_buffer(path)
try:
parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open)
finally:
s3.close()
else:
path, _, _, _ = get_filepath_or_buffer(path)
parquet_file = self.api.ParquetFile(path)
return parquet_file.to_pandas(columns=columns, **kwargs)
def to_parquet(df, path, engine='auto', compression='snappy', **kwargs):
"""
Write a DataFrame to the parquet format.
Parameters
----------
df : DataFrame
path : string
File path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
kwargs
Additional keyword arguments passed to the engine
"""
impl = get_engine(engine)
return impl.write(df, path, compression=compression, **kwargs)
def read_parquet(path, engine='auto', columns=None, **kwargs):
"""
Load a parquet object from the file path, returning a DataFrame.
.. versionadded:: 0.21.0
Parameters
----------
path : string
File path
columns: list, default=None
If not None, only these columns will be read from the file.
.. versionadded:: 0.21.1
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
kwargs are passed to the engine
Returns
-------
DataFrame
"""
impl = get_engine(engine)
return impl.read(path, columns=columns, **kwargs)
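# Usage sketch (hypothetical file path; in user code these helpers are
# normally reached through DataFrame.to_parquet / pandas.read_parquet):
#
# >>> df = DataFrame({'a': [1, 2, 3]})
# >>> to_parquet(df, 'example.parquet', engine='auto', compression='snappy')
# >>> read_parquet('example.parquet', columns=['a']).equals(df)
# True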
| bsd-3-clause |
gojomo/gensim | gensim/sklearn_api/ftmodel.py | 1 | 10825 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Authors: M.Cemil Guney <[email protected]>
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit-learn interface for :class:`~gensim.models.fasttext.FastText`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.sklearn_api import FTTransformer
>>>
>>> # Create a model to represent each word by a 10 dimensional vector.
>>> model = FTTransformer(vector_size=10, min_count=1, seed=1)
>>>
>>> # What is the vector representations of the word 'graph' and 'system'?
>>> wordvecs = model.fit(common_texts).transform(['graph', 'system'])
>>> assert wordvecs.shape == (2, 10)
Retrieve word-vector for vocab and out-of-vocab word:
.. sourcecode:: pycon
>>> existent_word = "system"
>>> existent_word in model.gensim_model.wv.vocab
True
>>> existent_word_vec = model.transform(existent_word) # numpy vector of a word
>>> assert existent_word_vec.shape == (1, 10)
>>>
>>> oov_word = "sys"
>>> oov_word in model.gensim_model.wv.vocab
False
>>> oov_word_vec = model.transform(oov_word) # numpy vector of a word
>>> assert oov_word_vec.shape == (1, 10)
"""
import numpy as np
import six
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
class FTTransformer(TransformerMixin, BaseEstimator):
"""Base FastText module, wraps :class:`~gensim.models.fasttext.FastText`.
For more information please have a look to `Enriching Word Vectors with Subword
Information <https://arxiv.org/abs/1607.04606>`_.
"""
def __init__(self, sg=0, hs=0, vector_size=100, alpha=0.025, window=5, min_count=5,
max_vocab_size=None, word_ngrams=1, sample=1e-3, seed=1,
workers=3, min_alpha=0.0001, negative=5, ns_exponent=0.75,
cbow_mean=1, hashfxn=hash, epochs=5, null_word=0, min_n=3,
max_n=6, sorted_vocab=1, bucket=2000000, trim_rule=None,
batch_words=10000):
"""
Parameters
----------
sg : {1, 0}, optional
Training algorithm: skip-gram if `sg=1`, otherwise CBOW.
hs : {1,0}, optional
If 1, hierarchical softmax will be used for model training.
If set to 0, and `negative` is non-zero, negative sampling will be used.
vector_size : int, optional
Dimensionality of the word vectors.
alpha : float, optional
The initial learning rate.
window : int, optional
The maximum distance between the current and predicted word within a sentence.
min_count : int, optional
The model ignores all words with total frequency lower than this.
max_vocab_size : int, optional
Limits the RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
Set to `None` for no limit.
word_ngrams : {1,0}, optional
If 1, uses enriches word vectors with subword(n-grams) information.
If 0, this is equivalent to :class:`~gensim.models.word2vec.Word2Vec`.
sample : float, optional
The threshold for configuring which higher-frequency words are randomly downsampled,
useful range is (0, 1e-5).
seed : int, optional
Seed for the random number generator. Initial vectors for each word are seeded with a hash of
the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
use of the `PYTHONHASHSEED` environment variable to control hash randomization).
workers : int, optional
Use these many worker threads to train the model (=faster training with multicore machines).
min_alpha : float, optional
Learning rate will linearly drop to `min_alpha` as training progresses.
negative : int, optional
If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
should be drawn (usually between 5-20).
If set to 0, no negative sampling is used.
ns_exponent : float, optional
The exponent used to shape the negative sampling distribution. A value of 1.0 samples exactly in proportion
to the frequencies, 0.0 samples all words equally, while a negative value samples low-frequency words more
than high-frequency words. The popular default value of 0.75 was chosen by the original Word2Vec paper.
More recently, in https://arxiv.org/abs/1804.04212, Caselles-Dupré, Lesaint, & Royo-Letelier suggest that
other values may perform better for recommendation applications.
cbow_mean : {1,0}, optional
If 0, use the sum of the context word vectors. If 1, use the mean, only applies when cbow is used.
hashfxn : function, optional
Hash function to use to randomly initialize weights, for increased training reproducibility.
epochs : int, optional
Number of iterations (epochs) over the corpus.
min_n : int, optional
Minimum length of char n-grams to be used for training word representations.
max_n : int, optional
Max length of char ngrams to be used for training word representations. Set `max_n` to be
lesser than `min_n` to avoid char ngrams being used.
sorted_vocab : {1,0}, optional
If 1, sort the vocabulary by descending frequency before assigning word indices.
bucket : int, optional
Character ngrams are hashed into a fixed number of buckets, in order to limit the
memory usage of the model. This option specifies the number of buckets used by the model.
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during
:meth:`~gensim.models.fasttext.FastText.build_vocab` and is not stored as part of the model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
batch_words : int, optional
Target size (in words) for batches of examples passed to worker threads (and
thus cython routines). (Larger batches will be passed if individual
texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
"""
self.gensim_model = None
self.sg = sg
self.hs = hs
self.vector_size = vector_size
self.alpha = alpha
self.window = window
self.min_count = min_count
self.max_vocab_size = max_vocab_size
self.word_ngrams = word_ngrams
self.sample = sample
self.seed = seed
self.workers = workers
self.min_alpha = min_alpha
self.negative = negative
self.ns_exponent = ns_exponent
self.cbow_mean = cbow_mean
self.hashfxn = hashfxn
self.epochs = epochs
self.null_word = null_word
self.min_n = min_n
self.max_n = max_n
self.sorted_vocab = sorted_vocab
self.bucket = bucket
self.trim_rule = trim_rule
self.batch_words = batch_words
def fit(self, X, y=None):
"""Fit the model according to the given training data.
Parameters
----------
X : iterable of iterables of str
Can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
Returns
-------
:class:`~gensim.sklearn_api.ftmodel.FTTransformer`
The trained model.
"""
self.gensim_model = models.FastText(
sentences=X, sg=self.sg, hs=self.hs, vector_size=self.vector_size,
alpha=self.alpha, window=self.window, min_count=self.min_count,
max_vocab_size=self.max_vocab_size, word_ngrams=self.word_ngrams,
sample=self.sample, seed=self.seed, workers=self.workers,
min_alpha=self.min_alpha, negative=self.negative,
ns_exponent=self.ns_exponent, cbow_mean=self.cbow_mean,
hashfxn=self.hashfxn, epochs=self.epochs, null_word=self.null_word,
min_n=self.min_n, max_n=self.max_n, sorted_vocab=self.sorted_vocab,
bucket=self.bucket, trim_rule=self.trim_rule,
batch_words=self.batch_words
)
return self
def transform(self, words):
"""Get the word vectors the input words.
Parameters
----------
words : {iterable of str, str}
Word or a collection of words to be transformed.
Returns
-------
np.ndarray of shape [`len(words)`, `vector_size`]
A 2D array where each row is the vector of one word.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
# Accept a single word as well as an iterable of words
if isinstance(words, six.string_types):
words = [words]
vectors = [self.gensim_model.wv[word] for word in words]
return np.reshape(np.array(vectors), (len(words), self.vector_size))
| lgpl-2.1 |
andrewnc/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
giorgiop/scipy | scipy/stats/_binned_statistic.py | 26 | 17723 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import callable
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for a set of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : array_like
A sequence of values to be binned.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First a basic example:
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled', alpha=0.2,
... label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, xy = binned_statistic_dd([x], values, statistic,
bins, range)
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
return BinnedStatisticResult(medians, edges[0], xy)
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None):
"""
Compute a bidimensional binned statistic for a set of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (M,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx=ny=bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edges = y_edges = bins),
* the bin edges in each dimension (x_edges, y_edges = bins).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin
x_edges : (nx + 1) ndarray
The bin edges along the first dimension.
y_edges : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as `values`.
See Also
--------
numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
.. versionadded:: 0.11.0
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, xy = binned_statistic_dd([x, y], values, statistic,
bins, range)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
return BinnedStatistic2dResult(medians, edges[0], edges[1], xy)
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as x.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each two-dimensional bin
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
np.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
.. versionadded:: 0.11.0
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# This code is based on np.histogramdd
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D * [None]
dedges = D * [None]
try:
M = len(bins)
if M != D:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = D * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(0), float))
smax = np.atleast_1d(np.array(sample.max(0), float))
else:
smin = np.zeros(D)
smax = np.zeros(D)
for i in np.arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in np.arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in np.arange(D):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in np.arange(D):
Ncount[i] = np.digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in np.arange(D):
# Rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal)
== np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
xy = np.zeros(N, int)
for i in np.arange(0, D - 1):
xy += Ncount[ni[i]] * nbin[ni[i + 1:]].prod()
xy += Ncount[ni[-1]]
result = np.empty(nbin.prod(), float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
a = flatcount.nonzero()
result[a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
flatsum2 = np.bincount(xy, values ** 2)
a = flatcount.nonzero()
result[a] = np.sqrt(flatsum2[a] / flatcount[a]
- (flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(xy, None)
a = np.arange(len(flatcount))
result[a] = flatcount
elif statistic == 'sum':
result.fill(0)
flatsum = np.bincount(xy, values)
a = np.arange(len(flatsum))
result[a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(xy):
result[i] = np.median(values[xy == i])
elif callable(statistic):
with warnings.catch_warnings():
# Numpy generates a warnings for mean/std/... with empty list
warnings.filterwarnings('ignore', category=RuntimeWarning)
old = np.seterr(invalid='ignore')
try:
null = statistic([])
except:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(xy):
result[i] = statistic(values[xy == i])
# Shape into a proper matrix
result = result.reshape(np.sort(nbin))
for i in np.arange(nbin.size):
j = ni.argsort()[i]
result = result.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D * [slice(1, -1)]
result = result[core]
if (result.shape != nbin - 2).any():
raise RuntimeError('Internal Shape Error')
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
return BinnedStatisticddResult(result, edges, xy)
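# Note: binned_statistic and binned_statistic_2d above delegate to this
# function with [x] and [x, y] respectively; with statistic='count' the
# result is an unweighted D-dimensional histogram of `sample` and the
# `values` argument does not affect the counts.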
| bsd-3-clause |
Eric89GXL/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 52 | 8004 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString
from docscrape import FunctionDoc
from docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
JosmanPS/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and lets any indexable datastructure pass through unchanged.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
zhushun0008/sms-tools | lectures/08-Sound-transformations/plots-code/sineModelTimeScale-functions.py | 24 | 2725 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time, math
from scipy.interpolate import interp1d
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import stft as STFT
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/mridangam.wav')
x1 = x[:int(1.49*fs)]
w = np.hamming(801)
N = 2048
t = -90
minSineDur = .005
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
Ns = 512
H = Ns//4                                               # hop size, kept as an integer
sfreq, smag, sphase = SM.sineModelAnal(x1, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
timeScale = np.array([.01, .0, .03, .03, .335, .8, .355, .82, .671, 1.0, .691, 1.02, .858, 1.1, .878, 1.12, 1.185, 1.8, 1.205, 1.82, 1.49, 2.0])
L = sfreq[:,0].size # number of input frames
maxInTime = max(timeScale[::2]) # maximum value used as input times
maxOutTime = max(timeScale[1::2]) # maximum value used in output times
outL = int(L*maxOutTime/maxInTime) # number of output frames
inFrames = L*timeScale[::2]/maxInTime # input time values in frames
outFrames = outL*timeScale[1::2]/maxOutTime # output time values in frames
timeScalingEnv = interp1d(outFrames, inFrames, fill_value=0) # interpolation function
indexes = timeScalingEnv(np.arange(outL)) # generate frame indexes for the output
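# Illustration of the mapping (approximate, for intuition only): the timeScale values are
# read as (input time, output time) pairs, so the pair (.335, .8) places the event heard at
# 0.335 s of the input at 0.8 s of the output. interp1d inverts that relation frame-wise,
# so output frame n copies its sine-track frequencies/magnitudes from input frame indexes[n].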
ysfreq = sfreq[int(round(indexes[0])),:]                 # first output frame
ysmag = smag[int(round(indexes[0])),:]                   # first output frame
for l in indexes[1:]:                                    # generate frames for output sine tracks
    ysfreq = np.vstack((ysfreq, sfreq[int(round(l)),:]))
    ysmag = np.vstack((ysmag, smag[int(round(l)),:]))
mag1 = np.sum(10**(smag/20), axis=1)
mag2 = np.sum(10**(ysmag/20), axis=1)
mag1 = 20*np.log10(mag1)
mag2 = 20*np.log10(mag2)
plt.figure(1, figsize=(9, 7))
maxplotfreq = 4000.0
plt.subplot(3,1,1)
plt.plot(H*indexes/float(fs), H*np.arange(outL)/float(fs), color='k', lw=1.5)
plt.autoscale(tight=True)
plt.xlabel('input times')
plt.ylabel('output times')
plt.title('output scaling')
plt.subplot(3,1,2)
plt.plot(H*np.arange(mag1.size)/float(fs), mag1, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('input magnitude sines')
plt.subplot(3,1,3)
plt.plot(H*np.arange(mag2.size)/float(fs), mag2, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('output magnitude sines')
plt.tight_layout()
plt.savefig('sineModelTimeScale-functions.png')
plt.show()
| agpl-3.0 |
themrmax/scikit-learn | examples/decomposition/plot_pca_iris.py | 49 | 1511 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral,
edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
rishikksh20/scikit-learn | examples/plot_digits_pipe.py | 65 | 1652 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
edickie/ciftify | tests/integration/ciftify_integration_tests.py | 1 | 46187 | #!/usr/bin/env python3
"""
Runs all possible ciftify functions on some test data
Usage:
ciftify_integration_tests [options] <testing_dir> <fixtures-dir>
Arguments:
<testing_dir> PATH The directory to run the tests inside
<fixtures-dir> PATH The directory with the csv to test the results against
Options:
--outputs-dir PATH The directory to write the generated outputs into
-h, --help Prints this message
DETAILS
If '--outputs-dir' is not given, outputs will be written to
the following <testing_dir>/run_YYYY-MM-DD
Written by Erin W Dickie
"""
# coding: utf-8
# In[1]:
import ciftify
from ciftify.utils import run
import os
import pandas as pd
import datetime
import logging
import glob
from docopt import docopt
logger = logging.getLogger('ciftify')
logger.setLevel(logging.DEBUG)
# In[75]:
arguments = docopt(__doc__)
#working_dir = '/home/edickie/Documents/ciftify_tests/'
working_dir = arguments['<testing_dir>']
fixtures_dir = arguments['<fixtures-dir>']
work_from = arguments['--outputs-dir']
src_data_dir= os.path.join(working_dir,'src_data')
if work_from:
new_outputs = work_from
else:
new_outputs= os.path.join(working_dir,'run_{}'.format(datetime.date.today()))
logger = logging.getLogger('ciftify')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if not os.path.exists(new_outputs):
run(['mkdir','-p', new_outputs])
# Get settings, and add an extra handler for the subject log
fh = logging.FileHandler(os.path.join(new_outputs, 'ciftify_tests.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info(ciftify.utils.section_header('Starting'))
## getting the data
freesurfer_webtgz = 'https://s3.amazonaws.com/openneuro/ds000030/ds000030_R1.0.4/compressed/ds000030_R1.0.4_derivatives_freesurfer_sub50004-50008.zip'
func_webtgz = 'https://s3.amazonaws.com/openneuro/ds000030/ds000030_R1.0.4/compressed/ds000030_R1.0.4_derivatives_sub50004-50008.zip'
subids = ['sub-50005','sub-50007']
#subids = ['sub-50005','sub-50006']
fs_subjects_dir = os.path.join(src_data_dir, 'ds000030_R1.0.4',
'derivatives','freesurfer')
hcp_data_dir = os.path.join(new_outputs, 'hcp')
if not os.path.exists(hcp_data_dir):
run(['mkdir','-p',hcp_data_dir])
# In[4]:
def download_file(web_address, local_filename):
'''download file if it does not exist'''
if not os.path.isfile(local_filename):
run(['wget', web_address, '-O', local_filename])
if not os.path.getsize(local_filename) > 0:
os.remove(local_filename)
# In[5]:
def get_recon_all_outputs(hcp_data_dir, subid):
recon_all_out = folder_contents_list(os.path.join(hcp_data_dir, subid))
recon_all_out = [x.replace(subid, 'subid') for x in recon_all_out]
return(recon_all_out)
# In[6]:
def folder_contents_list(path):
'''returns a list of folder contents'''
folder_contents = glob.glob(os.path.join(path, '**'), recursive = True)
folder_contents = [x.replace('{}/'.format(path),'') for x in folder_contents ]
folder_contents = folder_contents[1:] ## the first element is the path name
return(folder_contents)
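# e.g. (illustrative paths only) folder_contents_list('/some/hcp/sub-01') could return
# ['MNINonLinear', 'MNINonLinear/wmparc.nii.gz', ...]: a recursive listing with the
# leading '/some/hcp/sub-01/' prefix stripped from every entry.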
# In[7]:
def seed_corr_default_out(func, seed):
functype, funcbase = ciftify.niio.determine_filetype(func)
_, seedbase = ciftify.niio.determine_filetype(seed)
outputdir = os.path.dirname(func)
outbase = '{}_{}'.format(funcbase, seedbase)
outputname = os.path.join(outputdir, outbase)
if functype == "nifti": outputname = '{}.nii.gz'.format(outputname)
if functype == "cifti": outputname = '{}.dscalar.nii'.format(outputname)
return(outputname)
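# Worked example (hypothetical filenames): with func='.../rest_test1_Atlas_s12.dtseries.nii'
# and seed='.../sub-50005_RIGHT-PUTAMEN_vol.nii.gz', seed_corr_default_out returns
# '.../rest_test1_Atlas_s12_sub-50005_RIGHT-PUTAMEN_vol.dscalar.nii': the output keeps the
# functional file's directory, joins both basenames, and takes its extension from the
# functional file's type (nifti -> .nii.gz, cifti -> .dscalar.nii).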
def run_vis_map(result_map, result_prefix, result_type):
run(['cifti_vis_map', '{}-snaps'.format(result_type), '--hcp-data-dir', hcp_data_dir,
result_map, subid, result_prefix])
def run_seedcorr_peaktable(result_map):
run(['ciftify_peaktable',
'--min-threshold', '-0.5',
'--max-threshold', '0.5',
'--no-cluster-dlabel',
result_map])
def run_seed_corr_fmri_test(func_cifti, hcp_data_dir, roi_dir):
''' runs one of the seed corrs and then peak table to get a csv to test for each dtseries'''
subid = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(func_cifti)))))
atlas_vol = os.path.join(hcp_data_dir, subid, 'MNINonLinear', 'wmparc.nii.gz')
struct = 'RIGHT-PUTAMEN'
putamen_vol_seed_mask = os.path.join(roi_dir, '{}_{}_vol.nii.gz'.format(subid, struct))
if not os.path.exists(putamen_vol_seed_mask):
run(['wb_command', '-volume-label-to-roi', atlas_vol, putamen_vol_seed_mask, '-name', struct])
run(['ciftify_seed_corr', func_cifti, putamen_vol_seed_mask])
run_seedcorr_peaktable(seed_corr_default_out(func_cifti, putamen_vol_seed_mask))
logger.info(ciftify.utils.section_header('Getting ABIDE and running recon-all'))
# # Getting ABIDE and running recon-all
# In[8]:
abide_amazon_addy = 'https://s3.amazonaws.com/fcp-indi/data/Projects/ABIDE_Initiative/Outputs/freesurfer/5.1'
subid = 'NYU_0050954'
abide_freesurfer = os.path.join(src_data_dir, 'abide', 'freesurfer')
fs_subdir = os.path.join(abide_freesurfer, subid)
if not os.path.exists(fs_subdir):
run(['mkdir', '-p', fs_subdir])
for subdir in ['mri', 'surf','label','scripts']:
localdir = os.path.join(fs_subdir, subdir)
if not os.path.exists(localdir):
run(['mkdir', '-p', localdir])
for filename in ['T1.mgz', 'aparc+aseg.mgz', 'aparc.a2009s+aseg.mgz', 'wmparc.mgz', 'brain.finalsurfs.mgz']:
download_file(os.path.join(abide_amazon_addy, subid, 'mri', filename.replace('+','%20')),
os.path.join(fs_subdir, 'mri', filename))
for surface in ['pial', 'white', 'sphere.reg', 'sphere', 'curv', 'sulc', 'thickness']:
for hemi in ['lh', 'rh']:
download_file(os.path.join(abide_amazon_addy, subid, 'surf', '{}.{}'.format(hemi,surface)),
os.path.join(fs_subdir, 'surf', '{}.{}'.format(hemi,surface)))
for labelname in ['aparc', 'aparc.a2009s', 'BA']:
for hemi in ['lh', 'rh']:
download_file(os.path.join(abide_amazon_addy, subid, 'label', '{}.{}.annot'.format(hemi,labelname)),
os.path.join(fs_subdir, 'label', '{}.{}.annot'.format(hemi,labelname)))
for script in ['recon-all.done', 'build-stamp.txt']:
download_file(os.path.join(abide_amazon_addy, subid, 'scripts', script),
os.path.join(fs_subdir, 'scripts', script))
# In[ ]:
run(['ciftify_recon_all', '--hcp-data-dir', hcp_data_dir,
'--fs-subjects-dir', abide_freesurfer,
'NYU_0050954'])
# In[ ]:
run(['cifti_vis_recon_all', 'snaps', '--hcp-data-dir',hcp_data_dir, 'NYU_0050954'])
run(['cifti_vis_recon_all', 'index', '--hcp-data-dir', hcp_data_dir])
logger.info(ciftify.utils.section_header('Download ABIDE PCP data for ciftify_vol_result tests'))
# ## Download ABIDE PCP data for ciftify_vol_result tests
# In[9]:
def download_vmhc(subid):
amazon_addy = 'https://s3.amazonaws.com/fcp-indi/data/Projects/ABIDE_Initiative/Outputs/cpac/filt_global/vmhc/{}_vmhc.nii.gz'.format(subid)
sub_vmhc = os.path.join(src_vmhc, '{}_vmhc.nii.gz'.format(subid))
download_file(amazon_addy, sub_vmhc)
src_vmhc = os.path.join(src_data_dir, 'abide', 'vmhc')
if not os.path.exists(src_vmhc):
run(['mkdir', src_vmhc])
subjects=['NYU_0050954','NYU_0050955']
for subid in subjects:
download_vmhc(subid)
# In[10]:
subject = 'NYU_0050954'
run(['cifti_vis_map', 'nifti-snaps',
'--hcp-data-dir', hcp_data_dir,
'--qcdir', os.path.join(hcp_data_dir, 'abide_vmhc_vis'),
'--resample-nifti', '--colour-palette', 'fidl',
os.path.join(src_vmhc, '{}_vmhc.nii.gz'.format(subject)), subject, '{}_vmhc'.format(subject)])
# In[11]:
subject = 'NYU_0050954'
run(['cifti_vis_map', 'nifti-snaps',
'--hcp-data-dir', hcp_data_dir,
'--qcdir', os.path.join(hcp_data_dir, 'abide_vmhc_vis'),
'--resample-nifti',
os.path.join(src_vmhc, '{}_vmhc.nii.gz'.format(subject)), subject, '{}_vmhc_dcol'.format(subject)])
# In[12]:
subject = 'NYU_0050954'
run(['cifti_vis_map', 'nifti-snaps',
'--qcdir', os.path.join(hcp_data_dir, 'abide_vmhc_vis'),
'--resample-nifti', '--colour-palette', 'fidl',
os.path.join(src_vmhc, '{}_vmhc.nii.gz'.format(subject)), 'HCP_S1200_GroupAvg', '{}_vmhc'.format(subject)])
# In[13]:
subject = 'NYU_0050955'
run(['cifti_vis_map', 'nifti-snaps',
'--qcdir', os.path.join(hcp_data_dir, 'abide_vmhc_vis'),
'--resample-nifti',
os.path.join(src_vmhc, '{}_vmhc.nii.gz'.format(subject)), 'HCP_S1200_GroupAvg', '{}_vmhc'.format(subject)])
# In[14]:
run(['cifti_vis_map', 'index',
'--hcp-data-dir', '/tmp',
'--qcdir', os.path.join(hcp_data_dir, 'abide_vmhc_vis')])
logger.info(ciftify.utils.section_header('ciftify_vol_result with HCP data..if data in src'))
# # Testing ciftify_vol_result with HCP data..if data in src
# In[15]:
HCP_subid = '100307'
HCP_src_dir = os.path.join(src_data_dir, 'HCP')
label_vol = os.path.join(HCP_src_dir, HCP_subid, 'MNINonLinear', 'aparc+aseg.nii.gz')
scalar_vol = os.path.join(HCP_src_dir, HCP_subid, 'MNINonLinear', 'T2w_restore.nii.gz')
run_HCP_tests = True if os.path.exists(HCP_src_dir) else False
if run_HCP_tests:
HCP_out_dir = os.path.join(new_outputs, 'HCP')
run(['mkdir', '-p', HCP_out_dir])
run(['ciftify_vol_result', '--HCP-Pipelines', '--resample-nifti',
'--hcp-data-dir', HCP_src_dir,
'--integer-labels', HCP_subid, label_vol,
os.path.join(HCP_out_dir, '{}.aparc+aseg.32k_fs_LR.dscalar.nii'.format(HCP_subid))])
# In[16]:
if run_HCP_tests:
run(['ciftify_vol_result','--HCP-Pipelines', '--HCP-MSMAll','--resample-nifti',
'--hcp-data-dir', HCP_src_dir,
'--integer-labels', HCP_subid, label_vol,
os.path.join(HCP_out_dir, '{}.aparc+aseg_MSMAll.32k_fs_LR.dscalar.nii'.format(HCP_subid))])
# In[17]:
if run_HCP_tests:
run(['ciftify_vol_result','--HCP-Pipelines', '--resample-nifti',
'--hcp-data-dir', HCP_src_dir, HCP_subid,
os.path.join(HCP_src_dir, HCP_subid, 'MNINonLinear', 'T2w_restore.nii.gz'),
os.path.join(HCP_out_dir, '{}.T2w_restore.32k_fs_LR.dscalar.nii'.format(HCP_subid))])
# In[18]:
if run_HCP_tests:
run(['ciftify_vol_result','--HCP-Pipelines', '--HCP-MSMAll',
'--hcp-data-dir', HCP_src_dir, HCP_subid,
os.path.join(HCP_src_dir, HCP_subid, 'MNINonLinear', 'T2w_restore.2.nii.gz'),
os.path.join(HCP_out_dir, '{}.T2w_restore_MSMAll.32k_fs_LR.dscalar.nii'.format(HCP_subid))])
logger.info(ciftify.utils.section_header('Download the main dataset'))
# # Download the main dataset
# In[19]:
subids = ['sub-50005','sub-50007']
if not os.path.exists(src_data_dir):
run(['mkdir','-p',src_data_dir])
for subid in subids:
sub_fs_path = os.path.join('ds000030_R1.0.4','derivatives','freesurfer', subid)
if not os.path.exists(os.path.join(src_data_dir, sub_fs_path)):
if not os.path.exists(os.path.join(src_data_dir,
os.path.basename(freesurfer_webtgz))):
run(['wget', '-P', src_data_dir, freesurfer_webtgz])
run(['unzip',
os.path.join(src_data_dir,
os.path.basename(freesurfer_webtgz)),
os.path.join(sub_fs_path,'*'),
'-d', src_data_dir])
for subid in subids:
sub_file = os.path.join('ds000030_R1.0.4','derivatives',
'fmriprep', subid,'func',
'{}_task-rest_bold_space-T1w_preproc.nii.gz'.format(subid))
if not os.path.exists(os.path.join(src_data_dir,sub_file)):
if not os.path.exists(os.path.join(src_data_dir,
os.path.basename(func_webtgz))):
run(['wget', '-P', src_data_dir, func_webtgz])
run(['unzip',
os.path.join(src_data_dir,
os.path.basename(func_webtgz)),
sub_file,
'-d', src_data_dir])
for subid in subids:
sub_file = os.path.join('ds000030_R1.0.4','derivatives',
'fmriprep', subid,'func',
'{}_task-rest_bold_space-MNI152NLin2009cAsym_preproc.nii.gz'.format(subid))
if not os.path.exists(os.path.join(src_data_dir,sub_file)):
if not os.path.exists(os.path.join(src_data_dir,
os.path.basename(func_webtgz))):
run(['wget', '-P', src_data_dir, func_webtgz])
run(['unzip',
os.path.join(src_data_dir,
os.path.basename(func_webtgz)),
sub_file,
'-d', src_data_dir])
# In[ ]:
run(['ciftify_recon_all', '--resample-to-T1w32k', '--hcp-data-dir', hcp_data_dir,
'--fs-subjects-dir', fs_subjects_dir,
subids[0]])
# In[ ]:
run(['ciftify_recon_all', '--hcp-data-dir', hcp_data_dir,
'--fs-subjects-dir', fs_subjects_dir,
subids[1]])
# In[ ]:
for subid in subids:
run(['cifti_vis_recon_all', 'snaps', '--hcp-data-dir',hcp_data_dir, subid])
run(['cifti_vis_recon_all', 'index', '--hcp-data-dir', hcp_data_dir])
# In[ ]:
# for subid in ['sub-50004','sub-50006', 'sub-50008']:
# run(['ciftify_recon_all', '--hcp-data-dir', hcp_data_dir,
# '--fs-subjects-dir', fs_subjects_dir,
# subid])
# In[ ]:
# for subid in ['sub-50004','sub-50006', 'sub-50008']:
# run(['cifti_vis_recon_all', 'snaps', '--hcp-data-dir',hcp_data_dir, subid])
# run(['cifti_vis_recon_all', 'index', '--hcp-data-dir', hcp_data_dir])
logger.info(ciftify.utils.section_header('Running ciftify_subject_fmri'))
# # Running ciftify_subject_fmri
# In[ ]:
subid=subids[0]
native_func = os.path.join(src_data_dir,'ds000030_R1.0.4','derivatives',
'fmriprep', subid,'func',
'{}_task-rest_bold_space-native_preproc.nii.gz'.format(subid))
mat_file = os.path.join(src_data_dir, 'ds000030_R1.0.4','derivatives',
'fmriprep', subid,'func',
'{}_task-rest_bold_T1_to_EPI.mat'.format(subid))
with open(mat_file, "w") as text_file:
text_file.write('''1.03 -0.015 0.0025 -15.0
0.014 1.01 -0.005 -11.9
-0.007 0.01 0.99 2
0 0 0 1
''')
t1_func = os.path.join(src_data_dir,'ds000030_R1.0.4','derivatives',
'fmriprep', subid,'func',
'{}_task-rest_bold_space-T1w_preproc.nii.gz'.format(subid))
if not os.path.exists(native_func):
run(['flirt', '-in', t1_func, '-ref', t1_func,
'-out', native_func, '-init', mat_file, '-applyxfm'])
run(['ciftify_subject_fmri', '--SmoothingFWHM', '12',
'--hcp-data-dir', hcp_data_dir,
native_func, subid, 'rest_test1'])
# In[ ]:
subid=subids[0]
run(['cifti_vis_fmri', 'snaps', '--SmoothingFWHM', '12', '--hcp-data-dir',hcp_data_dir, 'rest_test1', subid])
# In[ ]:
subid=subids[0]
native_func_mean = os.path.join(src_data_dir,'ds000030_R1.0.4','derivatives',
'fmriprep', subid,'func',
'{}_task-rest_bold_space-native_mean.nii.gz'.format(subid))
run(['fslmaths', native_func, '-Tmean', native_func_mean])
run(['ciftify_subject_fmri', '--hcp-data-dir', hcp_data_dir,
'--FLIRT-template', native_func_mean, native_func, subid, 'rest_test2'])
# In[ ]:
subid=subids[0]
run(['cifti_vis_fmri', 'snaps', '--smooth-conn', '12', '--hcp-data-dir',hcp_data_dir, 'rest_test2', subid])
# In[ ]:
subid = subids[1]
run(['ciftify_subject_fmri', '--SmoothingFWHM', '8', '--FLIRT-dof', '30',
'--hcp-data-dir', hcp_data_dir,
'--OutputSurfDiagnostics',
os.path.join(src_data_dir,'ds000030_R1.0.4','derivatives',
'fmriprep', subid,'func',
'{}_task-rest_bold_space-T1w_preproc.nii.gz'.format(subid)),
subid, 'rest_test_dof30'])
# In[ ]:
subid = subids[1]
run(['cifti_vis_fmri', 'snaps', '--hcp-data-dir',hcp_data_dir, 'rest_test_dof30', subid])
# In[ ]:
subid = subids[1]
run(['ciftify_subject_fmri', '--already-in-MNI', '--SmoothingFWHM', '8',
'--hcp-data-dir', hcp_data_dir,
os.path.join(src_data_dir,'ds000030_R1.0.4','derivatives',
'fmriprep', subid,'func',
'{}_task-rest_bold_space-MNI152NLin2009cAsym_preproc.nii.gz'.format(subid)),
subid, 'rest_bad_transform'])
# In[ ]:
subid = subids[1]
run(['cifti_vis_fmri', 'snaps', '--hcp-data-dir',hcp_data_dir, 'rest_bad_transform', subid])
# In[ ]:
run(['cifti_vis_fmri', 'index', '--hcp-data-dir', hcp_data_dir])
# In[21]:
from glob import glob
dtseries_files = glob(os.path.join(hcp_data_dir,'*',
'MNINonLinear', 'Results',
'*', '*Atlas_s*.dtseries.nii'))
run(['cifti_vis_RSN', 'cifti-snaps',
'--hcp-data-dir',hcp_data_dir,
'--colour-palette', 'PSYCH-NO-NONE',
dtseries_files[0],
os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(dtseries_files[0])))))])
for dtseries in dtseries_files[1:]:
run(['cifti_vis_RSN', 'cifti-snaps',
'--hcp-data-dir',hcp_data_dir, dtseries,
os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(dtseries)))))])
run(['cifti_vis_RSN', 'index',
'--hcp-data-dir', hcp_data_dir])
# In[22]:
run(['cifti_vis_RSN', 'index', '--hcp-data-dir', hcp_data_dir])
# In[117]:
roi_dir = os.path.join(new_outputs, 'rois')
if not os.path.exists(roi_dir):
run(['mkdir', roi_dir])
for dtseries in dtseries_files:
run_seed_corr_fmri_test(dtseries, hcp_data_dir, roi_dir)
# In[24]:
run(['cifti_vis_RSN', 'nifti-snaps',
'--hcp-data-dir',hcp_data_dir,
'--qcdir', os.path.join(hcp_data_dir, 'RSN_from_nii'),
'--colour-palette', 'PSYCH-NO-NONE',
os.path.join(src_data_dir,'ds000030_R1.0.4','derivatives',
'fmriprep', subids[0],'func',
'{}_task-rest_bold_space-MNI152NLin2009cAsym_preproc.nii.gz'.format(subids[0])),
subids[0]])
run(['cifti_vis_RSN', 'nifti-snaps',
'--hcp-data-dir',hcp_data_dir,
'--qcdir', os.path.join(hcp_data_dir, 'RSN_from_nii'),
os.path.join(src_data_dir,'ds000030_R1.0.4','derivatives',
'fmriprep', subids[1],'func',
'{}_task-rest_bold_space-MNI152NLin2009cAsym_preproc.nii.gz'.format(subids[1])),
subids[1]])
run(['cifti_vis_RSN', 'index',
'--qcdir', os.path.join(hcp_data_dir, 'RSN_from_nii'),
'--hcp-data-dir', hcp_data_dir])
# In[116]:
groupmask_cmd = ['ciftify_groupmask', os.path.join(hcp_data_dir, 'groupmask.dscalar.nii')]
groupmask_cmd.extend(dtseries_files)
run(groupmask_cmd)
logger.info(ciftify.utils.section_header('Running PINT and related functions'))
# # Running PINT and related functions
# In[26]:
subid = subids[0]
run(['mkdir', '-p', os.path.join(new_outputs, 'PINT', subid)])
run(['ciftify_PINT_vertices', '--pcorr',
os.path.join(hcp_data_dir, subid,'MNINonLinear', 'Results',
'rest_test1', 'rest_test1_Atlas_s12.dtseries.nii'),
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.L.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.R.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(ciftify.config.find_ciftify_global(), 'PINT', 'Yeo7_2011_80verts.csv'),
os.path.join(new_outputs, 'PINT', subid, subid)])
# In[27]:
subid = subids[0]
run(['cifti_vis_PINT', 'snaps', '--hcp-data-dir', hcp_data_dir,
os.path.join(hcp_data_dir, subid,'MNINonLinear', 'Results',
'rest_test1', 'rest_test1_Atlas_s12.dtseries.nii'),
subid,
os.path.join(new_outputs, 'PINT', subid, '{}_summary.csv'.format(subid))])
# In[28]:
subid = subids[1]
run(['mkdir', '-p', os.path.join(new_outputs, 'PINT', subid)])
run(['ciftify_PINT_vertices',
os.path.join(hcp_data_dir, subid,'MNINonLinear', 'Results',
'rest_test_dof30', 'rest_test_dof30_Atlas_s8.dtseries.nii'),
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.L.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.R.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(ciftify.config.find_ciftify_global(), 'PINT', 'Yeo7_2011_80verts.csv'),
os.path.join(new_outputs, 'PINT', subid, subid)])
# In[29]:
run(['cifti_vis_PINT', 'snaps', '--hcp-data-dir', hcp_data_dir,
os.path.join(hcp_data_dir, subid,'MNINonLinear', 'Results',
'rest_test_dof30', 'rest_test_dof30_Atlas_s8.dtseries.nii'),
subid,
os.path.join(new_outputs, 'PINT', subid, '{}_summary.csv'.format(subid))])
# In[120]:
subid = subids[1]
run(['mkdir', '-p', os.path.join(new_outputs, 'PINT', subid)])
run(['ciftify_PINT_vertices', '--outputall',
os.path.join(hcp_data_dir, subid,'MNINonLinear', 'Results',
'rest_test_dof30', 'rest_test_dof30_Atlas_s8.dtseries.nii'),
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.L.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.R.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(ciftify.config.find_ciftify_global(), 'PINT', 'Yeo7_2011_80verts.csv'),
os.path.join(new_outputs, 'PINT', subid, '{}_all'.format(subid))])
# In[121]:
run(['cifti_vis_PINT', 'snaps', '--hcp-data-dir', hcp_data_dir,
os.path.join(hcp_data_dir, subid,'MNINonLinear', 'Results',
'rest_test_dof30', 'rest_test_dof30_Atlas_s8.dtseries.nii'),
subid,
os.path.join(new_outputs, 'PINT', subid, '{}_all_summary.csv'.format(subid))])
# In[122]:
run(['cifti_vis_PINT', 'index', '--hcp-data-dir', hcp_data_dir])
# In[123]:
summary_files = glob(os.path.join(new_outputs, 'PINT', '*','*_summary.csv'))
concat_cmd = ['ciftify_postPINT1_concat',
os.path.join(new_outputs, 'PINT', 'concatenated.csv')]
concat_cmd.extend(summary_files)
run(concat_cmd)
# In[124]:
run(['ciftify_postPINT2_sub2sub', os.path.join(new_outputs, 'PINT', 'concatenated.csv'),
os.path.join(new_outputs, 'PINT', 'ivertex_distances.csv')])
# In[125]:
run(['ciftify_postPINT2_sub2sub', '--roiidx', '14',
os.path.join(new_outputs, 'PINT', 'concatenated.csv'),
os.path.join(new_outputs, 'PINT', 'ivertex_distances_roiidx14.csv')])
logger.info(ciftify.utils.section_header('Running ciftify_surface_rois'))
# # Running ciftify_surface_rois
# In[126]:
vertices1_csv = os.path.join(src_data_dir, 'vertices1.csv')
with open(vertices1_csv, "w") as text_file:
text_file.write('''hemi,vertex
L,11801
L,26245
L,26235
L,26257
L,13356
L,289
L,13336
L,13337
L,26269
L,13323
L,26204
L,26214
L,13326
L,13085
L,13310
L,13281
L,13394
L,13395
L,26263
L,26265
L,13343
L,77
L,13273
L,13342
L,26271
L,11804
L,13322
L,13369
L,13353
L,26268
L,26201
L,26269
L,68
L,13391
''')
subid = subids[0]
run(['mkdir', '-p', os.path.join(new_outputs, 'rois')])
run(['ciftify_surface_rois', vertices1_csv, '10', '--gaussian',
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.L.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.R.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(new_outputs, 'rois', 'gaussian_roi.dscalar.nii')])
# In[37]:
subid = subids[0]
run(['cifti_vis_map', 'cifti-snaps', '--hcp-data-dir',
hcp_data_dir,
os.path.join(new_outputs, 'rois', 'gaussian_roi.dscalar.nii'),
subid,
'gaussian_roi'])
# In[38]:
run(['ciftify_peaktable', '--max-threshold', '0.05',
'--left-surface', os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.L.midthickness.32k_fs_LR.surf.gii'.format(subid)),
'--right-surface', os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.R.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(new_outputs, 'rois', 'gaussian_roi.dscalar.nii')])
# In[39]:
vertices2_csv = os.path.join(src_data_dir, 'vertices2.csv')
with open(vertices2_csv, "w") as text_file:
text_file.write('''hemi,vertex
R,2379
R,2423
R,2423
R,2629
R,29290
R,29290
R,1794
R,29199
R,1788
R,2380
R,2288
R,29320
R,29274
R,29272
R,29331
R,29356
R,2510
R,29345
R,2506
R,2506
R,1790
R,29305
R,2630
R,2551
R,2334
R,2334
R,29306
R,2551
R,2422
R,29256
R,2468
R,29332
R,2425
R,29356
''')
run(['mkdir', '-p', os.path.join(new_outputs, 'rois')])
run(['ciftify_surface_rois', vertices2_csv, '6', '--probmap',
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.L.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.R.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(new_outputs, 'rois', 'probmap_roi.dscalar.nii')])
# In[40]:
subid = subids[0]
run(['cifti_vis_map', 'cifti-snaps', '--hcp-data-dir',
hcp_data_dir,
os.path.join(new_outputs, 'rois', 'probmap_roi.dscalar.nii'),
subid,
'probmap_roi'])
# In[41]:
run(['ciftify_peaktable', '--max-threshold', '0.05',
'--left-surface', os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.L.midthickness.32k_fs_LR.surf.gii'.format(subid)),
'--right-surface', os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.R.midthickness.32k_fs_LR.surf.gii'.format(subid)),
os.path.join(new_outputs, 'rois', 'probmap_roi.dscalar.nii')])
# In[42]:
for hemi in ['L','R']:
run(['wb_command', '-surface-vertex-areas',
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.{}.midthickness.32k_fs_LR.surf.gii'.format(subid, hemi)),
os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.{}.midthickness_va.32k_fs_LR.shape.gii'.format(subid, hemi))])
run(['ciftify_peaktable',
'--outputbase', os.path.join(new_outputs, 'rois', 'probmap_roi_withva'),
'--max-threshold', '0.05',
'--left-surface', os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.L.midthickness.32k_fs_LR.surf.gii'.format(subid)),
'--right-surface', os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.R.midthickness.32k_fs_LR.surf.gii'.format(subid)),
'--left-surf-area', os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.L.midthickness_va.32k_fs_LR.shape.gii'.format(subid)),
'--right-surf-area', os.path.join(hcp_data_dir, subid,'MNINonLinear','fsaverage_LR32k',
'{}.R.midthickness_va.32k_fs_LR.shape.gii'.format(subid)),
os.path.join(new_outputs, 'rois', 'probmap_roi.dscalar.nii')])
# In[43]:
run(['ciftify_surface_rois',
os.path.join(ciftify.config.find_ciftify_global(), 'PINT', 'Yeo7_2011_80verts.csv'),
'6', '--vertex-col', 'tvertex', '--labels-col', 'NETWORK', '--overlap-logic', 'EXCLUDE',
os.path.join(ciftify.config.find_HCP_S1200_GroupAvg(),
'S1200.L.midthickness_MSMAll.32k_fs_LR.surf.gii'),
os.path.join(ciftify.config.find_HCP_S1200_GroupAvg(),
'S1200.R.midthickness_MSMAll.32k_fs_LR.surf.gii'),
os.path.join(new_outputs, 'rois', 'tvertex.dscalar.nii')])
# In[44]:
subid = subids[0]
run(['cifti_vis_map', 'cifti-snaps', '--hcp-data-dir',
hcp_data_dir,
os.path.join(new_outputs, 'rois', 'tvertex.dscalar.nii'),
subid,
'tvertex'])
logger.info(ciftify.utils.section_header('Running ciftify_seed_corr'))
# # Running ciftify_seed_corr
subid = subids[0]
smoothing = 12
func_vol = os.path.join(hcp_data_dir, subid, 'MNINonLinear', 'Results', 'rest_test1', 'rest_test1.nii.gz')
func_cifti_sm0 = os.path.join(hcp_data_dir, subid, 'MNINonLinear', 'Results', 'rest_test1', 'rest_test1_Atlas_s0.dtseries.nii')
func_cifti_smoothed = os.path.join(hcp_data_dir, subid, 'MNINonLinear', 'Results', 'rest_test1', 'rest_test1_Atlas_s{}.dtseries.nii'.format(smoothing))
atlas_vol = os.path.join(hcp_data_dir, subid, 'MNINonLinear', 'wmparc.nii.gz')
seed_corr_dir = os.path.join(new_outputs, 'ciftify_seed_corr')
if not os.path.exists(seed_corr_dir):
run(['mkdir', '-p', seed_corr_dir])
cifti_mask = os.path.join(seed_corr_dir, '{}_func_mask.dscalar.nii'.format(subid))
run(['wb_command', '-cifti-math', "'(x > 0)'", cifti_mask,
'-var', 'x', func_cifti_sm0, '-select', '1', '1'])
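# The '(x > 0)' expression with '-select 1 1' binarises the first map (timepoint) of the
# unsmoothed dtseries, giving a greyordinate mask of locations that carry signal; this mask
# is reused below as the --mask argument for the cifti seed-correlation runs.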
run(['extract_nuisance_regressors',
os.path.join(hcp_data_dir, subid, 'MNINonLinear'),
func_vol])
# In[ ]:
hipp_struct = 'LEFT-HIPPOCAMPUS'
hipp_vol_seed_mask = os.path.join(seed_corr_dir, '{}_{}_vol.nii.gz'.format(subid, hipp_struct))
run(['wb_command', '-volume-label-to-roi', atlas_vol, hipp_vol_seed_mask, '-name', hipp_struct])
# In[ ]:
run(['ciftify_seed_corr', '--fisher-z', func_vol, hipp_vol_seed_mask])
run(['cifti_vis_map', 'nifti-snaps', '--hcp-data-dir',
hcp_data_dir,
seed_corr_default_out(func_vol, hipp_vol_seed_mask),
subid,
'{}_{}_niftitoniftiZ_unmasked'.format(subid, hipp_struct)])
# In[ ]:
run(['ciftify_seed_corr', '--fisher-z',
'--outputname', os.path.join(seed_corr_dir, '{}_{}_niftitoniftiZ_masked.nii.gz'.format(subid, hipp_struct)),
'--mask', os.path.join(hcp_data_dir, subid, 'MNINonLinear', 'brainmask_fs.nii.gz'),
func_vol, hipp_vol_seed_mask])
run(['cifti_vis_map', 'nifti-snaps', '--hcp-data-dir',
hcp_data_dir,
os.path.join(seed_corr_dir, '{}_{}_niftitoniftiZ_masked.nii.gz'.format(subid, hipp_struct)),
subid,
'{}_{}_niftitonifti_masked'.format(subid, hipp_struct)])
# In[ ]:
run(['ciftify_seed_corr', '--fisher-z', func_cifti_smoothed, hipp_vol_seed_mask])
run(['cifti_vis_map', 'cifti-snaps', '--hcp-data-dir',
hcp_data_dir,
seed_corr_default_out(func_cifti_smoothed, hipp_vol_seed_mask),
subid,
'{}_{}_niftitociftiZ_unmasked'.format(subid, hipp_struct)])
run_seedcorr_peaktable(seed_corr_default_out(func_cifti_smoothed, hipp_vol_seed_mask))
# In[ ]:
run(['ciftify_seed_corr', '--fisher-z',
'--outputname', os.path.join(seed_corr_dir, '{}_{}_niftitociftiZ_masked.dscalar.nii'.format(subid, hipp_struct)),
'--mask', cifti_mask,
func_cifti_smoothed, hipp_vol_seed_mask])
subid = subids[0]
run(['cifti_vis_map', 'cifti-snaps', '--hcp-data-dir', hcp_data_dir,
os.path.join(seed_corr_dir, '{}_{}_niftitociftiZ_masked.dscalar.nii'.format(subid, hipp_struct)),
subid,
'{}_{}_niftitociftiZ_masked'.format(subid, hipp_struct)])
run_seedcorr_peaktable(os.path.join(seed_corr_dir, '{}_{}_niftitociftiZ_masked.dscalar.nii'.format(subid, hipp_struct)))
# In[ ]:
struct = 'RIGHT-PUTAMEN'
putamen_vol_seed_mask = os.path.join(seed_corr_dir, '{}_{}_vol.nii.gz'.format(subid, struct))
run(['wb_command', '-volume-label-to-roi', atlas_vol, putamen_vol_seed_mask, '-name', struct])
# In[ ]:
run(['ciftify_seed_corr', func_vol, putamen_vol_seed_mask])
run(['cifti_vis_map', 'nifti-snaps', '--hcp-data-dir',
hcp_data_dir,
seed_corr_default_out(func_vol, putamen_vol_seed_mask),
subid,
'{}_{}_niftitonifti_unmasked'.format(subid, struct)])
# In[ ]:
run(['ciftify_seed_corr',
'--outputname', os.path.join(seed_corr_dir, '{}_{}_niftitonifti_masked.nii.gz'.format(subid, struct)),
'--mask', os.path.join(hcp_data_dir, subid, 'MNINonLinear', 'brainmask_fs.nii.gz'),
func_vol, putamen_vol_seed_mask])
run(['cifti_vis_map', 'nifti-snaps', '--hcp-data-dir',
hcp_data_dir,
os.path.join(seed_corr_dir, '{}_{}_niftitonifti_masked.nii.gz'.format(subid, struct)),
subid,
'{}_{}_niftitonifti_masked'.format(subid, struct)])
# In[ ]:
run(['ciftify_seed_corr', func_cifti_smoothed, putamen_vol_seed_mask])
run(['cifti_vis_map', 'cifti-snaps', '--hcp-data-dir',
hcp_data_dir,
seed_corr_default_out(func_cifti_smoothed, putamen_vol_seed_mask),
subid,
'{}_{}_niftitocifti_unmasked'.format(subid, struct)])
run_seedcorr_peaktable(seed_corr_default_out(func_cifti_smoothed, putamen_vol_seed_mask))
# In[ ]:
run(['ciftify_seed_corr',
'--outputname', os.path.join(seed_corr_dir, '{}_{}_niftitocifti_masked.dscalar.nii'.format(subid, struct)),
'--mask', cifti_mask,
func_cifti_smoothed, putamen_vol_seed_mask])
subid = subids[0]
run(['cifti_vis_map', 'cifti-snaps', '--hcp-data-dir', hcp_data_dir,
os.path.join(seed_corr_dir, '{}_{}_niftitocifti_masked.dscalar.nii'.format(subid, struct)),
subid,
'{}_{}_niftitocifti_masked'.format(subid, struct)])
run_seedcorr_peaktable(os.path.join(seed_corr_dir, '{}_{}_niftitocifti_masked.dscalar.nii'.format(subid, struct)))
# In[ ]:
func = func_cifti_smoothed
seed = os.path.join(seed_corr_dir, '{}_{}_cifti.dscalar.nii'.format(subid, struct))
result_map = seed_corr_default_out(func, seed)
result_type = 'cifti'
result_prefix = '{}_{}_ciftitocifti_unmasked'.format(subid, struct)
run(['wb_command', '-cifti-create-dense-from-template',
func_cifti_sm0, seed,
'-volume-all', putamen_vol_seed_mask])
run(['ciftify_seed_corr', func, seed])
# In[ ]:
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
result_map = os.path.join(seed_corr_dir, '{}_{}_ciftitocifti_30TRs.dscalar.nii'.format(subid, struct))
result_type = 'cifti'
result_prefix = '{}_{}_ciftitocifti_30TRs'.format(subid, struct)
TR_file = os.path.join(new_outputs, 'rois','TR_file.txt')
with open(TR_file, "w") as text_file:
text_file.write('''1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30''')
run(['ciftify_seed_corr',
'--outputname', result_map,
'--use-TRs', TR_file,
func_cifti_smoothed, putamen_vol_seed_mask])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
result_map = os.path.join(seed_corr_dir, '{}_{}_ciftitocifti_masked.dscalar.nii'.format(subid, struct))
result_type = 'cifti'
result_prefix = '{}_{}_ciftitocifti_masked'.format(subid, struct)
run(['ciftify_seed_corr',
'--outputname', result_map,
'--mask', cifti_mask,
func_cifti_smoothed,
os.path.join(seed_corr_dir, '{}_{}_cifti.dscalar.nii'.format(subid, struct))])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
result_map = seed_corr_default_out(func_cifti_smoothed, os.path.join(new_outputs, 'rois', 'gaussian_roi.dscalar.nii'))
result_type = 'cifti'
result_prefix = '{}_gaussian_ciftitocifti_unmasked'.format(subid)
run(['ciftify_seed_corr', '--weighted', func_cifti_smoothed,
os.path.join(new_outputs, 'rois', 'gaussian_roi.dscalar.nii')])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
result_map = os.path.join(seed_corr_dir, '{}_gaussian_ciftitocifti_masked.dscalar.nii'.format(subid))
result_type = 'cifti'
result_prefix = '{}_gaussian_ciftitocifti_masked'.format(subid)
run(['ciftify_seed_corr', '--weighted',
'--outputname', result_map,
'--mask', cifti_mask,
func_cifti_smoothed,
os.path.join(new_outputs, 'rois', 'gaussian_roi.dscalar.nii')])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
L_gaussian_roi = os.path.join(new_outputs, 'rois', 'gaussian_L_roi.shape.gii')
result_map = seed_corr_default_out(func_cifti_smoothed, L_gaussian_roi)
result_type = 'cifti'
result_prefix = '{}_gaussian_giftitocifti_unmasked'.format(subid)
run(['wb_command', '-cifti-separate',
os.path.join(new_outputs, 'rois', 'gaussian_roi.dscalar.nii'),
'COLUMN','-metric', 'CORTEX_LEFT', L_gaussian_roi])
run(['ciftify_seed_corr', '--weighted', '--hemi', 'L',
func_cifti_smoothed,
L_gaussian_roi])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
result_map = os.path.join(seed_corr_dir, '{}_gaussian_giftitocifti_masked.dscalar.nii'.format(subid))
result_type = 'cifti'
result_prefix = '{}_gaussian_giftitocifti_masked'.format(subid)
run(['ciftify_seed_corr', '--weighted', '--hemi', 'L',
'--outputname', result_map,
'--mask', cifti_mask,
func_cifti_smoothed,
L_gaussian_roi])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
result_map = seed_corr_default_out(func_cifti_smoothed, os.path.join(new_outputs, 'rois', 'probmap_roi.dscalar.nii'))
result_type = 'cifti'
result_prefix = '{}_probmap_ciftitocifti_unmasked'.format(subid)
run(['ciftify_seed_corr', '--weighted', func_cifti_smoothed,
os.path.join(new_outputs, 'rois', 'probmap_roi.dscalar.nii')])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
result_map = os.path.join(seed_corr_dir, '{}_probmap_ciftitocifti_masked.dscalar.nii'.format(subid))
result_type = 'cifti'
result_prefix = '{}_probmap_ciftitocifti_masked'.format(subid)
run(['ciftify_seed_corr', '--weighted',
'--outputname', result_map,
'--mask', cifti_mask,
func_cifti_smoothed,
os.path.join(new_outputs, 'rois', 'probmap_roi.dscalar.nii')])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
R_probmap_roi = os.path.join(new_outputs, 'rois', 'probmap_R_roi.shape.gii')
result_map = seed_corr_default_out(func_cifti_smoothed, R_probmap_roi)
result_type = 'cifti'
result_prefix = '{}_probmap_giftitocifti_unmasked'.format(subid)
run(['wb_command', '-cifti-separate',
os.path.join(new_outputs, 'rois', 'probmap_roi.dscalar.nii'),
'COLUMN','-metric', 'CORTEX_RIGHT', R_probmap_roi])
run(['ciftify_seed_corr', '--weighted', '--hemi', 'R', func_cifti_smoothed,
R_probmap_roi])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
result_map = os.path.join(seed_corr_dir, '{}_probmap_giftitocifti_masked.dscalar.nii'.format(subid))
result_type = 'cifti'
result_prefix = '{}_probmap_giftitocifti_masked'.format(subid)
run(['ciftify_seed_corr', '--weighted', '--hemi', 'R',
'--outputname', result_map,
'--mask', cifti_mask,
func_cifti_smoothed,
R_probmap_roi])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
result_map = seed_corr_default_out(func_cifti_smoothed, os.path.join(new_outputs, 'rois', 'tvertex.dscalar.nii'))
result_type = 'cifti'
result_prefix = '{}_tvertex7_ciftitocifti_unmasked'.format(subid)
run(['ciftify_seed_corr', '--roi-label', '7', func_cifti_smoothed,
os.path.join(new_outputs, 'rois', 'tvertex.dscalar.nii')])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
result_map = os.path.join(seed_corr_dir, '{}_tvertex7_ciftitocifti_masked.dscalar.nii'.format(subid))
result_type = 'cifti'
result_prefix = '{}_tvertex7_ciftitocifti_masked'.format(subid)
run(['ciftify_seed_corr', '--roi-label', '7',
'--outputname', result_map,
'--mask', cifti_mask,
func_cifti_smoothed,
os.path.join(new_outputs, 'rois', 'tvertex.dscalar.nii')])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
R_tvertex_roi = os.path.join(new_outputs, 'rois', 'tvertex_R_roi.shape.gii')
result_map = seed_corr_default_out(func_cifti_smoothed, R_tvertex_roi)
result_type = 'cifti'
result_prefix = '{}_tvertex7_giftitocifti_umasked'.format(subid)
run(['wb_command', '-cifti-separate',
os.path.join(new_outputs, 'rois', 'tvertex.dscalar.nii'),
'COLUMN','-metric', 'CORTEX_RIGHT', R_tvertex_roi])
run(['ciftify_seed_corr', '--roi-label', '7', '--hemi', 'R',
func_cifti_smoothed, R_tvertex_roi])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
# In[ ]:
result_map = os.path.join(seed_corr_dir, '{}_tvertex7_giftitocifti_masked.dscalar.nii'.format(subid))
result_type = 'cifti'
result_prefix = '{}_tvertex7_giftitocifti_masked'.format(subid)
run(['ciftify_seed_corr', '--roi-label', '7', '--hemi', 'R',
'--outputname', result_map,
'--mask', cifti_mask,
func_cifti_smoothed, R_tvertex_roi])
run_vis_map(result_map, result_prefix, result_type)
run_seedcorr_peaktable(result_map)
run(['cifti_vis_map', 'index', '--hcp-data-dir', hcp_data_dir])
logger.info(ciftify.utils.section_header('ciftify_meants (atlas examples)'))
# # ciftify_meants (atlas examples)
# In[102]:
subject_aparc = os.path.join(hcp_data_dir, subid,
'MNINonLinear', 'fsaverage_LR32k',
'{}.aparc.32k_fs_LR.dlabel.nii'.format(subid))
subject_thickness = os.path.join(hcp_data_dir, subid,
'MNINonLinear', 'fsaverage_LR32k',
'{}.thickness.32k_fs_LR.dscalar.nii'.format(subid))
run(['ciftify_meants', func_cifti_sm0, subject_aparc])
# In[112]:
run(['ciftify_meants',
'--outputcsv', os.path.join(seed_corr_dir,
'{}_aparc_thickness_ciftitocifti_unmasked_meants.csv'.format(subid)),
'--outputlabels', os.path.join(seed_corr_dir,
'{}_aparc_ciftitocifti_unmasked_labels.csv'.format(subid)),
subject_thickness, subject_aparc])
# In[104]:
run(['ciftify_meants', func_cifti_sm0,
os.path.join(new_outputs, 'rois', 'tvertex.dscalar.nii')])
# In[105]:
run(['ciftify_meants',
'--mask', cifti_mask,
'--outputcsv', os.path.join(seed_corr_dir,
'{}_tvertex_func_ciftitocifti_unmasked_meants.csv'.format(subid)),
func_cifti_sm0,
os.path.join(new_outputs, 'rois', 'tvertex.dscalar.nii')])
# In[106]:
## project wmparc to subject
wmparc_dscalar_d0 = os.path.join(seed_corr_dir, '{}_wmparc_MNI_d0.dscalar.nii'.format(subid))
run(['ciftify_vol_result', '--hcp-data-dir', hcp_data_dir,
'--integer-labels', subid, atlas_vol, wmparc_dscalar_d0])
# In[107]:
## project wmparc to subject
wmparc_dscalar = os.path.join(seed_corr_dir, '{}_wmparc_MNI_d10.dscalar.nii'.format(subid))
run(['ciftify_vol_result',
'--hcp-data-dir', hcp_data_dir,
'--dilate', '10',
'--integer-labels', subid, atlas_vol, wmparc_dscalar])
# In[115]:
run(['ciftify_meants',
'--mask', os.path.join(hcp_data_dir, subid, 'MNINonLinear', 'brainmask_fs.nii.gz'),
'--outputcsv', os.path.join(seed_corr_dir,
'{}_wmparc_func_niftitonifti_masked_meants.csv'.format(subid)),
'--outputlabels', os.path.join(seed_corr_dir,
'{}_wmparc_func_niftitonifti_masked_labels.csv'.format(subid)),
func_vol, atlas_vol])
# In[114]:
run(['ciftify_meants',
'--mask', cifti_mask,
'--outputcsv', os.path.join(seed_corr_dir,
'{}_wmparc_func_ciftitocifti_masked_meants.csv'.format(subid)),
'--outputlabels', os.path.join(seed_corr_dir,
'{}_wmparc_func_ciftitocifti_masked_labels.csv'.format(subid)),
func_cifti_sm0, wmparc_dscalar])
# In[113]:
wmparc_dlabel = os.path.join(seed_corr_dir, '{}_wmparc_MNI.dlabel.nii'.format(subid))
run(['wb_command', '-cifti-label-import', '-logging', 'SEVERE',
wmparc_dscalar,
os.path.join(ciftify.config.find_ciftify_global(), 'hcp_config', 'FreeSurferAllLut.txt'),
wmparc_dlabel])
run(['ciftify_meants',
'--outputcsv', os.path.join(seed_corr_dir,
'{}_wmparc_func_ciftiltocifti_dlabel_meants.csv'.format(subid)),
'--outputlabels', os.path.join(seed_corr_dir,
'{}_wmparc_func_ciftiltocifti_dlabel_labels.csv'.format(subid)),
func_cifti_sm0, wmparc_dlabel])
# In[128]:
logger.info(ciftify.utils.section_header('Testing csv outputs'))
csv_df = pd.read_csv(os.path.join(fixtures_dir, 'expected_csvs.csv'))
csv_df['num_rows'] = ''
csv_df['num_cols'] = ''
csv_df['exists'] = ''
csv_df['matches'] = ''
# In[129]:
for row in csv_df.index:
expected_csv = os.path.join(new_outputs, csv_df.loc[row, 'expected_output'])
if os.path.isfile(expected_csv):
compare_csv = os.path.join(fixtures_dir, csv_df.loc[row, 'compare_to'])
header_col = 0
        if expected_csv.endswith(('_meants.csv', '_labels.csv',
                                  '_CSF.csv', '_WM.csv', '_GM.csv')):
            header_col = None
testdf = pd.read_csv(expected_csv, header = header_col)
csv_df.loc[row, 'exists'] = True
csv_df.loc[row, 'num_rows'] = testdf.shape[0]
csv_df.loc[row, 'num_cols'] = testdf.shape[1]
comparedf = pd.read_csv(compare_csv, header = header_col)
try:
csv_df.loc[row, 'matches'] = (comparedf == testdf).all().all()
        except Exception:
csv_df.loc[row, 'matches'] = False
else:
csv_df.loc[row, 'exists'] = False
# In[131]:
csv_df.to_csv(os.path.join(new_outputs, 'csv_compare_results.csv'))
# In[134]:
if csv_df.exists.all():
logger.info('All expected csv were generated')
else:
logger.error('Some expected csv outputs were not generated')
# In[135]:
if csv_df.matches.all():
logger.info('All expected csv outputs match expected')
else:
    logger.info('Some csv outputs do not match...see csv_compare_results.csv for more details')
# In[136]:
logger.info(ciftify.utils.section_header('Done'))
# In[ ]:
| mit |
hlin117/scikit-learn | sklearn/tests/test_cross_validation.py | 79 | 47914 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to zero/one
    # score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
alexrudnick/chipa | squoiawsd/learn.py | 1 | 9276 | #!/usr/bin/env python3
import argparse
from operator import itemgetter
import readline
import functools
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
import nltk
from nltk.probability import FreqDist
from nltk.probability import ConditionalFreqDist
from nltk.probability import ConditionalProbDist
from nltk.probability import ELEProbDist
import features
from constants import UNTRANSLATED
from constants import OOV
DEBUG=False
def pause():
if DEBUG: input("ENTER TO CONTINUE")
def target_words_for_each_source_word(ss, ts, alignment):
"""Given a list of tokens in source language, a list of tokens in target
language, and a list of Berkeley-style alignments of the form target-source,
for each source word, return the list of corresponding target words."""
alignment = [tuple(map(int, pair.split('-'))) for pair in alignment]
out = [list() for i in range(len(ss))]
indices = [list() for i in range(len(ss))]
alignment.sort(key=itemgetter(0))
for (ti,si) in alignment:
## make sure we're grabbing contiguous phrases
if (not indices[si]) or (ti == indices[si][-1] + 1):
indices[si].append(ti)
targetword = ts[ti]
out[si].append(targetword)
return [" ".join(targetwords) for targetwords in out]
def get_target_language_sentences(triple_sentences):
"""Return all of the "sentences" over the target language, used for training
the Source-Order language model."""
sentences = []
for (ss, ts, alignment) in triple_sentences:
tws = target_words_for_each_source_word(ss, ts, alignment)
sentence = []
for label in tws:
if label:
sentence.append(label)
else:
sentence.append(UNTRANSLATED)
sentences.append(sentence)
return sentences
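# Illustrative sketch (not part of the original module): hypothetical aligned
# sentence triples showing that a source word with no aligned target words is
# labeled with the UNTRANSLATED placeholder.
def _example_get_target_language_sentences():
    triple_sentences = [
        (["el", "gato"], ["the", "cat"], ["0-0", "1-1"]),
        (["hola", "mundo"], ["hello"], ["0-0"]),  # "mundo" is left unaligned
    ]
    # Expected result: [["the", "cat"], ["hello", UNTRANSLATED]]
    return get_target_language_sentences(triple_sentences)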
def load_bitext(args):
"""Take in args containing filenames filenames, return a list of
(source,target,alignment) tuples. Lowercase everything.
NB: input files should already be lemmatized at this point.
"""
out_source = []
out_target = []
out_align = []
with open(args.sourcefn) as infile_s, \
open(args.targetfn) as infile_t, \
open(args.alignfn) as infile_align:
for source, target, alignment in zip(infile_s, infile_t, infile_align):
out_source.append(source.strip().lower().split())
out_target.append(target.strip().lower().split())
out_align.append(alignment.strip().split())
return list(zip(out_source, out_target, out_align))
def cpd(cfd):
"""Take a ConditionalFreqDist and turn it into a ConditionalProdDist"""
return ConditionalProbDist(cfd, ELEProbDist)
def reverse_cfd(cfd):
"""Given a ConditionalFreqDist, reverse the conditions and the samples!!"""
out = ConditionalFreqDist()
for condition in cfd.conditions():
for sample in cfd[condition].samples():
out[sample].inc(condition, cfd[condition][sample])
return out
SL_SENTENCES = None
TAGGED_SENTENCES = None
def set_examples(sl_sentences, tagged_sentences):
global SL_SENTENCES
global TAGGED_SENTENCES
SL_SENTENCES = sl_sentences
TAGGED_SENTENCES = tagged_sentences
def build_instance(tagged_sentence, index):
feat = features.extract(tagged_sentence, index)
label = tagged_sentence[index][1]
return (feat, label)
def trainingdata_for(word, nonnull=False):
training = []
for ss,tagged in zip(SL_SENTENCES, TAGGED_SENTENCES):
if word in ss:
index = ss.index(word)
training.append(build_instance(tagged, index))
if nonnull:
training = [(feat,label) for (feat,label) in training
if label != UNTRANSLATED]
return training
@functools.lru_cache(maxsize=100000)
def classifier_for(word, nonnull=False):
training = trainingdata_for(word, nonnull=nonnull)
if not training:
return OOVClassifier()
labels = set(label for fs,label in training)
if len(labels) == 1:
classif = MFSClassifier()
else:
## XXX: futz with regularization constant here.
classif = SklearnClassifier(LogisticRegression(C=0.1))
classif.train(training)
return classif
@functools.lru_cache(maxsize=100000)
def mfs_for(word):
fd = nltk.probability.FreqDist()
labeled_featuresets = trainingdata_for(word)
for (f,label) in labeled_featuresets:
fd[label] += 1
return fd.max()
@functools.lru_cache(maxsize=100000)
def mfs_translation(word):
"""Return the MFS for the given word, but require that it's not the
untranslated token unless that's all we've seen."""
fd = nltk.probability.FreqDist()
labeled_featuresets = trainingdata_for(word)
for (f,label) in labeled_featuresets:
if label == UNTRANSLATED: continue
fd[label] += 1
mostcommon = fd.most_common()
if not mostcommon:
return OOV
return mostcommon[0][0]
class MFSClassifier(nltk.classify.ClassifierI):
def __init__(self):
self.fd = nltk.probability.FreqDist()
def train(self, labeled_featuresets):
for (f,label) in labeled_featuresets:
self.fd[label] += 1
def classify(self, featureset):
return self.fd.max()
def prob_classify(self, featureset):
return nltk.probability.DictionaryProbDist({self.fd.max(): 1.0})
class OOVClassifier(nltk.classify.ClassifierI):
def __init__(self):
pass
def train(self, labeled_featuresets):
pass
def classify(self, featureset):
return OOV
def prob_classify(self, featureset):
return nltk.probability.DictionaryProbDist({OOV: 1.0})
def disambiguate_words(words):
"""Given a list of words/lemmas, return a list of disambiguation answers for
them."""
classifiers = [classifier_for(word, nonnull=True) for word in words]
answers = []
for i in range(len(words)):
faketagged = [(w,None) for w in words]
feat = features.extract(faketagged, i)
classif = classifiers[i]
ans = classif.classify(feat)
if ans == UNTRANSLATED:
ans = mfs_translation(words[i])
print("MFS!!!", words[i], "==>", ans)
answers.append(ans)
return [str(ans) for ans in answers]
def prob_disambiguate_words(words):
"""Given a list of words/lemmas, return a list of disambiguation answers for
them -- return a list of lists, where each sublist is ordered in decreasing
probability."""
classifiers = [classifier_for(word, nonnull=True) for word in words]
answers = []
for i in range(len(words)):
faketagged = [(w,None) for w in words]
feat = features.extract(faketagged, i)
classif = classifiers[i]
## get all possible options, sorted in wrong order
dist = classif.prob_classify(feat)
options = [(dist.prob(samp), samp) for samp in dist.samples()]
options.sort(reverse=True)
myanswers = [str(lex) for (prob, lex) in options
if prob > 0.01 ]
print(myanswers)
answers.append(myanswers)
return answers
@functools.lru_cache(maxsize=100000)
def distribution_for(word):
fd = nltk.probability.FreqDist()
labeled_featuresets = trainingdata_for(word)
for (f,label) in labeled_featuresets:
fd[label] += 1
return fd
def repl():
while True:
try:
line = input('> ')
except: break
line = line.strip()
sentences = nltk.sent_tokenize(line)
s_tokenized = [nltk.word_tokenize(sent) for sent in sentences]
tokenized = []
for sent in s_tokenized:
tokenized.extend(sent)
print("tokenized:", tokenized)
answers = disambiguate_words(tokenized)
print(list(zip(tokenized, answers)))
for w in tokenized:
print(w, end=" ")
fd = distribution_for(w)
print(fd.most_common(10))
def get_argparser():
parser = argparse.ArgumentParser(description='quechua')
parser.add_argument('--sourcefn', type=str, required=True)
parser.add_argument('--targetfn', type=str, required=True)
parser.add_argument('--alignfn', type=str, required=True)
parser.add_argument('--clusterfn', type=str, required=True)
parser.add_argument('--crossvalidate',dest='crossvalidate',
action='store_true')
parser.add_argument('--no-crossvalidate',dest='crossvalidate',
action='store_false')
parser.set_defaults(crossvalidate=False)
return parser
def main():
parser = get_argparser()
args = parser.parse_args()
triple_sentences = load_bitext(args)
print("training on {0} sentences.".format(len(triple_sentences)))
tl_sentences = get_target_language_sentences(triple_sentences)
sl_sentences = [s for (s,t,a) in triple_sentences]
tagged_sentences = [list(zip(ss, ts))
for ss,ts in zip(sl_sentences, tl_sentences)]
set_examples(sl_sentences, tagged_sentences)
repl()
if __name__ == "__main__": main()
| gpl-3.0 |
hanteng/pyGeolinguisticSize | pyGeolinguisticSize/sizedb/__init__.py | 1 | 5717 | # -*- coding: utf-8 -*-
#歧視無邊,回頭是岸。鍵起鍵落,情真情幻。
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
import os
__all__ = ["wiki_sitemap", "wiki_pageviews",\
"mapping", "langname", "territory", "territory_lang", \
"size_gl", "size_gl_IPop", "size_l", "size_l_wiki", \
"simplified"]
__all__ = [str(u) for u in __all__]
_ROOT = os.path.abspath(os.path.dirname(__file__))
from os.path import basename, join, splitext
wiki_sitemap = pd.read_pickle(os.path.join(_ROOT,"Sitemap.pkl"))
mapping = pd.read_pickle(os.path.join(_ROOT,"df_codeMappings.pkl")) #Legacy:mapping.pkl
territory = pd.read_pickle(os.path.join(_ROOT,"df_territory_basic.pkl"))#Legacy:territory.pkl
territory_lang= pd.read_pickle(os.path.join(_ROOT,"df_territory_lang.pkl"))
# wiki_sitemap df=df.set_index('l_code') INDEX: Wiki style language codes
# mapping df=df.set_index("type") INDEX: CLDR two-digit ISO country codes
# territory df=df.set_index("type") INDEX: CLDR two-digit ISO country codes
# territory_lang INDEX: serial number, with
####c_code (2 digit country code)
####geo (2 digit country code)
####type (CLDR language code)
# for futher integrations
size_gl = pd.read_pickle(os.path.join(_ROOT,"_tl_size.pkl"))
##>>> size_gl
##<class 'pandas.core.panel.Panel'>
##Dimensions: 5 (items) x 1261 (major_axis) x 23 (minor_axis)
##Items axis: IH to PPPGDP
##Major_axis axis: (en, AC) to (tn, ZW)
##Minor_axis axis: l_name to 2015
## Slice out IPop for use
size_gl_IPop=size_gl['IPop'].sort(columns=[2013], ascending=False)
#test=size_gl_IPop[2010].copy(deep=True)
#test.sort(ascending=False)
## Language names
langname=size_gl['IPop'].reset_index()[['type','l_name']].drop_duplicates().set_index('type')['l_name']
#>>> langname['fr']
#'French'
## Aggregation results based on languages _l_size.pkl
try:
    size_l = pd.read_pickle(os.path.join(_ROOT,"_l_size.pkl"))
except:
list_indicators=size_gl.items #Legacy: ['IPop','PPPGDP','LP']
#indicator='IPop'
sizel=dict()
for indicator in list_indicators:
dfo=size_gl[indicator].reset_index()
        osize_l = dfo[["type"] + list(range(2000, 2013+1))].groupby(['type']).sum()
osize_l["l_name"]=[langname[x] for x in osize_l.index]
#osize_l.drop(['c_code','populationPercent'], axis=1, inplace=True)
osize_lg=dfo[["type"]+["geo"]].groupby(['type']).sum()
osize_l["geo"]=[[x[i:i+2] for i in range(0, len(x), 2)] for x in osize_lg.geo]
#exit()
sizel[indicator]=osize_l
#size_l=size_gl[indicator].groupby(['type']).sum()[list_indicators].sort(columns=['IPop'], ascending=False)
size_l=pd.Panel(sizel)
size_l.to_pickle(os.path.join(_ROOT,"_l_size.pkl"))
## Aggregation results based on languages in Wikipedia
size_l_wiki=pd.read_pickle(os.path.join(_ROOT,"wiki_lang_panel_aggregated.pkl"))
## Simplified results
simplified=pd.read_pickle(os.path.join(_ROOT,"size_geolinguistic.pkl"))
'''
>>> size_l_wiki.loc[:,:,2013].head()
IH IPop IPv4 LP PPPGDP
index
zh NaN 594.441 NaN 1271.592 16487.45
iu NaN 0.02530088 NaN 0.0294882 1.275464
mn NaN 0.951459 NaN 5.364831 49.92671
als NaN 1.307198 NaN 1.539666 74.89924
az NaN 11.54916 NaN 28.38094 469.3428
>>> size_l_wiki.loc['IPop',:,:].head()[['geo']+list(range(2000,2013+1))]
geo 2000 \
index
zh [AU, BN, GB, GF, HK, ID, MO, MY, PA, PF, PH, S... 31.23139
iu [CA, CA] 0.01320641
mn [MN, RU, MN, RU] 0.05592889
als [CH, FR, LI] 0.4815436
az [IR, AZ, RU, AM, AZ, TR] 0.1754967
2001 2002 2003 2004 2005 \
index
zh 44.19375 70.60649 91.6723 106.4931 123.2236
iu 0.01566192 0.01619877 0.01704233 0.01767301 0.0193844
mn 0.07469757 0.09346433 0.0003592577 0.000554754 0.0006541379
als 0.6179152 0.6985425 0.7690295 0.8146932 0.8615768
az 0.2835659 1.195153 1.192338 1.318835 2.118526
2006 2007 2008 2009 2010 \
index
zh 149.3169 216.5657 297.1577 375.4637 443.4091
iu 0.01978284 0.02019758 0.02138945 0.02265106 0.02290602
mn 0.0007721172 0.4388074 0.4858311 0.5053024 0.524522
als 0.939457 1.067281 1.121284 1.158199 1.225288
az 2.614118 3.047691 3.473452 4.588354 6.988492
2011 2012 2013
index
zh 494.5442 547.4921 594.441
iu 0.02391605 0.02419423 0.02530088
mn 0.6498466 0.8690564 0.951459
als 1.251201 1.283024 1.307198
az 8.593783 10.30142 11.54916
'''
wiki_pageviews=pd.read_pickle(os.path.join(_ROOT,"TablesPageViewsMonthlyCombined.pkl"))
##>>> pgviews_wiki.head()
## pgviews timeperiod
##l_code
##ALL 19629 Feb 2015
##en 9327 Feb 2015
##ja 1444 Feb 2015
##es 1357 Feb 2015
##ru 1275 Feb 2015
| gpl-3.0 |
ZhukovGreen/UMLND | k-means_clustering_mini_project/feature_format.py | 1 | 4419 | #!/usr/bin/python
"""
A general tool for converting data from the
dictionary format to an (n x k) python list that's
ready for training an sklearn algorithm
n--no. of key-value pairs in dictionary
k--no. of features being extracted
dictionary keys are names of persons in dataset
dictionary values are dictionaries, where each
key-value pair in the dict is the name
of a feature, and its value for that person
In addition to converting a dictionary to a numpy
array, you may want to separate the labels from the
features--this is what targetFeatureSplit is for
so, if you want to have the poi label as the target,
and the features you want to use are the person's
salary and bonus, here's what you would do:
feature_list = ["poi", "salary", "bonus"]
data_array = featureFormat( data_dictionary, feature_list )
label, features = targetFeatureSplit(data_array)
the line above (targetFeatureSplit) assumes that the
label is the _first_ item in feature_list--very important
that poi is listed first!
"""
import numpy as np
def featureFormat(dictionary, features, remove_NaN=True,
remove_all_zeroes=True, remove_any_zeroes=False,
sort_keys=False):
""" convert dictionary to numpy array of features
remove_NaN = True will convert "NaN" string to 0.0
remove_all_zeroes = True will omit any data points for which
all the features you seek are 0.0
remove_any_zeroes = True will omit any data points for which
any of the features you seek are 0.0
sort_keys = True sorts keys by alphabetical order. Setting the value as
a string opens the corresponding pickle file with a preset key
order (this is used for Python 3 compatibility, and sort_keys
should be left as False for the course mini-projects).
NOTE: first feature is assumed to be 'poi' and is not checked for
removal for zero or missing values.
"""
return_list = []
# Key order - first branch is for Python 3 compatibility on mini-projects,
# second branch is for compatibility on final project.
if isinstance(sort_keys, str):
import pickle
keys = pickle.load(open(sort_keys, "rb"))
elif sort_keys:
keys = sorted(dictionary.keys())
else:
keys = dictionary.keys()
for key in keys:
tmp_list = []
for feature in features:
try:
dictionary[key][feature]
except KeyError:
                print("error: key ", feature, " not present")
return
value = dictionary[key][feature]
if value == "NaN" and remove_NaN:
value = 0
tmp_list.append(float(value))
# Logic for deciding whether or not to add the data point.
append = True
# exclude 'poi' class as criteria.
if features[0] == 'poi':
test_list = tmp_list[1:]
else:
test_list = tmp_list
### if all features are zero and you want to remove
### data points that are all zero, do that here
if remove_all_zeroes:
append = False
for item in test_list:
if item != 0 and item != "NaN":
append = True
break
### if any features for a given data point are zero
### and you want to remove data points with any zeroes,
### handle that here
if remove_any_zeroes:
if 0 in test_list or "NaN" in test_list:
append = False
### Append the data point if flagged for addition.
if append:
return_list.append(np.array(tmp_list))
return np.array(return_list)
def targetFeatureSplit(data):
"""
given a numpy array like the one returned from
featureFormat, separate out the first feature
and put it into its own list (this should be the
quantity you want to predict)
return targets and features as separate lists
(sklearn can generally handle both lists and numpy arrays as
input formats when training/predicting)
"""
target = []
features = []
for item in data:
target.append(item[0])
features.append(item[1:])
return target, features
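# Illustrative sketch (not part of the original module): a minimal, made-up
# data dictionary showing featureFormat and targetFeatureSplit used together,
# as described in the module docstring. Person names and values are invented.
def _example_feature_format_usage():
    data_dict = {
        "PERSON A": {"poi": 1, "salary": 100, "bonus": 50},
        "PERSON B": {"poi": 0, "salary": "NaN", "bonus": 20},
    }
    data = featureFormat(data_dict, ["poi", "salary", "bonus"], sort_keys=True)
    # "NaN" becomes 0.0; rows whose non-poi features are all zero are dropped.
    labels, features = targetFeatureSplit(data)
    # labels -> [1.0, 0.0]; features -> [array([100., 50.]), array([0., 20.])]
    return labels, features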
| gpl-3.0 |
dssg/wikienergy | disaggregator/build/pandas/pandas/sandbox/qtpandas.py | 3 | 4085 | '''
Easy integration of DataFrame into pyqt framework
@author: Jev Kuznetsov
'''
try:
from PyQt4.QtCore import QAbstractTableModel, Qt, QVariant, QModelIndex
from PyQt4.QtGui import (
QApplication, QDialog, QVBoxLayout, QTableView, QWidget)
except ImportError:
from PySide.QtCore import QAbstractTableModel, Qt, QModelIndex
from PySide.QtGui import (
QApplication, QDialog, QVBoxLayout, QTableView, QWidget)
QVariant = lambda value=None: value
from pandas import DataFrame, Index
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self):
super(DataFrameModel, self).__init__()
self.df = DataFrame()
def setDataFrame(self, dataFrame):
self.df = dataFrame
def signalUpdate(self):
        ''' tell viewers to update their data (this is a full update, not
efficient)'''
self.layoutChanged.emit()
#------------- table display functions -----------------
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
# return self.df.index.tolist()
return self.df.index.tolist()[section]
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
return QVariant(str(self.df.ix[index.row(), index.column()]))
def flags(self, index):
flags = super(DataFrameModel, self).flags(index)
flags |= Qt.ItemIsEditable
return flags
def setData(self, index, value, role):
row = self.df.index[index.row()]
col = self.df.columns[index.column()]
if hasattr(value, 'toPyObject'):
# PyQt4 gets a QVariant
value = value.toPyObject()
else:
# PySide gets an unicode
dtype = self.df[col].dtype
if dtype != object:
value = None if value == '' else dtype.type(value)
self.df.set_value(row, col, value)
return True
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self, dataFrame, parent=None):
super(DataFrameWidget, self).__init__(parent)
self.dataModel = DataFrameModel()
self.dataTable = QTableView()
self.dataTable.setModel(self.dataModel)
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
# Set DataFrame
self.setDataFrame(dataFrame)
def setDataFrame(self, dataFrame):
self.dataModel.setDataFrame(dataFrame)
self.dataModel.signalUpdate()
self.dataTable.resizeColumnsToContents()
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int': [1, 2, 3], 'float': [1.5, 2.5, 3.5],
'string': ['a', 'b', 'c'], 'nan': [np.nan, np.nan, np.nan]}
return DataFrame(data, index=Index(['AAA', 'BBB', 'CCC']),
columns=['int', 'float', 'string', 'nan'])
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
df = testDf() # make up some data
widget = DataFrameWidget(df)
        widget.dataTable.resizeColumnsToContents()
layout = QVBoxLayout()
layout.addWidget(widget)
self.setLayout(layout)
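# A short, hedged sketch of reusing DataFrameWidget inside an existing Qt
# application (it assumes a QApplication is already running, as in the
# __main__ block below). New frames can be pushed into the widget via
# setDataFrame, which refreshes the underlying DataFrameModel. The
# `_demo_embed` name is illustrative only.
def _demo_embed(parent=None):
    df = DataFrame({'a': [1, 2, 3], 'b': [0.5, 1.5, 2.5]})
    widget = DataFrameWidget(df, parent=parent)
    # Later, replace the displayed frame in place:
    widget.setDataFrame(DataFrame({'a': [4, 5], 'b': [2.5, 3.5]}))
    return widget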
if __name__ == '__main__':
    import sys
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| mit |
terkkila/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
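# A small optional follow-up, not part of the original example: the final test
# accuracy of each boosted model can be printed after the plots, summarizing
# the error curves shown in the left panel.
print("SAMME final test accuracy:   %.3f"
      % accuracy_score(y_test, bdt_discrete.predict(X_test)))
print("SAMME.R final test accuracy: %.3f"
      % accuracy_score(y_test, bdt_real.predict(X_test)))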
| bsd-3-clause |
xuewei4d/scikit-learn | sklearn/linear_model/_logistic.py | 6 | 84460 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
# Arthur Mensch <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from scipy.special import expit, logsumexp
from joblib import Parallel, effective_n_jobs
from ._base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ._sag import sag_solver
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm._base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (log_logistic, safe_sparse_dot, softmax,
squared_norm)
from ..utils.extmath import row_norms
from ..utils.optimize import _newton_cg, _check_optimize_result
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.validation import _deprecate_positional_args
from ..utils.multiclass import check_classification_targets
from ..utils.fixes import _joblib_parallel_args
from ..utils.fixes import delayed
from ..model_selection import check_cv
from ..metrics import get_scorer
_LOGISTIC_SOLVER_CONVERGENCE_MSG = (
"Please also refer to the documentation for alternative solver options:\n"
" https://scikit-learn.org/stable/modules/linear_model.html"
"#logistic-regression")
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray of shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
c : float
The intercept.
yz : float
y * np.dot(X, w).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
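# A hedged illustration (not part of the library API): when w carries one
# extra entry, _intercept_dot treats the last entry as the intercept c and
# returns y * (X @ w + c). The `_demo_intercept_dot` name is illustrative.
def _demo_intercept_dot():
    X_demo = np.array([[1., 2.], [3., 4.]])
    y_demo = np.array([1., -1.])
    w_demo = np.array([0.5, -0.25, 2.0])  # two coefficients + intercept
    w, c, yz = _intercept_dot(w_demo, X_demo, y_demo)
    # Here c == 2.0, w == [0.5, -0.25] and yz == y_demo * (X_demo @ w + c)
    return w, c, yz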
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray of shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
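# A hedged sanity-check sketch (illustrative, not part of scikit-learn): the
# analytic gradient returned by _logistic_loss_and_grad can be compared with a
# finite-difference approximation of _logistic_loss via
# scipy.optimize.check_grad; the returned discrepancy should be tiny.
def _demo_check_logistic_grad():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(20, 3)
    y_demo = np.where(rng.randn(20) > 0, 1., -1.)
    w_demo = rng.randn(4)  # 3 coefficients + intercept
    err = optimize.check_grad(
        lambda w: _logistic_loss(w, X_demo, y_demo, alpha=1.0),
        lambda w: _logistic_loss_and_grad(w, X_demo, y_demo, alpha=1.0)[1],
        w_demo)
    return err  # expected to be small (finite-difference noise only)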
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray of shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
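# A hedged illustration (not part of the library API): _logistic_grad_hess
# returns the gradient together with a Hessian-vector-product closure, which
# is what the newton-cg solver consumes. The closure can be exercised directly
# on an arbitrary vector; `_demo_logistic_hessp` is an illustrative name.
def _demo_logistic_hessp():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(30, 4)
    y_demo = np.where(rng.randn(30) > 0, 1., -1.)
    w_demo = np.zeros(4)  # no intercept term in this sketch
    grad, Hs = _logistic_grad_hess(w_demo, X_demo, y_demo, alpha=1.0)
    s = rng.randn(4)
    return grad, Hs(s)  # both are ndarrays of shape (4,)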
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray of shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Y : ndarray of shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
p : ndarray of shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray of shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
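# A hedged sanity check (illustrative, not part of scikit-learn): the class
# probabilities returned by _multinomial_loss sum to one over the classes for
# every sample, and with all-zero coefficients they are uniform.
def _demo_multinomial_loss():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(10, 3)
    Y_demo = np.eye(4)[rng.randint(4, size=10)]  # one-hot labels, 4 classes
    w_demo = np.zeros(4 * (3 + 1))  # coefficients plus intercepts
    sw = np.ones(10)
    loss, p, w = _multinomial_loss(w_demo, X_demo, Y_demo, 1.0, sw)
    assert np.allclose(p.sum(axis=1), 1.0)
    return loss, p.shape  # p has shape (10, 4); here every row is 0.25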
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray of shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Y : ndarray of shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray of shape (n_classes * n_features,) or \
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray of shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)),
dtype=X.dtype)
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray of shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Y : ndarray of shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
grad : ndarray of shape (n_classes * n_features,) or \
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver(solver, penalty, dual):
all_solvers = ['liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga']
if solver not in all_solvers:
raise ValueError("Logistic Regression supports only solvers in %s, got"
" %s." % (all_solvers, solver))
all_penalties = ['l1', 'l2', 'elasticnet', 'none']
if penalty not in all_penalties:
raise ValueError("Logistic Regression supports only penalties in %s,"
" got %s." % (all_penalties, penalty))
if solver not in ['liblinear', 'saga'] and penalty not in ('l2', 'none'):
raise ValueError("Solver %s supports only 'l2' or 'none' penalties, "
"got %s penalty." % (solver, penalty))
if solver != 'liblinear' and dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
if penalty == 'elasticnet' and solver != 'saga':
raise ValueError("Only 'saga' solver supports elasticnet penalty,"
" got solver={}.".format(solver))
if solver == 'liblinear' and penalty == 'none':
raise ValueError(
"penalty='none' is not supported for the liblinear solver"
)
return solver
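# A brief, hedged illustration of the validation above: _check_solver returns
# the solver name for a supported combination and raises ValueError otherwise.
# `_demo_check_solver` is an illustrative name, not a library function.
def _demo_check_solver():
    assert _check_solver('saga', 'elasticnet', dual=False) == 'saga'
    try:
        _check_solver('lbfgs', 'l1', dual=False)
    except ValueError as exc:
        return str(exc)  # lbfgs supports only 'l2' or 'none' penalties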
def _check_multi_class(multi_class, solver, n_classes):
if multi_class == 'auto':
if solver == 'liblinear':
multi_class = 'ovr'
elif n_classes > 2:
multi_class = 'multinomial'
else:
multi_class = 'ovr'
if multi_class not in ('multinomial', 'ovr'):
raise ValueError("multi_class should be 'multinomial', 'ovr' or "
"'auto'. Got %s." % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
return multi_class
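# A brief, hedged illustration of the dispatch above: with a non-liblinear
# solver, 'auto' resolves to 'ovr' for two classes and to 'multinomial'
# otherwise, while liblinear always falls back to 'ovr'.
def _demo_check_multi_class():
    assert _check_multi_class('auto', 'lbfgs', n_classes=2) == 'ovr'
    assert _check_multi_class('auto', 'lbfgs', n_classes=3) == 'multinomial'
    assert _check_multi_class('auto', 'liblinear', n_classes=3) == 'ovr'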
def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='auto',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or array-like of shape (n_cs,), default=10
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool, default=True
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Numerical solver to use.
coef : array-like of shape (n_features,), default=None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape(n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array of shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=solver != 'liblinear')
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight,
classes=classes, y=y)
sample_weight *= class_weight_[le.fit_transform(y)]
    # For doing an OvR, we need to mask the labels first. For the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight,
classes=mask_classes,
y=y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver not in ['sag', 'saga']:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F', dtype=X.dtype)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # scipy.optimize.minimize and newton-cg accept only
# ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
def func(x, *args): return _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
def func(x, *args): return _multinomial_loss(x, *args)[0]
def grad(x, *args): return _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
def grad(x, *args): return _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
opt_res = optimize.minimize(
func, w0, method="L-BFGS-B", jac=True,
args=(X, target, 1. / C, sample_weight),
options={"iprint": iprint, "gtol": tol, "maxiter": max_iter}
)
n_iter_i = _check_optimize_result(
solver, opt_res, max_iter,
extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
w0, loss = opt_res.x, opt_res.fun
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = _newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(X.dtype, copy=False)
loss = 'multinomial'
else:
loss = 'log'
# alpha is for L2-norm, beta is for L1-norm
if penalty == 'l1':
alpha = 0.
beta = 1. / C
elif penalty == 'l2':
alpha = 1. / C
beta = 0.
else: # Elastic-Net penalty
alpha = (1. / C) * (1 - l1_ratio)
beta = (1. / C) * l1_ratio
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
n_classes = max(2, classes.size)
multi_w0 = np.reshape(w0, (n_classes, -1))
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0.copy())
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return np.array(coefs), np.array(Cs), n_iter
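# A hedged usage sketch of the private path function above (illustrative, not
# part of the public API): one model is fitted per C with warm starts, and the
# coefficients, the grid of Cs and the iteration counts are returned.
def _demo_logistic_path():
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=100, n_features=5,
                                         random_state=0)
    coefs, Cs, n_iter = _logistic_regression_path(
        X_demo, y_demo, Cs=5, solver='lbfgs', multi_class='ovr')
    return coefs.shape, Cs, n_iter  # coefs has shape (5, n_features + 1)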
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='auto', random_state=None,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or list of floats, default=10
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs is used.
scoring : callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool, default=False
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Tolerance for stopping criteria.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Decides which solver to use.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape(n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray of shape (n_cs,)
Scores obtained for each Cs.
n_iter : ndarray of shape(n_cs,)
Actual number of iteration for each Cs.
"""
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = sample_weight[train]
coefs, Cs, n_iter = _logistic_regression_path(
X_train, y_train, Cs=Cs, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, solver=solver, max_iter=max_iter,
class_weight=class_weight, pos_class=pos_class,
multi_class=multi_class, tol=tol, verbose=verbose, dual=dual,
penalty=penalty, intercept_scaling=intercept_scaling,
random_state=random_state, check_input=False,
max_squared_sum=max_squared_sum, sample_weight=sample_weight)
log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
scores = list()
scoring = get_scorer(scoring)
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
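# A hedged usage sketch of the scoring helper above (illustrative, not part of
# the public API): the path is fitted on a train split and each C is scored on
# a held-out split, which is exactly what LogisticRegressionCV does per fold.
def _demo_scoring_path():
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=120, n_features=4,
                                         random_state=0)
    train, test = np.arange(80), np.arange(80, 120)
    coefs, Cs, scores, n_iter = _log_reg_scoring_path(
        X_demo, y_demo, train, test, pos_class=1, Cs=4, fit_intercept=True,
        solver='lbfgs', multi_class='ovr')
    return Cs, scores  # one accuracy score per C on the held-out split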
class LogisticRegression(LinearClassifierMixin,
SparseCoefMixin,
BaseEstimator):
"""
Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the
cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag', 'saga' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note
that regularization is applied by default**. It can handle both dense
and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit
floats for optimal performance; any other input format will be converted
(and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation, or no regularization. The 'liblinear' solver
supports both L1 and L2 regularization, with a dual formulation only for
the L2 penalty. The Elastic-Net regularization is only supported by the
'saga' solver.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : {'l1', 'l2', 'elasticnet', 'none'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver. If 'none' (not supported by the
liblinear solver), no regularization is applied.
.. versionadded:: 0.19
l1 penalty with SAGA solver (allowing 'multinomial' + L1)
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
tol : float, default=1e-4
Tolerance for stopping criteria.
C : float, default=1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default=1
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'*
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs', 'sag' and 'saga' handle L2 or no penalty
- 'liblinear' and 'saga' also handle L1 penalty
- 'saga' also supports 'elasticnet' penalty
- 'liblinear' does not support setting ``penalty='none'``
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can
preprocess the data with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
.. versionchanged:: 0.22
The default solver changed from 'liblinear' to 'lbfgs' in 0.22.
max_iter : int, default=100
Maximum number of iterations taken for the solvers to converge.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
n_jobs : int, default=None
Number of CPU cores used when parallelizing over classes if
        multi_class='ovr'. This parameter is ignored when the ``solver`` is
set to 'liblinear' regardless of whether 'multi_class' is specified or
not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors.
See :term:`Glossary <n_jobs>` for more details.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Attributes
----------
classes_ : ndarray of shape (n_classes, )
A list of class labels known to the classifier.
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `coef_` corresponds
to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
intercept_ : ndarray of shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape (1,) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `intercept_`
corresponds to outcome 1 (True) and `-intercept_` corresponds to
outcome 0 (False).
n_iter_ : ndarray of shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
.. versionchanged:: 0.20
In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
See Also
--------
SGDClassifier : Incrementally trained logistic regression (when given
the parameter ``loss="log"``).
LogisticRegressionCV : Logistic regression with built-in cross validation.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
L-BFGS-B -- Software for Large-scale Bound-constrained Optimization
Ciyou Zhu, Richard Byrd, Jorge Nocedal and Jose Luis Morales.
http://users.iems.northwestern.edu/~nocedal/lbfgsb.html
LIBLINEAR -- A Library for Large Linear Classification
https://www.csie.ntu.edu.tw/~cjlin/liblinear/
SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
Minimizing Finite Sums with the Stochastic Average Gradient
https://hal.inria.fr/hal-00860051/document
SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegression(random_state=0).fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :])
array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
[9.7...e-01, 2.8...e-02, ...e-08]])
>>> clf.score(X, y)
0.97...
"""
@_deprecate_positional_args
def __init__(self, penalty='l2', *, dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='lbfgs', max_iter=100,
multi_class='auto', verbose=0, warm_start=False, n_jobs=None,
l1_ratio=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
self.l1_ratio = l1_ratio
def fit(self, X, y, sample_weight=None):
"""
Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self
Fitted estimator.
Notes
-----
The SAGA solver supports both float64 and float32 bit arrays.
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if self.penalty == 'elasticnet':
if (not isinstance(self.l1_ratio, numbers.Number) or
self.l1_ratio < 0 or self.l1_ratio > 1):
raise ValueError("l1_ratio must be between 0 and 1;"
" got (l1_ratio=%r)" % self.l1_ratio)
elif self.l1_ratio is not None:
warnings.warn("l1_ratio parameter is only used when penalty is "
"'elasticnet'. Got "
"(penalty={})".format(self.penalty))
if self.penalty == 'none':
if self.C != 1.0: # default values
warnings.warn(
"Setting penalty='none' will ignore the C and l1_ratio "
"parameters"
)
            # Note that the check for l1_ratio is done right above
C_ = np.inf
penalty = 'l2'
else:
C_ = self.C
penalty = self.penalty
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
if solver == 'lbfgs':
_dtype = np.float64
else:
_dtype = [np.float64, np.float32]
X, y = self._validate_data(X, y, accept_sparse='csr', dtype=_dtype,
order="C",
accept_large_sparse=solver != 'liblinear')
check_classification_targets(y)
self.classes_ = np.unique(y)
multi_class = _check_multi_class(self.multi_class, solver,
len(self.classes_))
if solver == 'liblinear':
if effective_n_jobs(self.n_jobs) != 1:
warnings.warn("'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = {}.".format(effective_n_jobs(self.n_jobs)))
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
# Hack so that we iterate only once for the multinomial case.
if multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(_logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if solver in ['sag', 'saga']:
prefer = 'threads'
else:
prefer = 'processes'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer=prefer))(
path_func(X, y, pos_class=class_, Cs=[C_],
l1_ratio=self.l1_ratio, fit_intercept=self.fit_intercept,
tol=self.tol, verbose=self.verbose, solver=solver,
multi_class=multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
penalty=penalty, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for class_, warm_start_coef_ in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
n_features = X.shape[1]
if multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
else:
self.intercept_ = np.zeros(n_classes)
return self
def predict_proba(self, X):
"""
Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function,
        and normalize these values across all the classes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
check_is_fitted(self)
ovr = (self.multi_class in ["ovr", "warn"] or
(self.multi_class == 'auto' and (self.classes_.size <= 2 or
self.solver == 'liblinear')))
if ovr:
return super()._predict_proba_lr(X)
else:
decision = self.decision_function(X)
if decision.ndim == 1:
# Workaround for multi_class="multinomial" and binary outcomes
# which requires softmax prediction with only a 1D decision.
decision_2d = np.c_[-decision, decision]
else:
decision_2d = decision
return softmax(decision_2d, copy=False)
def predict_log_proba(self, X):
"""
Predict logarithm of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
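# A short, hedged usage sketch complementing the class docstring above:
# predict_log_proba is simply the elementwise log of predict_proba, so the two
# are consistent up to floating point error. `_demo_predict_proba_consistency`
# is an illustrative name only.
def _demo_predict_proba_consistency():
    from sklearn.datasets import load_iris
    X_demo, y_demo = load_iris(return_X_y=True)
    clf = LogisticRegression(max_iter=1000, random_state=0).fit(X_demo, y_demo)
    proba = clf.predict_proba(X_demo[:5])
    log_proba = clf.predict_log_proba(X_demo[:5])
    assert np.allclose(np.log(proba), log_proba)
    return proba.argmax(axis=1)  # matches clf.predict(X_demo[:5])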
class LogisticRegressionCV(LogisticRegression,
LinearClassifierMixin,
BaseEstimator):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
See glossary entry for :term:`cross-validation estimator`.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
Elastic-Net penalty is only supported by the saga solver.
For the grid of `Cs` values and `l1_ratios` values, the best hyperparameter
is selected by the cross-validator
:class:`~sklearn.model_selection.StratifiedKFold`, but it can be changed
using the :term:`cv` parameter. The 'newton-cg', 'sag', 'saga' and 'lbfgs'
solvers can warm-start the coefficients (see :term:`Glossary<warm_start>`).
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : int or list of floats, default=10
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
cv : int or cross-validation generator, default=None
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
scoring : str or callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is 'accuracy'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty, whereas
'liblinear' and 'saga' handle L1 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can preprocess the data
with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float, default=1e-4
Tolerance for stopping criteria.
max_iter : int, default=100
Maximum number of iterations of the optimization algorithm.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
n_jobs : int, default=None
Number of CPU cores used during the cross-validation loop.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool, default=True
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
intercept_scaling : float, default=1
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when `solver='sag'`, 'saga' or 'liblinear' to shuffle the data.
Note that this only applies to the solver and not the cross-validation
generator. See :term:`Glossary <random_state>` for details.
l1_ratios : list of float, default=None
The list of Elastic-Net mixing parameters, with ``0 <= l1_ratio <= 1``.
Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to
using ``penalty='l2'``, while 1 is equivalent to using
``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a combination
of L1 and L2.
Attributes
----------
classes_ : ndarray of shape (n_classes, )
A list of class labels known to the classifier.
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : ndarray of shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape (1,) when the problem is binary.
Cs_ : ndarray of shape (n_cs)
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
l1_ratios_ : ndarray of shape (n_l1_ratios)
Array of l1_ratios used for cross-validation. If no l1_ratio is used
(i.e. penalty is not 'elasticnet'), this is set to ``[None]``
coefs_paths_ : ndarray of shape (n_folds, n_cs, n_features) or \
(n_folds, n_cs, n_features + 1)
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, n_cs, n_features)`` or
``(n_folds, n_cs, n_features + 1)`` depending on whether the
intercept is fit or not. If ``penalty='elasticnet'``, the shape is
``(n_folds, n_cs, n_l1_ratios_, n_features)`` or
``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class. Each dict value
has shape ``(n_folds, n_cs)`` or ``(n_folds, n_cs, n_l1_ratios)`` if
``penalty='elasticnet'``.
C_ : ndarray of shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
`C_` is of shape (n_classes,) when the problem is binary.
l1_ratio_ : ndarray of shape (n_classes,) or (n_classes - 1,)
Array of l1_ratio that maps to the best scores across every class. If
refit is set to False, then for each class, the best l1_ratio is the
average of the l1_ratio's that correspond to the best scores for each
fold. `l1_ratio_` is of shape (n_classes,) when the problem is binary.
n_iter_ : ndarray of shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds,
n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegressionCV
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegressionCV(cv=5, random_state=0).fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :]).shape
(2, 3)
>>> clf.score(X, y)
0.98...
See Also
--------
LogisticRegression
"""
@_deprecate_positional_args
def __init__(self, *, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=None, verbose=0,
refit=True, intercept_scaling=1., multi_class='auto',
random_state=None, l1_ratios=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
self.l1_ratios = l1_ratios
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
if self.penalty == 'elasticnet':
if self.l1_ratios is None or len(self.l1_ratios) == 0 or any(
(not isinstance(l1_ratio, numbers.Number) or l1_ratio < 0
or l1_ratio > 1) for l1_ratio in self.l1_ratios):
raise ValueError("l1_ratios must be a list of numbers between "
"0 and 1; got (l1_ratios=%r)" %
self.l1_ratios)
l1_ratios_ = self.l1_ratios
else:
if self.l1_ratios is not None:
warnings.warn("l1_ratios parameter is only used when penalty "
"is 'elasticnet'. Got (penalty={})".format(
self.penalty))
l1_ratios_ = [None]
if self.penalty == 'none':
raise ValueError(
"penalty='none' is not useful and not supported by "
"LogisticRegressionCV."
)
X, y = self._validate_data(X, y, accept_sparse='csr', dtype=np.float64,
order="C",
accept_large_sparse=solver != 'liblinear')
check_classification_targets(y)
class_weight = self.class_weight
# Encode for string labels
label_encoder = LabelEncoder().fit(y)
y = label_encoder.transform(y)
if isinstance(class_weight, dict):
class_weight = {label_encoder.transform([cls])[0]: v
for cls, v in class_weight.items()}
# The original class labels
classes = self.classes_ = label_encoder.classes_
encoded_labels = label_encoder.transform(label_encoder.classes_)
multi_class = _check_multi_class(self.multi_class, solver,
len(classes))
if solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
# Use the label encoded classes
n_classes = len(encoded_labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
encoded_labels = encoded_labels[1:]
classes = classes[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
if multi_class == 'multinomial':
iter_encoded_labels = iter_classes = [None]
else:
iter_encoded_labels = encoded_labels
iter_classes = classes
# compute the class weights for the entire dataset y
if class_weight == "balanced":
class_weight = compute_class_weight(
class_weight, classes=np.arange(len(self.classes_)), y=y)
class_weight = dict(enumerate(class_weight))
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if self.solver in ['sag', 'saga']:
prefer = 'threads'
else:
prefer = 'processes'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer=prefer))(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio
)
for label in iter_encoded_labels
for train, test in folds
for l1_ratio in l1_ratios_)
# _log_reg_scoring_path will output different shapes depending on the
# multi_class param, so we need to reshape the outputs accordingly.
# Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the
# rows are equal, so we just take the first one.
# After reshaping,
# - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios)
# - coefs_paths is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios, n_features)
# - n_iter is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios) or
# (1, n_folds, n_Cs . n_l1_ratios)
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
if multi_class == 'multinomial':
coefs_paths = np.reshape(
coefs_paths,
(len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1)
)
# equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3),
# (1, 2, 0, 3))
coefs_paths = np.swapaxes(coefs_paths, 0, 1)
coefs_paths = np.swapaxes(coefs_paths, 0, 2)
self.n_iter_ = np.reshape(
n_iter_,
(1, len(folds), len(self.Cs_) * len(l1_ratios_))
)
# repeat same scores across all classes
scores = np.tile(scores, (n_classes, 1, 1))
else:
coefs_paths = np.reshape(
coefs_paths,
(n_classes, len(folds), len(self.Cs_) * len(l1_ratios_),
-1)
)
self.n_iter_ = np.reshape(
n_iter_,
(n_classes, len(folds), len(self.Cs_) * len(l1_ratios_))
)
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(classes, scores))
self.coefs_paths_ = dict(zip(classes, coefs_paths))
self.C_ = list()
self.l1_ratio_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
for index, (cls, encoded_label) in enumerate(
zip(iter_classes, iter_encoded_labels)):
if multi_class == 'ovr':
scores = self.scores_[cls]
coefs_paths = self.coefs_paths_[cls]
else:
# For multinomial, all scores are the same across classes
scores = scores[0]
# coefs_paths will keep its original shape because
# logistic_regression_path expects it this way
if self.refit:
# best_index is between 0 and (n_Cs . n_l1_ratios - 1)
# for example, with n_cs=2 and n_l1_ratios=3
# the layout of scores is
# [c1, c2, c1, c2, c1, c2]
# l1_1 , l1_2 , l1_3
best_index = scores.sum(axis=0).argmax()
best_index_C = best_index % len(self.Cs_)
C_ = self.Cs_[best_index_C]
self.C_.append(C_)
best_index_l1 = best_index // len(self.Cs_)
l1_ratio_ = l1_ratios_[best_index_l1]
self.l1_ratio_.append(l1_ratio_)
if multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, :, best_index, :],
axis=1)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
# Note that y is label encoded and hence pos_class must be
# the encoded label / None (for 'multinomial')
w, _, _ = _logistic_regression_path(
X, y, pos_class=encoded_label, Cs=[C_], solver=solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=class_weight,
multi_class=multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio_)
w = w[0]
else:
# Take the best scores across every fold and the average of
# all coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
if multi_class == 'ovr':
w = np.mean([coefs_paths[i, best_indices[i], :]
for i in range(len(folds))], axis=0)
else:
w = np.mean([coefs_paths[:, i, best_indices[i], :]
for i in range(len(folds))], axis=0)
best_indices_C = best_indices % len(self.Cs_)
self.C_.append(np.mean(self.Cs_[best_indices_C]))
if self.penalty == 'elasticnet':
best_indices_l1 = best_indices // len(self.Cs_)
self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))
else:
self.l1_ratio_.append(None)
if multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
self.l1_ratio_ = np.asarray(self.l1_ratio_)
self.l1_ratios_ = np.asarray(l1_ratios_)
# if elasticnet was used, add the l1_ratios dimension to some
# attributes
if self.l1_ratios is not None:
# with n_cs=2 and n_l1_ratios=3
# the layout of scores is
# [c1, c2, c1, c2, c1, c2]
# l1_1 , l1_2 , l1_3
# To get a 2d array with the following layout
# l1_1, l1_2, l1_3
# c1 [[ . , . , . ],
# c2 [ . , . , . ]]
# We need to first reshape and then transpose.
# The same goes for the other arrays
for cls, coefs_path in self.coefs_paths_.items():
self.coefs_paths_[cls] = coefs_path.reshape(
(len(folds), self.l1_ratios_.size, self.Cs_.size, -1))
self.coefs_paths_[cls] = np.transpose(self.coefs_paths_[cls],
(0, 2, 1, 3))
for cls, score in self.scores_.items():
self.scores_[cls] = score.reshape(
(len(folds), self.l1_ratios_.size, self.Cs_.size))
self.scores_[cls] = np.transpose(self.scores_[cls], (0, 2, 1))
self.n_iter_ = self.n_iter_.reshape(
(-1, len(folds), self.l1_ratios_.size, self.Cs_.size))
self.n_iter_ = np.transpose(self.n_iter_, (0, 1, 3, 2))
return self
def score(self, X, y, sample_weight=None):
"""Returns the score using the `scoring` option on the given
test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Score of self.predict(X) wrt. y.
"""
scoring = self.scoring or 'accuracy'
scoring = get_scorer(scoring)
return scoring(self, X, y, sample_weight=sample_weight)
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
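# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module).  It exercises the LogisticRegressionCV class defined above,
# cross-validating both C and l1_ratio with the elastic-net penalty.  The
# dataset and the parameter grids below are arbitrary choices for
# demonstration only.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=200, n_features=10,
                                         random_state=0)
    demo_clf = LogisticRegressionCV(
        Cs=5,                         # 5 values of C on a log scale
        penalty='elasticnet',         # elastic-net requires the 'saga' solver
        solver='saga',
        l1_ratios=[0.1, 0.5, 0.9],    # grid of elastic-net mixing parameters
        cv=3,
        max_iter=5000,
    ).fit(X_demo, y_demo)
    # Best C and l1_ratio selected per class.
    print(demo_clf.C_, demo_clf.l1_ratio_)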
| bsd-3-clause |
quantopian/zipline | zipline/pipeline/domain.py | 1 | 14154 | """
This module defines the interface and implementations of Pipeline domains.
A domain represents a set of labels for the arrays computed by a Pipeline.
Currently, this means that a domain defines two things:
1. A calendar defining the dates to which the pipeline's inputs and outputs
should be aligned. The calendar is represented concretely by a pandas
DatetimeIndex.
2. The set of assets that the pipeline should compute over. Right now, the only
supported way of representing this set is with a two-character country code
describing the country of assets over which the pipeline should compute. In
the future, we expect to expand this functionality to include more general
concepts.
"""
import datetime
from textwrap import dedent
from interface import default, implements, Interface
import numpy as np
import pandas as pd
import pytz
from trading_calendars import get_calendar
from zipline.country import CountryCode
from zipline.utils.formatting import bulleted_list
from zipline.utils.input_validation import expect_types, optional
from zipline.utils.memoize import lazyval
from zipline.utils.pandas_utils import days_at_time
class IDomain(Interface):
"""Domain interface.
"""
def all_sessions(self):
"""
Get all trading sessions for the calendar of this domain.
This determines the row labels of Pipeline outputs for pipelines run on
this domain.
Returns
-------
sessions : pd.DatetimeIndex
An array of all session labels for this domain.
"""
@property
def country_code(self):
"""The country code for this domain.
Returns
-------
code : str
The two-character country iso3166 country code for this domain.
"""
def data_query_cutoff_for_sessions(self, sessions):
"""Compute the data query cutoff time for the given sessions.
Parameters
----------
sessions : pd.DatetimeIndex
The sessions to get the data query cutoff times for. This index
will contain all midnight UTC values.
Returns
-------
data_query_cutoff : pd.DatetimeIndex
Timestamp of the last minute for which data should be considered
"available" on each session.
"""
@default
def roll_forward(self, dt):
"""
Given a date, align it to the calendar of the pipeline's domain.
Parameters
----------
dt : pd.Timestamp
Returns
-------
pd.Timestamp
"""
dt = pd.Timestamp(dt, tz='UTC')
trading_days = self.all_sessions()
try:
return trading_days[trading_days.searchsorted(dt)]
except IndexError:
raise ValueError(
"Date {} was past the last session for domain {}. "
"The last session for this domain is {}.".format(
dt.date(),
self,
trading_days[-1].date()
)
)
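# Illustrative note (added for exposition, not part of the original module):
# ``roll_forward`` aligns an arbitrary date with the domain's calendar.  For a
# concrete domain such as ``US_EQUITIES`` (defined below), rolling forward a
# Saturday like 2019-07-06 would return the next session, Monday 2019-07-08,
# while rolling forward a date past the final session raises ``ValueError``.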
Domain = implements(IDomain)
Domain.__doc__ = """
A domain represents a set of labels for the arrays computed by a Pipeline.
A domain defines two things:
1. A calendar defining the dates to which the pipeline's inputs and outputs
should be aligned. The calendar is represented concretely by a pandas
DatetimeIndex.
2. The set of assets that the pipeline should compute over. Right now, the only
supported way of representing this set is with a two-character country code
describing the country of assets over which the pipeline should compute. In
the future, we expect to expand this functionality to include more general
concepts.
"""
Domain.__name__ = "Domain"
Domain.__qualname__ = "zipline.pipeline.domain.Domain"
class GenericDomain(Domain):
"""Special singleton class used to represent generic DataSets and Columns.
"""
def all_sessions(self):
raise NotImplementedError("Can't get sessions for generic domain.")
@property
def country_code(self):
raise NotImplementedError("Can't get country code for generic domain.")
def data_query_cutoff_for_sessions(self, sessions):
raise NotImplementedError(
"Can't compute data query cutoff times for generic domain.",
)
def __repr__(self):
return "GENERIC"
GENERIC = GenericDomain()
class EquityCalendarDomain(Domain):
"""
An equity domain whose sessions are defined by a named TradingCalendar.
Parameters
----------
country_code : str
ISO-3166 two-letter country code of the domain
calendar_name : str
Name of the calendar, to be looked up by trading_calendars.get_calendar.
data_query_offset : np.timedelta64
The offset from market open when data should no longer be considered
available for a session. For example, a ``data_query_offset`` of
``-np.timedelta64(45, 'm')`` means that the data must have
been available at least 45 minutes prior to market open for it to
appear in the pipeline input for the given session.
"""
@expect_types(
country_code=str,
calendar_name=str,
__funcname='EquityCalendarDomain',
)
def __init__(self,
country_code,
calendar_name,
data_query_offset=-np.timedelta64(45, 'm')):
self._country_code = country_code
self.calendar_name = calendar_name
self._data_query_offset = (
# add one minute because `open_time` is actually the open minute
# label which is one minute _after_ market open...
data_query_offset - np.timedelta64(1, 'm')
)
if data_query_offset >= datetime.timedelta(0):
raise ValueError(
'data must be ready before market open (offset must be < 0)',
)
@property
def country_code(self):
return self._country_code
@lazyval
def calendar(self):
return get_calendar(self.calendar_name)
def all_sessions(self):
return self.calendar.all_sessions
def data_query_cutoff_for_sessions(self, sessions):
opens = self.calendar.opens.loc[sessions].values
missing_mask = pd.isnull(opens)
if missing_mask.any():
missing_days = sessions[missing_mask]
raise ValueError(
'cannot resolve data query time for sessions that are not on'
' the %s calendar:\n%s' % (
self.calendar.name,
missing_days,
),
)
return pd.DatetimeIndex(opens + self._data_query_offset, tz='UTC')
def __repr__(self):
return "EquityCalendarDomain({!r}, {!r})".format(
self.country_code, self.calendar_name,
)
AR_EQUITIES = EquityCalendarDomain(CountryCode.ARGENTINA, 'XBUE')
AT_EQUITIES = EquityCalendarDomain(CountryCode.AUSTRIA, 'XWBO')
AU_EQUITIES = EquityCalendarDomain(CountryCode.AUSTRALIA, 'XASX')
BE_EQUITIES = EquityCalendarDomain(CountryCode.BELGIUM, 'XBRU')
BR_EQUITIES = EquityCalendarDomain(CountryCode.BRAZIL, 'BVMF')
CA_EQUITIES = EquityCalendarDomain(CountryCode.CANADA, 'XTSE')
CH_EQUITIES = EquityCalendarDomain(CountryCode.SWITZERLAND, 'XSWX')
CL_EQUITIES = EquityCalendarDomain(CountryCode.CHILE, 'XSGO')
CN_EQUITIES = EquityCalendarDomain(CountryCode.CHINA, 'XSHG')
CO_EQUITIES = EquityCalendarDomain(CountryCode.COLOMBIA, 'XBOG')
CZ_EQUITIES = EquityCalendarDomain(CountryCode.CZECH_REPUBLIC, 'XPRA')
DE_EQUITIES = EquityCalendarDomain(CountryCode.GERMANY, 'XFRA')
DK_EQUITIES = EquityCalendarDomain(CountryCode.DENMARK, 'XCSE')
ES_EQUITIES = EquityCalendarDomain(CountryCode.SPAIN, 'XMAD')
FI_EQUITIES = EquityCalendarDomain(CountryCode.FINLAND, 'XHEL')
FR_EQUITIES = EquityCalendarDomain(CountryCode.FRANCE, 'XPAR')
GB_EQUITIES = EquityCalendarDomain(CountryCode.UNITED_KINGDOM, 'XLON')
GR_EQUITIES = EquityCalendarDomain(CountryCode.GREECE, 'ASEX')
HK_EQUITIES = EquityCalendarDomain(CountryCode.HONG_KONG, 'XHKG')
HU_EQUITIES = EquityCalendarDomain(CountryCode.HUNGARY, 'XBUD')
ID_EQUITIES = EquityCalendarDomain(CountryCode.INDONESIA, 'XIDX')
IE_EQUITIES = EquityCalendarDomain(CountryCode.IRELAND, 'XDUB')
IN_EQUITIES = EquityCalendarDomain(CountryCode.INDIA, "XBOM")
IT_EQUITIES = EquityCalendarDomain(CountryCode.ITALY, 'XMIL')
JP_EQUITIES = EquityCalendarDomain(CountryCode.JAPAN, 'XTKS')
KR_EQUITIES = EquityCalendarDomain(CountryCode.SOUTH_KOREA, 'XKRX')
MX_EQUITIES = EquityCalendarDomain(CountryCode.MEXICO, 'XMEX')
MY_EQUITIES = EquityCalendarDomain(CountryCode.MALAYSIA, 'XKLS')
NL_EQUITIES = EquityCalendarDomain(CountryCode.NETHERLANDS, 'XAMS')
NO_EQUITIES = EquityCalendarDomain(CountryCode.NORWAY, 'XOSL')
NZ_EQUITIES = EquityCalendarDomain(CountryCode.NEW_ZEALAND, 'XNZE')
PE_EQUITIES = EquityCalendarDomain(CountryCode.PERU, 'XLIM')
PH_EQUITIES = EquityCalendarDomain(CountryCode.PHILIPPINES, 'XPHS')
PK_EQUITIES = EquityCalendarDomain(CountryCode.PAKISTAN, 'XKAR')
PL_EQUITIES = EquityCalendarDomain(CountryCode.POLAND, 'XWAR')
PT_EQUITIES = EquityCalendarDomain(CountryCode.PORTUGAL, 'XLIS')
RU_EQUITIES = EquityCalendarDomain(CountryCode.RUSSIA, 'XMOS')
SE_EQUITIES = EquityCalendarDomain(CountryCode.SWEDEN, 'XSTO')
SG_EQUITIES = EquityCalendarDomain(CountryCode.SINGAPORE, 'XSES')
TH_EQUITIES = EquityCalendarDomain(CountryCode.THAILAND, 'XBKK')
TR_EQUITIES = EquityCalendarDomain(CountryCode.TURKEY, 'XIST')
TW_EQUITIES = EquityCalendarDomain(CountryCode.TAIWAN, 'XTAI')
US_EQUITIES = EquityCalendarDomain(CountryCode.UNITED_STATES, 'XNYS')
ZA_EQUITIES = EquityCalendarDomain(CountryCode.SOUTH_AFRICA, 'XJSE')
BUILT_IN_DOMAINS = [
AR_EQUITIES,
AT_EQUITIES,
AU_EQUITIES,
BE_EQUITIES,
BR_EQUITIES,
CA_EQUITIES,
CH_EQUITIES,
CL_EQUITIES,
CN_EQUITIES,
CO_EQUITIES,
CZ_EQUITIES,
DE_EQUITIES,
DK_EQUITIES,
ES_EQUITIES,
FI_EQUITIES,
FR_EQUITIES,
GB_EQUITIES,
GR_EQUITIES,
HK_EQUITIES,
HU_EQUITIES,
ID_EQUITIES,
IE_EQUITIES,
IN_EQUITIES,
IT_EQUITIES,
JP_EQUITIES,
KR_EQUITIES,
MX_EQUITIES,
MY_EQUITIES,
NL_EQUITIES,
NO_EQUITIES,
NZ_EQUITIES,
PE_EQUITIES,
PH_EQUITIES,
PK_EQUITIES,
PL_EQUITIES,
PT_EQUITIES,
RU_EQUITIES,
SE_EQUITIES,
SG_EQUITIES,
TH_EQUITIES,
TR_EQUITIES,
TW_EQUITIES,
US_EQUITIES,
ZA_EQUITIES,
]
def infer_domain(terms):
"""
Infer the domain from a collection of terms.
The algorithm for inferring domains is as follows:
- If all input terms have a domain of GENERIC, the result is GENERIC.
- If there is exactly one non-generic domain in the input terms, the result
is that domain.
- Otherwise, an AmbiguousDomain error is raised.
Parameters
----------
terms : iterable[zipline.pipeline.Term]
Returns
-------
inferred : Domain or NotSpecified
Raises
------
AmbiguousDomain
Raised if more than one concrete domain is present in the input terms.
"""
domains = {t.domain for t in terms}
num_domains = len(domains)
if num_domains == 0:
return GENERIC
elif num_domains == 1:
return domains.pop()
elif num_domains == 2 and GENERIC in domains:
domains.remove(GENERIC)
return domains.pop()
else:
# Remove GENERIC if it's present before raising. Showing it to the user
# is confusing because it doesn't contribute to the error.
domains.discard(GENERIC)
raise AmbiguousDomain(sorted(domains, key=repr))
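# Illustrative sketch of the rules above (added for exposition, not part of
# the original module).  With hypothetical terms ``t_generic`` (domain
# GENERIC), ``t_us`` (domain US_EQUITIES) and ``t_gb`` (domain GB_EQUITIES):
#     infer_domain([])                  -> GENERIC
#     infer_domain([t_generic, t_us])   -> US_EQUITIES  (GENERIC is ignored)
#     infer_domain([t_us, t_gb])        -> raises AmbiguousDomain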
# This would be better if we provided more context for which domains came from
# which terms.
class AmbiguousDomain(Exception):
"""
Raised when we attempt to infer a domain from a collection of mixed terms.
"""
_TEMPLATE = dedent(
"""\
Found terms with conflicting domains:
{domains}"""
)
def __init__(self, domains):
self.domains = domains
def __str__(self):
return self._TEMPLATE.format(
domains=bulleted_list(self.domains, indent=2),
)
class EquitySessionDomain(Domain):
"""A domain built directly from an index of sessions.
Mostly useful for testing.
Parameters
----------
sessions : pd.DatetimeIndex
Sessions to use as output labels for pipelines run on this domain.
country_code : str
ISO 3166 country code of equities to be used with this domain.
data_query_time : datetime.time, optional
The time of day when data should no longer be considered available for
a session.
data_query_date_offset : int, optional
The number of days to add to the session label before applying the
``data_query_time``. This can be used to express that the cutoff time
for a session falls on a different calendar day from the session label.
"""
@expect_types(
sessions=pd.DatetimeIndex,
country_code=str,
data_query_time=optional(datetime.time),
data_query_date_offset=int,
__funcname='EquitySessionDomain',
)
def __init__(self,
sessions,
country_code,
data_query_time=None,
data_query_date_offset=0):
self._country_code = country_code
self._sessions = sessions
if data_query_time is None:
data_query_time = datetime.time(0, 0, tzinfo=pytz.timezone('UTC'))
if data_query_time.tzinfo is None:
raise ValueError("data_query_time cannot be tz-naive")
self._data_query_time = data_query_time
self._data_query_date_offset = data_query_date_offset
@property
def country_code(self):
return self._country_code
def all_sessions(self):
return self._sessions
def data_query_cutoff_for_sessions(self, sessions):
return days_at_time(
sessions,
self._data_query_time,
self._data_query_time.tzinfo,
self._data_query_date_offset,
)
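# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module).  EquitySessionDomain takes an explicit session index instead of a
# named calendar, which makes it convenient in tests.  The sessions and the
# cutoff time below are arbitrary choices for demonstration.
if __name__ == '__main__':
    demo_sessions = pd.date_range('2020-01-06', periods=5, tz='UTC')
    demo_domain = EquitySessionDomain(
        demo_sessions,
        country_code=CountryCode.UNITED_STATES,
        data_query_time=datetime.time(8, 45, tzinfo=pytz.utc),
    )
    # Expected: one cutoff timestamp per session, at 08:45 UTC on each date.
    print(demo_domain.data_query_cutoff_for_sessions(demo_sessions))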
| apache-2.0 |
fredhusser/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
A :ref:`decision tree <tree>` is used to simultaneously predict the noisy x and
y observations of a circle given a single underlying feature. As a result, it
learns local linear regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and fits the noise, i.e. it overfits.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
Winand/pandas | pandas/core/sparse/series.py | 2 | 28410 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.common import is_scalar
from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, _ensure_index, InvalidIndexError
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.internals import SingleBlockManager
from pandas.core import generic
import pandas.core.common as com
import pandas.core.ops as ops
import pandas._libs.index as _index
from pandas.util._decorators import Appender
from pandas.core.sparse.array import (
make_sparse, _sparse_array_op, SparseArray,
_make_index)
from pandas._libs.sparse import BlockIndex, IntIndex
import pandas._libs.sparse as splib
from pandas.core.sparse.scipy_sparse import (
_sparse_series_to_coo,
_coo_to_sparse_series)
_shared_doc_kwargs = dict(axes='index', klass='SparseSeries',
axes_single_arg="{0, 'index'}")
# -----------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are
present for compatibility.
"""
def wrapper(self, other):
if isinstance(other, Series):
if not isinstance(other, SparseSeries):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_series_op(self, other, op, name)
elif isinstance(other, DataFrame):
return NotImplemented
elif is_scalar(other):
with np.errstate(all='ignore'):
new_values = op(self.values, other)
return self._constructor(new_values,
index=self.index,
name=self.name)
else: # pragma: no cover
raise TypeError('operation with {other} not supported'
.format(other=type(other)))
wrapper.__name__ = name
if name.startswith("__"):
# strip special method names, e.g. `__add__` needs to be `add` when
# passed to _sparse_series_op
name = name[2:-2]
return wrapper
def _sparse_series_op(left, right, op, name):
left, right = left.align(right, join='outer', copy=False)
new_index = left.index
new_name = _maybe_match_name(left, right)
result = _sparse_array_op(left.values, right.values, op, name,
series=True)
return left._constructor(result, index=new_index, name=new_name)
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. The default depends on dtype:
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseSeries objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
_subtyp = 'sparse_series'
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, copy=False,
fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
if isinstance(data, SparseArray):
if index is not None:
assert (len(index) == len(data))
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
if fill_value is None:
fill_value = data.fill_value
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
data = Series(data, index=index)
index = data.index.view()
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
else:
assert (len(data) == sparse_index.npoints)
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isna(data) and isna(fill_value)):
if kind == 'block':
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == 'block':
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.length)
index = _ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype,
copy=copy)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
@property
def values(self):
""" return the array """
return self.block.values
def __array__(self, result=None):
""" the array interface, return my values """
return self.block.values
def get_values(self):
""" same as values """
return self.block.to_dense().view()
@property
def block(self):
return self._data._block
@property
def fill_value(self):
return self.block.fill_value
@fill_value.setter
def fill_value(self, v):
self.block.fill_value = v
@property
def sp_index(self):
return self.block.sp_index
@property
def sp_values(self):
return self.values.sp_values
@property
def npoints(self):
return self.sp_index.npoints
@classmethod
def from_array(cls, arr, index=None, name=None, copy=False,
fill_value=None, fastpath=False):
"""
Simplified alternate constructor
"""
return cls(arr, index=index, name=name, copy=copy,
fill_value=fill_value, fastpath=fastpath)
@property
def _constructor(self):
return SparseSeries
@property
def _constructor_expanddim(self):
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def as_sparse_array(self, kind=None, fill_value=None, copy=False):
""" return my self as a sparse array, do not copy by default """
if fill_value is None:
fill_value = self.fill_value
if kind is None:
kind = self.kind
return SparseArray(self.values, sparse_index=self.sp_index,
fill_value=fill_value, kind=kind, copy=copy)
def __len__(self):
return len(self.block)
@property
def shape(self):
return self._data.shape
def __unicode__(self):
# currently, unicode is same as repr...fixes infinite loop
series_rep = Series.__unicode__(self)
rep = '{series}\n{index!r}'.format(series=series_rep,
index=self.sp_index)
return rep
def __array_wrap__(self, result, context=None):
"""
Gets called prior to a ufunc (and after)
See SparseArray.__array_wrap__ for detail.
"""
if isinstance(context, tuple) and len(context) == 3:
ufunc, args, domain = context
args = [getattr(a, 'fill_value', a) for a in args]
with np.errstate(all='ignore'):
fill_value = ufunc(self.fill_value, *args[1:])
else:
fill_value = self.fill_value
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=fill_value,
copy=False).__finalize__(self)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.name = getattr(obj, 'name', None)
self.fill_value = getattr(obj, 'fill_value', None)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform a reduction operation """
return op(self.get_values(), skipna=skipna, **kwds)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
fill_value=self.fill_value, name=self.name)
def _unpickle_series_compat(self, state):
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
index, fill_value, sp_index = own_state[:3]
name = None
if len(own_state) > 3:
name = own_state[3]
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sp_index,
fill_value=fill_value, copy=False)
# recreate
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data)
self._set_axis(0, index)
self.name = name
def __iter__(self):
""" forward to the array """
return iter(self.values)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'sparse_time_series')
else:
object.__setattr__(self, '_subtyp', 'sparse_series')
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis, convert=True)
else:
return self._get_val_at(i)
def _get_val_at(self, loc):
""" forward to the array """
return self.block.values._get_val_at(loc)
def __getitem__(self, key):
try:
return self.index.get_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
elif key is Ellipsis:
return self
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item, must be array-like?
pass
key = _values_from_object(key)
if self.index.nlevels > 1 and isinstance(key, tuple):
# to handle MultiIndex labels
key = self.index.get_loc(key)
return self._constructor(self.values[key],
index=self.index[key]).__finalize__(self)
def _get_values(self, indexer):
try:
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self[indexer]
def _set_with_engine(self, key, value):
return self.set_value(key, value)
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are all numeric
Returns
-------
abs: type of caller
"""
return self._constructor(np.abs(self.values),
index=self.index).__finalize__(self)
def get(self, label, default=None):
"""
Returns value occupying requested label, default to specified
missing value if not present. Analogous to dict.get
Parameters
----------
label : object
Label value looking for
default : object, optional
Value to return if label not in index
Returns
-------
y : scalar
"""
if label in self.index:
loc = self.index.get_loc(label)
return self._get_val_at(loc)
else:
return default
def get_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
loc = label if takeable is True else self.index.get_loc(label)
return self._get_val_at(loc)
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Series
Returns
-------
series : SparseSeries
"""
values = self.to_dense()
# if the label doesn't exist, we will create a new object here
# and possibly change the index
new_values = values.set_value(label, value, takeable=takeable)
if new_values is not None:
values = new_values
new_index = values.index
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, new_index)
self._index = new_index
def _set_values(self, key, value):
# this might be inefficient as we have to recreate the sparse array
# rather than setting individual elements, but have to convert
# the passed slice/boolean that's in dense space into a sparse indexer
# not sure how to do that!
if isinstance(key, Series):
key = key.values
values = self.values.to_dense()
values[key] = _index.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
def to_dense(self, sparse_only=False):
"""
Convert SparseSeries to a Series.
Parameters
----------
sparse_only: bool, default False
DEPRECATED: this argument will be removed in a future version.
If True, return just the non-sparse values, or the dense version
of `self.values` if False.
Returns
-------
s : Series
"""
if sparse_only:
warnings.warn(("The 'sparse_only' parameter has been deprecated "
"and will be removed in a future version."),
FutureWarning, stacklevel=2)
int_index = self.sp_index.to_int_index()
index = self.index.take(int_index.indices)
return Series(self.sp_values, index=index, name=self.name)
else:
return Series(self.values.to_dense(), index=self.index,
name=self.name)
@property
def density(self):
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
def copy(self, deep=True):
"""
Make a copy of the SparseSeries. Only the actual sparse values need to
be copied
"""
new_data = self._data
if deep:
new_data = self._data.copy()
return self._constructor(new_data, sparse_index=self.sp_index,
fill_value=self.fill_value).__finalize__(self)
@Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, method=None, copy=True, limit=None,
**kwargs):
return super(SparseSeries, self).reindex(index=index, method=method,
copy=copy, limit=limit,
**kwargs)
def sparse_reindex(self, new_index):
"""
Conform sparse values to new SparseIndex
Parameters
----------
new_index : {BlockIndex, IntIndex}
Returns
-------
reindexed : SparseSeries
"""
if not isinstance(new_index, splib.SparseIndex):
raise TypeError('new index must be a SparseIndex')
block = self.block.sparse_reindex(new_index)
new_data = SingleBlockManager(block, self.index)
return self._constructor(new_data, index=self.index,
sparse_index=new_index,
fill_value=self.fill_value).__finalize__(self)
def take(self, indices, axis=0, convert=True, *args, **kwargs):
"""
Sparse-compatible version of ndarray.take
Returns
-------
taken : ndarray
"""
convert = nv.validate_take_with_convert(convert, args, kwargs)
new_values = SparseArray.take(self.values, indices)
new_index = self.index.take(indices)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum of non-NA/null values.
When performing the cumulative summation, any NA/null values will
be skipped. The resulting SparseSeries will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : {0}
Returns
-------
cumsum : SparseSeries
"""
nv.validate_cumsum(args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
new_array = self.values.cumsum()
return self._constructor(
new_array, index=self.index,
sparse_index=new_array.sp_index).__finalize__(self)
@Appender(generic._shared_docs['isna'])
def isna(self):
arr = SparseArray(isna(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=isna(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
isnull = isna
@Appender(generic._shared_docs['notna'])
def notna(self):
arr = SparseArray(notna(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=notna(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
notnull = notna
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Analogous to Series.dropna. If fill_value=NaN, returns a dense Series
"""
# TODO: make more efficient
axis = self._get_axis_number(axis or 0)
dense_valid = self.to_dense().valid()
if inplace:
raise NotImplementedError("Cannot perform inplace dropna"
" operations on a SparseSeries")
if isna(self.fill_value):
return dense_valid
else:
dense_valid = dense_valid[dense_valid != self.fill_value]
return dense_valid.to_sparse(fill_value=self.fill_value)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods, freq=None, axis=0):
if periods == 0:
return self.copy()
# no special handling of fill values yet
if not isna(self.fill_value):
shifted = self.to_dense().shift(periods, freq=freq,
axis=axis)
return shifted.to_sparse(fill_value=self.fill_value,
kind=self.kind)
if freq is not None:
return self._constructor(
self.sp_values, sparse_index=self.sp_index,
index=self.index.shift(periods, freq),
fill_value=self.fill_value).__finalize__(self)
int_index = self.sp_index.to_int_index()
new_indices = int_index.indices + periods
start, end = new_indices.searchsorted([0, int_index.length])
new_indices = new_indices[start:end]
new_sp_index = _make_index(len(self), new_indices, self.sp_index)
arr = self.values._simple_new(self.sp_values[start:end].copy(),
new_sp_index, fill_value=np.nan)
return self._constructor(arr, index=self.index).__finalize__(self)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
"""
if isinstance(other, SparseSeries):
other = other.to_dense()
dense_combined = self.to_dense().combine_first(other)
return dense_combined.to_sparse(fill_value=self.fill_value)
def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> from numpy import nan
>>> s = Series([3.0, nan, 1.0, 3.0, nan, nan])
>>> s.index = MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
>>> ss = s.to_sparse()
>>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
column_levels=['C', 'D'],
sort_labels=True)
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 3.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
A, rows, columns = _sparse_series_to_coo(self, row_levels,
column_levels,
sort_labels=sort_labels)
return A, rows, columns
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a SparseSeries from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : SparseSeries
Examples
--------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
shape=(3, 4))
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 2.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> ss = SparseSeries.from_coo(A)
>>> ss
0 2 1
3 2
1 0 3
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
"""
return _coo_to_sparse_series(A, dense_index=dense_index)
# overwrite series methods with unaccelerated versions
ops.add_special_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_special_funcs)
ops.add_flex_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_flex_funcs)
# overwrite basic arithmetic to use SparseSeries version
# force methods to overwrite previous definitions.
ops.add_special_arithmetic_methods(SparseSeries, _arith_method,
comp_method=_arith_method,
bool_method=None, use_numexpr=False,
force=True)
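# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module).  The values are arbitrary and only meant to show the round trip
# between dense and sparse representations.
if __name__ == '__main__':
    dense = Series([1.0, np.nan, np.nan, 3.0])
    sparse = dense.to_sparse()              # SparseSeries with NaN fill_value
    print(sparse.density)                   # 0.5: two of four points stored
    print(sparse + sparse)                  # arithmetic stays sparse
    print(sparse.to_dense().equals(dense))  # True: round trip back to dense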
| bsd-3-clause |
omni5cience/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/IPython/config/loader.py | 7 | 28718 | """A simple configuration system.
Inheritance diagram:
.. inheritance-diagram:: IPython.config.loader
:parts: 3
Authors
-------
* Brian Granger
* Fernando Perez
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import argparse
import copy
import logging
import os
import re
import sys
import json
from IPython.utils.path import filefind, get_ipython_dir
from IPython.utils import py3compat
from IPython.utils.encoding import DEFAULT_ENCODING
from IPython.utils.py3compat import unicode_type, iteritems
from IPython.utils.traitlets import HasTraits, List, Any, TraitError
#-----------------------------------------------------------------------------
# Exceptions
#-----------------------------------------------------------------------------
class ConfigError(Exception):
pass
class ConfigLoaderError(ConfigError):
pass
class ConfigFileNotFound(ConfigError):
pass
class ArgumentError(ConfigLoaderError):
pass
#-----------------------------------------------------------------------------
# Argparse fix
#-----------------------------------------------------------------------------
# Unfortunately argparse by default prints help messages to stderr instead of
# stdout. This makes it annoying to capture long help screens at the command
# line, since one must know how to pipe stderr, which many users don't know how
# to do. So we override the print_help method with one that defaults to
# stdout and use our class instead.
class ArgumentParser(argparse.ArgumentParser):
"""Simple argparse subclass that prints help to stdout by default."""
def print_help(self, file=None):
if file is None:
file = sys.stdout
return super(ArgumentParser, self).print_help(file)
print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
#-----------------------------------------------------------------------------
# Config class for holding config information
#-----------------------------------------------------------------------------
class LazyConfigValue(HasTraits):
"""Proxy object for exposing methods on configurable containers
Exposes:
- append, extend, insert on lists
- update on dicts
- update, add on sets
"""
_value = None
# list methods
_extend = List()
_prepend = List()
def append(self, obj):
self._extend.append(obj)
def extend(self, other):
self._extend.extend(other)
def prepend(self, other):
"""like list.extend, but for the front"""
self._prepend[:0] = other
_inserts = List()
def insert(self, index, other):
if not isinstance(index, int):
raise TypeError("An integer is required")
self._inserts.append((index, other))
# dict methods
# update is used for both dict and set
_update = Any()
def update(self, other):
if self._update is None:
if isinstance(other, dict):
self._update = {}
else:
self._update = set()
self._update.update(other)
# set methods
def add(self, obj):
self.update({obj})
def get_value(self, initial):
"""construct the value from the initial one
after applying any insert / extend / update changes
"""
if self._value is not None:
return self._value
value = copy.deepcopy(initial)
if isinstance(value, list):
for idx, obj in self._inserts:
value.insert(idx, obj)
value[:0] = self._prepend
value.extend(self._extend)
elif isinstance(value, dict):
if self._update:
value.update(self._update)
elif isinstance(value, set):
if self._update:
value.update(self._update)
self._value = value
return value
def to_dict(self):
"""return JSONable dict form of my data
Currently: update as a dict or set, extend and prepend as lists, and inserts as a list of tuples.
"""
d = {}
if self._update:
d['update'] = self._update
if self._extend:
d['extend'] = self._extend
if self._prepend:
d['prepend'] = self._prepend
elif self._inserts:
d['inserts'] = self._inserts
return d
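# Illustrative sketch (added for exposition, not part of the original module):
# a LazyConfigValue records container mutations and replays them onto the
# eventual default value, for example::
#
#     lazy = LazyConfigValue()
#     lazy.prepend([0])
#     lazy.append(3)
#     lazy.get_value([1, 2])   # -> [0, 1, 2, 3]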
def _is_section_key(key):
"""Is a Config key a section name (does it start with a capital)?"""
if key and key[0].upper()==key[0] and not key.startswith('_'):
return True
else:
return False
class Config(dict):
"""An attribute based dict that can do smart merges."""
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self._ensure_subconfig()
def _ensure_subconfig(self):
"""ensure that sub-dicts that should be Config objects are
casts dicts that are under section keys to Config objects,
which is necessary for constructing Config objects from dict literals.
"""
for key in self:
obj = self[key]
if _is_section_key(key) \
and isinstance(obj, dict) \
and not isinstance(obj, Config):
setattr(self, key, Config(obj))
def _merge(self, other):
"""deprecated alias, use Config.merge()"""
self.merge(other)
def merge(self, other):
"""merge another config object into this one"""
to_update = {}
for k, v in iteritems(other):
if k not in self:
to_update[k] = copy.deepcopy(v)
else: # I have this key
if isinstance(v, Config) and isinstance(self[k], Config):
# Recursively merge common sub Configs
self[k].merge(v)
else:
# Plain updates for non-Configs
to_update[k] = copy.deepcopy(v)
self.update(to_update)
def __contains__(self, key):
# allow nested contains of the form `"Section.key" in config`
if '.' in key:
first, remainder = key.split('.', 1)
if first not in self:
return False
return remainder in self[first]
return super(Config, self).__contains__(key)
# .has_key is deprecated for dictionaries.
has_key = __contains__
def _has_section(self, key):
return _is_section_key(key) and key in self
def copy(self):
return type(self)(dict.copy(self))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
import copy
return type(self)(copy.deepcopy(list(self.items())))
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
if _is_section_key(key):
c = Config()
dict.__setitem__(self, key, c)
return c
elif not key.startswith('_'):
# undefined, create lazy value, used for container methods
v = LazyConfigValue()
dict.__setitem__(self, key, v)
return v
else:
raise KeyError
def __setitem__(self, key, value):
if _is_section_key(key):
if not isinstance(value, Config):
raise ValueError('values whose keys begin with an uppercase '
'char must be Config instances: %r, %r' % (key, value))
dict.__setitem__(self, key, value)
def __getattr__(self, key):
if key.startswith('__'):
return dict.__getattr__(self, key)
try:
return self.__getitem__(key)
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, key, value):
if key.startswith('__'):
return dict.__setattr__(self, key, value)
try:
self.__setitem__(key, value)
except KeyError as e:
raise AttributeError(e)
def __delattr__(self, key):
if key.startswith('__'):
return dict.__delattr__(self, key)
try:
dict.__delitem__(self, key)
except KeyError as e:
raise AttributeError(e)
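# Illustrative usage sketch (hypothetical helper, not used by IPython): shows
# attribute access, automatic sub-Config creation, nested `in` checks and merging.
def _example_config_usage():
    c = Config()
    # Capitalized keys are treated as sections and become sub-Configs on access.
    c.InteractiveShell.autocall = 2
    assert 'InteractiveShell.autocall' in c
    other = Config({'InteractiveShell': {'colors': 'NoColor'},
                    'TerminalIPythonApp': {'display_banner': False}})
    c.merge(other)  # common sections are merged recursively
    # c.InteractiveShell now holds both 'autocall' and 'colors'
    return c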
#-----------------------------------------------------------------------------
# Config loading classes
#-----------------------------------------------------------------------------
class ConfigLoader(object):
"""A object for loading configurations from just about anywhere.
The resulting configuration is packaged as a :class:`Config`.
Notes
-----
A :class:`ConfigLoader` does one thing: load a config from a source
    (file, command line arguments) and return the data as a :class:`Config` object.
There are lots of things that :class:`ConfigLoader` does not do. It does
not implement complex logic for finding config files. It does not handle
default values or merge multiple configs. These things need to be
handled elsewhere.
"""
def _log_default(self):
from IPython.config.application import Application
if Application.initialized():
return Application.instance().log
else:
return logging.getLogger()
def __init__(self, log=None):
"""A base class for config loaders.
log : instance of :class:`logging.Logger` to use.
              By default the logger of :meth:`IPython.config.application.Application.instance()`
will be used
Examples
--------
>>> cl = ConfigLoader()
>>> config = cl.load_config()
>>> config
{}
"""
self.clear()
if log is None:
self.log = self._log_default()
self.log.debug('Using default logger')
else:
self.log = log
def clear(self):
self.config = Config()
def load_config(self):
"""Load a config from somewhere, return a :class:`Config` instance.
Usually, this will cause self.config to be set and then returned.
However, in most cases, :meth:`ConfigLoader.clear` should be called
to erase any previous state.
"""
self.clear()
return self.config
class FileConfigLoader(ConfigLoader):
"""A base class for file based configurations.
As we add more file based config loaders, the common logic should go
here.
"""
def __init__(self, filename, path=None, **kw):
"""Build a config loader for a filename and path.
Parameters
----------
filename : str
The file name of the config file.
path : str, list, tuple
The path to search for the config file on, or a sequence of
paths to try in order.
"""
super(FileConfigLoader, self).__init__(**kw)
self.filename = filename
self.path = path
self.full_filename = ''
def _find_file(self):
"""Try to find the file by searching the paths."""
self.full_filename = filefind(self.filename, self.path)
class JSONFileConfigLoader(FileConfigLoader):
"""A Json file loader for config"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
dct = self._read_file_as_dict()
self.config = self._convert_to_config(dct)
return self.config
def _read_file_as_dict(self):
with open(self.full_filename) as f:
return json.load(f)
def _convert_to_config(self, dictionary):
if 'version' in dictionary:
version = dictionary.pop('version')
else:
version = 1
self.log.warn("Unrecognized JSON config file version, assuming version {}".format(version))
if version == 1:
return Config(dictionary)
else:
raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))
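# Illustrative usage sketch (hypothetical helper; the file name and contents
# below are invented for the example): loading a JSON config file from a directory.
def _example_json_config_loader():
    import os
    import tempfile
    d = tempfile.mkdtemp()
    with open(os.path.join(d, 'ipython_config.json'), 'w') as f:
        f.write('{"version": 1, "InteractiveShell": {"colors": "LightBG"}}')
    loader = JSONFileConfigLoader('ipython_config.json', path=d,
                                  log=logging.getLogger())
    config = loader.load_config()
    # config.InteractiveShell.colors == 'LightBG'
    return config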
class PyFileConfigLoader(FileConfigLoader):
"""A config loader for pure python files.
This is responsible for locating a Python config file by filename and
path, then executing it to construct a Config object.
"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
self._read_file_as_dict()
return self.config
def _read_file_as_dict(self):
"""Load the config file into self.config, with recursive loading."""
# This closure is made available in the namespace that is used
# to exec the config file. It allows users to call
# load_subconfig('myconfig.py') to load config files recursively.
# It needs to be a closure because it has references to self.path
# and self.config. The sub-config is loaded with the same path
# as the parent, but it uses an empty config which is then merged
# with the parents.
# If a profile is specified, the config file will be loaded
# from that profile
def load_subconfig(fname, profile=None):
# import here to prevent circular imports
from IPython.core.profiledir import ProfileDir, ProfileDirError
if profile is not None:
try:
profile_dir = ProfileDir.find_profile_dir_by_name(
get_ipython_dir(),
profile,
)
except ProfileDirError:
return
path = profile_dir.location
else:
path = self.path
loader = PyFileConfigLoader(fname, path)
try:
sub_config = loader.load_config()
except ConfigFileNotFound:
# Pass silently if the sub config is not there. This happens
                # when a user is using a profile, but not the default config.
pass
else:
self.config.merge(sub_config)
# Again, this needs to be a closure and should be used in config
# files to get the config being loaded.
def get_config():
return self.config
namespace = dict(
load_subconfig=load_subconfig,
get_config=get_config,
__file__=self.full_filename,
)
fs_encoding = sys.getfilesystemencoding() or 'ascii'
conf_filename = self.full_filename.encode(fs_encoding)
py3compat.execfile(conf_filename, namespace)
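# Illustrative usage sketch (hypothetical helper; file names and values are
# invented): what Python config files look like and how PyFileConfigLoader
# loads them, including a recursive load_subconfig() call.
def _example_py_config_loader():
    import os
    import tempfile
    d = tempfile.mkdtemp()
    with open(os.path.join(d, 'base_config.py'), 'w') as f:
        f.write("c = get_config()\n"
                "c.TerminalIPythonApp.display_banner = False\n")
    with open(os.path.join(d, 'ipython_config.py'), 'w') as f:
        f.write("load_subconfig('base_config.py')\n"
                "c = get_config()\n"
                "c.InteractiveShell.autocall = 1\n")
    loader = PyFileConfigLoader('ipython_config.py', path=d,
                                log=logging.getLogger())
    config = loader.load_config()
    # config.InteractiveShell.autocall == 1
    # config.TerminalIPythonApp.display_banner == False (merged from base_config.py)
    return config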
class CommandLineConfigLoader(ConfigLoader):
"""A config loader for command line arguments.
As we add more command line based loaders, the common logic should go
here.
"""
def _exec_config_str(self, lhs, rhs):
"""execute self.config.<lhs> = <rhs>
* expands ~ with expanduser
* tries to assign with raw eval, otherwise assigns with just the string,
allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not*
equivalent are `--C.a=4` and `--C.a='4'`.
"""
rhs = os.path.expanduser(rhs)
try:
# Try to see if regular Python syntax will work. This
# won't handle strings as the quote marks are removed
# by the system shell.
value = eval(rhs)
except (NameError, SyntaxError):
# This case happens if the rhs is a string.
value = rhs
exec(u'self.config.%s = value' % lhs)
def _load_flag(self, cfg):
"""update self.config from a flag, which can be a dict or Config"""
if isinstance(cfg, (dict, Config)):
# don't clobber whole config sections, update
# each section from config:
for sec,c in iteritems(cfg):
self.config[sec].update(c)
else:
raise TypeError("Invalid flag: %r" % cfg)
# raw --identifier=value pattern
# but *also* accept '-' as wordsep, for aliases
# accepts: --foo=a
# --Class.trait=value
# --alias-name=value
# rejects: -foo=value
# --foo
# --Class.trait
kv_pattern = re.compile(r'\-\-[A-Za-z][\w\-]*(\.[\w\-]+)*\=.*')
# just flags, no assignments, with two *or one* leading '-'
# accepts: --foo
# -foo-bar-again
# rejects: --anything=anything
# --two.word
flag_pattern = re.compile(r'\-\-?\w+[\-\w]*$')
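# Illustrative sketch (hypothetical helper): how the two patterns classify
# command-line tokens.
def _example_cli_patterns():
    assert kv_pattern.match('--InteractiveShell.colors=LightBG')
    assert kv_pattern.match('--profile=foo')
    assert kv_pattern.match('--debug') is None        # no '=', so not key=value
    assert flag_pattern.match('--debug')
    assert flag_pattern.match('-i')                   # single '-' accepted for flags
    assert flag_pattern.match('--C.a=4') is None      # assignments are not flags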
class KeyValueConfigLoader(CommandLineConfigLoader):
"""A config loader that loads key value pairs from the command line.
    This allows command line options to be given in the following form::
ipython --profile="foo" --InteractiveShell.autocall=False
"""
def __init__(self, argv=None, aliases=None, flags=None, **kw):
"""Create a key value pair config loader.
Parameters
----------
argv : list
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then sys.argv[1:] will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
            A dict of flags, keyed by str name. Values can be Config objects,
            dicts, or "key=value" strings. If Config or dict, when the flag
            is triggered, the config is loaded as `self.config.update(cfg)`.
Returns
-------
config : Config
The resulting Config object.
Examples
--------
>>> from IPython.config.loader import KeyValueConfigLoader
>>> cl = KeyValueConfigLoader()
>>> d = cl.load_config(["--A.name='brian'","--B.number=0"])
>>> sorted(d.items())
[('A', {'name': 'brian'}), ('B', {'number': 0})]
"""
super(KeyValueConfigLoader, self).__init__(**kw)
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
def clear(self):
super(KeyValueConfigLoader, self).clear()
self.extra_args = []
def _decode_argv(self, argv, enc=None):
"""decode argv if bytes, using stin.encoding, falling back on default enc"""
uargv = []
if enc is None:
enc = DEFAULT_ENCODING
for arg in argv:
if not isinstance(arg, unicode_type):
# only decode if not already decoded
arg = arg.decode(enc)
uargv.append(arg)
return uargv
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse the configuration and generate the Config object.
After loading, any arguments that are not key-value or
flags will be stored in self.extra_args - a list of
unparsed command-line arguments. This is used for
arguments such as input files or subcommands.
Parameters
----------
argv : list, optional
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then self.argv will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
A dict of flags, keyed by str name. Values can be Config objects
            or dicts. When the flag is triggered, the config is loaded as
`self.config.update(cfg)`.
"""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
# ensure argv is a list of unicode strings:
uargv = self._decode_argv(argv)
for idx,raw in enumerate(uargv):
# strip leading '-'
item = raw.lstrip('-')
if raw == '--':
# don't parse arguments after '--'
# this is useful for relaying arguments to scripts, e.g.
# ipython -i foo.py --matplotlib=qt -- args after '--' go-to-foo.py
self.extra_args.extend(uargv[idx+1:])
break
if kv_pattern.match(raw):
lhs,rhs = item.split('=',1)
# Substitute longnames for aliases.
if lhs in aliases:
lhs = aliases[lhs]
if '.' not in lhs:
# probably a mistyped alias, but not technically illegal
self.log.warn("Unrecognized alias: '%s', it will probably have no effect.", raw)
try:
self._exec_config_str(lhs, rhs)
except Exception:
raise ArgumentError("Invalid argument: '%s'" % raw)
elif flag_pattern.match(raw):
if item in flags:
cfg,help = flags[item]
self._load_flag(cfg)
else:
raise ArgumentError("Unrecognized flag: '%s'"%raw)
elif raw.startswith('-'):
kv = '--'+item
if kv_pattern.match(kv):
raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv))
else:
raise ArgumentError("Invalid argument: '%s'"%raw)
else:
# keep all args that aren't valid in a list,
# in case our parent knows what to do with them.
self.extra_args.append(item)
return self.config
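# Illustrative usage sketch (hypothetical helper; the alias, flag and file
# names are invented): parsing a fake argv with an alias, a flag and a
# positional argument left in extra_args.
def _example_keyvalue_loader():
    aliases = {'colors': 'InteractiveShell.colors'}
    flags = {'debug': ({'Application': {'log_level': 10}}, 'set log level to DEBUG')}
    cl = KeyValueConfigLoader(aliases=aliases, flags=flags, log=logging.getLogger())
    config = cl.load_config(['--colors=LightBG', '--debug', 'notebook.ipynb'])
    # config.InteractiveShell.colors == 'LightBG'   (via the alias)
    # config.Application.log_level == 10            (via the flag)
    # cl.extra_args == ['notebook.ipynb']           (kept for the caller)
    return config, cl.extra_args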
class ArgParseConfigLoader(CommandLineConfigLoader):
"""A loader that uses the argparse module to load from the command line."""
def __init__(self, argv=None, aliases=None, flags=None, log=None, *parser_args, **parser_kw):
"""Create a config loader for use with argparse.
Parameters
----------
argv : optional, list
If given, used to read command-line arguments from, otherwise
sys.argv[1:] is used.
parser_args : tuple
A tuple of positional arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
parser_kw : dict
            A dict of keyword arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
Returns
-------
config : Config
The resulting Config object.
"""
super(CommandLineConfigLoader, self).__init__(log=log)
self.clear()
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
self.parser_args = parser_args
self.version = parser_kw.pop("version", None)
kwargs = dict(argument_default=argparse.SUPPRESS)
kwargs.update(parser_kw)
self.parser_kw = kwargs
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse command line arguments and return as a Config object.
Parameters
----------
args : optional, list
If given, a list with the structure of sys.argv[1:] to parse
arguments from. If not given, the instance's self.argv attribute
(given at construction time) is used."""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
self._create_parser(aliases, flags)
self._parse_args(argv)
self._convert_to_config()
return self.config
def get_extra_args(self):
if hasattr(self, 'extra_args'):
return self.extra_args
else:
return []
def _create_parser(self, aliases=None, flags=None):
self.parser = ArgumentParser(*self.parser_args, **self.parser_kw)
self._add_arguments(aliases, flags)
def _add_arguments(self, aliases=None, flags=None):
raise NotImplementedError("subclasses must implement _add_arguments")
def _parse_args(self, args):
"""self.parser->self.parsed_data"""
# decode sys.argv to support unicode command-line options
enc = DEFAULT_ENCODING
uargs = [py3compat.cast_unicode(a, enc) for a in args]
self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)
def _convert_to_config(self):
"""self.parsed_data->self.config"""
for k, v in iteritems(vars(self.parsed_data)):
exec("self.config.%s = v"%k, locals(), globals())
class KVArgParseConfigLoader(ArgParseConfigLoader):
"""A config loader that loads aliases and flags with argparse,
but will use KVLoader for the rest. This allows better parsing
of common args, such as `ipython -c 'print 5'`, but still gets
arbitrary config with `ipython --InteractiveShell.use_readline=False`"""
def _add_arguments(self, aliases=None, flags=None):
self.alias_flags = {}
# print aliases, flags
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
paa = self.parser.add_argument
for key,value in iteritems(aliases):
if key in flags:
# flags
nargs = '?'
else:
nargs = None
            if len(key) == 1:
paa('-'+key, '--'+key, type=unicode_type, dest=value, nargs=nargs)
else:
paa('--'+key, type=unicode_type, dest=value, nargs=nargs)
for key, (value, help) in iteritems(flags):
if key in self.aliases:
#
self.alias_flags[self.aliases[key]] = value
continue
            if len(key) == 1:
paa('-'+key, '--'+key, action='append_const', dest='_flags', const=value)
else:
paa('--'+key, action='append_const', dest='_flags', const=value)
def _convert_to_config(self):
"""self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
# remove subconfigs list from namespace before transforming the Namespace
if '_flags' in self.parsed_data:
subcs = self.parsed_data._flags
del self.parsed_data._flags
else:
subcs = []
for k, v in iteritems(vars(self.parsed_data)):
if v is None:
# it was a flag that shares the name of an alias
subcs.append(self.alias_flags[k])
else:
# eval the KV assignment
self._exec_config_str(k, v)
for subc in subcs:
self._load_flag(subc)
if self.extra_args:
sub_parser = KeyValueConfigLoader(log=self.log)
sub_parser.load_config(self.extra_args)
self.config.merge(sub_parser.config)
self.extra_args = sub_parser.extra_args
def load_pyconfig_files(config_files, path):
"""Load multiple Python config files, merging each of them in turn.
Parameters
==========
config_files : list of str
List of config files names to load and merge into the config.
path : unicode
The full path to the location of the config files.
"""
config = Config()
for cf in config_files:
loader = PyFileConfigLoader(cf, path=path)
try:
next_config = loader.load_config()
except ConfigFileNotFound:
pass
except:
raise
else:
config.merge(next_config)
return config
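# Illustrative usage sketch (hypothetical helper; file names are invented):
# merging several Python config files in order, later files winning on
# colliding keys.
def _example_load_pyconfig_files():
    import os
    import tempfile
    d = tempfile.mkdtemp()
    with open(os.path.join(d, 'first_config.py'), 'w') as f:
        f.write("c = get_config()\nc.InteractiveShell.autocall = 1\n")
    with open(os.path.join(d, 'second_config.py'), 'w') as f:
        f.write("c = get_config()\nc.InteractiveShell.autocall = 2\n")
    config = load_pyconfig_files(['first_config.py', 'second_config.py'], d)
    # config.InteractiveShell.autocall == 2  (the second file's merge wins)
    return config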
| mit |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/_sunburst.py | 1 | 79713 | from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Sunburst(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "sunburst"
_valid_props = {
"branchvalues",
"count",
"customdata",
"customdatasrc",
"domain",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"insidetextfont",
"insidetextorientation",
"labels",
"labelssrc",
"leaf",
"legendgrouptitle",
"legendrank",
"level",
"marker",
"maxdepth",
"meta",
"metasrc",
"name",
"opacity",
"outsidetextfont",
"parents",
"parentssrc",
"root",
"rotation",
"sort",
"stream",
"text",
"textfont",
"textinfo",
"textsrc",
"texttemplate",
"texttemplatesrc",
"type",
"uid",
"uirevision",
"values",
"valuessrc",
"visible",
}
# branchvalues
# ------------
@property
def branchvalues(self):
"""
Determines how the items in `values` are summed. When set to
"total", items in `values` are taken to be value of all its
descendants. When set to "remainder", items in `values`
corresponding to the root and the branches sectors are taken to
be the extra part not part of the sum of the values at their
leaves.
The 'branchvalues' property is an enumeration that may be specified as:
- One of the following enumeration values:
['remainder', 'total']
Returns
-------
Any
"""
return self["branchvalues"]
@branchvalues.setter
def branchvalues(self, val):
self["branchvalues"] = val
# count
# -----
@property
def count(self):
"""
Determines default for `values` when it is not provided, by
inferring a 1 for each of the "leaves" and/or "branches",
otherwise 0.
The 'count' property is a flaglist and may be specified
as a string containing:
- Any combination of ['branches', 'leaves'] joined with '+' characters
(e.g. 'branches+leaves')
Returns
-------
Any
"""
return self["count"]
@count.setter
def count(self, val):
self["count"] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items to the markers'
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# domain
# ------
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Supported dict properties:
column
If there is a layout grid, use the domain for
this column in the grid for this sunburst trace
.
row
If there is a layout grid, use the domain for
this row in the grid for this sunburst trace .
x
Sets the horizontal domain of this sunburst
trace (in plot fraction).
y
Sets the vertical domain of this sunburst trace
(in plot fraction).
Returns
-------
plotly.graph_objs.sunburst.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
        Determines which trace information appears on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['label', 'text', 'value', 'name', 'current path', 'percent root', 'percent entry', 'percent parent'] joined with '+' characters
(e.g. 'label+text')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
                only if the hover label text spans two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.sunburst.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
        Template string used for rendering the information that appears
        in the hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
        as well as %{xother}, %{_xother}, %{_xother_}, %{xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
The variables available in `hovertemplate` are the ones emitted
as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
        Additionally, all attributes that can be specified per-point
        (the ones that are `arrayOk: true`) are available, as are the variables
`currentPath`, `root`, `entry`, `percentRoot`, `percentEntry`
and `percentParent`. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each sector. If a
single string, the same string appears for all data points. If
an array of string, the items are mapped in order of this
trace's sectors. To be seen, trace `hoverinfo` must contain a
"text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids provide object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# insidetextfont
# --------------
@property
def insidetextfont(self):
"""
Sets the font used for `textinfo` lying inside the sector.
The 'insidetextfont' property is an instance of Insidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.Insidetextfont`
- A dict of string/value properties that will be passed
to the Insidetextfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.sunburst.Insidetextfont
"""
return self["insidetextfont"]
@insidetextfont.setter
def insidetextfont(self, val):
self["insidetextfont"] = val
# insidetextorientation
# ---------------------
@property
def insidetextorientation(self):
"""
Controls the orientation of the text inside chart sectors. When
set to "auto", text may be oriented in any direction in order
to be as big as possible in the middle of a sector. The
"horizontal" option orients text to be parallel with the bottom
of the chart, and may make text smaller in order to achieve
that goal. The "radial" option orients text along the radius of
the sector. The "tangential" option orients text perpendicular
to the radius of the sector.
The 'insidetextorientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['horizontal', 'radial', 'tangential', 'auto']
Returns
-------
Any
"""
return self["insidetextorientation"]
@insidetextorientation.setter
def insidetextorientation(self, val):
self["insidetextorientation"] = val
# labels
# ------
@property
def labels(self):
"""
Sets the labels of each of the sectors.
The 'labels' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["labels"]
@labels.setter
def labels(self, val):
self["labels"] = val
# labelssrc
# ---------
@property
def labelssrc(self):
"""
Sets the source reference on Chart Studio Cloud for labels .
The 'labelssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["labelssrc"]
@labelssrc.setter
def labelssrc(self, val):
self["labelssrc"] = val
# leaf
# ----
@property
def leaf(self):
"""
The 'leaf' property is an instance of Leaf
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.Leaf`
- A dict of string/value properties that will be passed
to the Leaf constructor
Supported dict properties:
opacity
Sets the opacity of the leaves. With colorscale
                it defaults to 1; otherwise it defaults
                to 0.7.
Returns
-------
plotly.graph_objs.sunburst.Leaf
"""
return self["leaf"]
@leaf.setter
def leaf(self, val):
self["leaf"] = val
# legendgrouptitle
# ----------------
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Supported dict properties:
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
plotly.graph_objs.sunburst.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
        *reversed* `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# level
# -----
@property
def level(self):
"""
Sets the level from which this trace hierarchy is rendered. Set
`level` to `''` to start from the root node in the hierarchy.
Must be an "id" if `ids` is filled in, otherwise plotly
attempts to find a matching item in `labels`.
The 'level' property accepts values of any type
Returns
-------
Any
"""
return self["level"]
@level.setter
def level(self, val):
self["level"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
                effect only if colors is set to a numerical
array. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette
will be chosen according to whether numbers in
the `color` array are all positive, all
negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
colors) or the bounds set in `marker.cmin` and
                `marker.cmax`. Has an effect only if colors is
set to a numerical array. Defaults to `false`
when `marker.cmin` and `marker.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has
                an effect only if colors is set to a numerical
array. Value should have the same units as
colors and if set, `marker.cmin` must be set as
well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
                only if colors is set to a numerical array.
Value should have the same units as colors. Has
no effect when `marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
                an effect only if colors is set to a numerical
array. Value should have the same units as
colors and if set, `marker.cmax` must be set as
well.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.sunburst.marker.Co
lorBar` instance or dict with compatible
properties
colors
Sets the color of each sector of this trace. If
not specified, the default trace color set is
used to pick the sector colors.
colorscale
Sets the colorscale. Has an effect only if
                colors is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorssrc
Sets the source reference on Chart Studio Cloud
for colors .
line
:class:`plotly.graph_objects.sunburst.marker.Li
ne` instance or dict with compatible properties
reversescale
Reverses the color mapping if true. Has an
                effect only if colors is set to a numerical
array. If true, `marker.cmin` will correspond
to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
                colors is set to a numerical array.
Returns
-------
plotly.graph_objs.sunburst.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# maxdepth
# --------
@property
def maxdepth(self):
"""
Sets the number of rendered sectors from any given `level`. Set
`maxdepth` to "-1" to render all the levels in the hierarchy.
The 'maxdepth' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
Returns
-------
int
"""
return self["maxdepth"]
@maxdepth.setter
def maxdepth(self, val):
self["maxdepth"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
        `text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# outsidetextfont
# ---------------
@property
def outsidetextfont(self):
"""
Sets the font used for `textinfo` lying outside the sector.
This option refers to the root of the hierarchy presented at
the center of a sunburst graph. Please note that if a hierarchy
has multiple root nodes, this option won't have any effect and
`insidetextfont` would be used.
The 'outsidetextfont' property is an instance of Outsidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.Outsidetextfont`
- A dict of string/value properties that will be passed
to the Outsidetextfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.sunburst.Outsidetextfont
"""
return self["outsidetextfont"]
@outsidetextfont.setter
def outsidetextfont(self, val):
self["outsidetextfont"] = val
# parents
# -------
@property
def parents(self):
"""
Sets the parent sectors for each of the sectors. Empty string
items '' are understood to reference the root node in the
hierarchy. If `ids` is filled, `parents` items are understood
to be "ids" themselves. When `ids` is not set, plotly attempts
to find matching items in `labels`, but beware they must be
unique.
The 'parents' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["parents"]
@parents.setter
def parents(self, val):
self["parents"] = val
# parentssrc
# ----------
@property
def parentssrc(self):
"""
Sets the source reference on Chart Studio Cloud for parents .
The 'parentssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["parentssrc"]
@parentssrc.setter
def parentssrc(self, val):
self["parentssrc"] = val
# root
# ----
@property
def root(self):
"""
The 'root' property is an instance of Root
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.Root`
- A dict of string/value properties that will be passed
to the Root constructor
Supported dict properties:
color
sets the color of the root node for a
sunburst/treemap/icicle trace. this has no
effect when a colorscale is used to set the
markers.
Returns
-------
plotly.graph_objs.sunburst.Root
"""
return self["root"]
@root.setter
def root(self, val):
self["root"] = val
# rotation
# --------
@property
def rotation(self):
"""
Rotates the whole diagram counterclockwise by some angle. By
default the first slice starts at 3 o'clock.
The 'rotation' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["rotation"]
@rotation.setter
def rotation(self, val):
self["rotation"] = val
# sort
# ----
@property
def sort(self):
"""
Determines whether or not the sectors are reordered from
largest to smallest.
The 'sort' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["sort"]
@sort.setter
def sort(self, val):
self["sort"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.sunburst.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each sector. If trace
`textinfo` contains a "text" flag, these elements will be seen
on the chart. If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in the
hover labels.
The 'text' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the font used for `textinfo`.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.sunburst.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.sunburst.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# textinfo
# --------
@property
def textinfo(self):
"""
        Determines which trace information appears on the graph.
The 'textinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['label', 'text', 'value', 'current path', 'percent root', 'percent entry', 'percent parent'] joined with '+' characters
(e.g. 'label+text')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["textinfo"]
@textinfo.setter
def textinfo(self, val):
self["textinfo"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# texttemplate
# ------------
@property
def texttemplate(self):
"""
Template string used for rendering the information text that
        appears on points. Note that this will override `textinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
        All attributes that can be specified per-point (the ones that
        are `arrayOk: true`) are available, as are the variables `currentPath`,
`root`, `entry`, `percentRoot`, `percentEntry`,
`percentParent`, `label` and `value`.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
# texttemplatesrc
# ---------------
@property
def texttemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
texttemplate .
The 'texttemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["texttemplatesrc"]
@texttemplatesrc.setter
def texttemplatesrc(self, val):
self["texttemplatesrc"] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# values
# ------
@property
def values(self):
"""
Sets the values associated with each of the sectors. Use with
`branchvalues` to determine how the values are summed.
The 'values' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["values"]
@values.setter
def values(self, val):
self["values"] = val
# valuessrc
# ---------
@property
def valuessrc(self):
"""
Sets the source reference on Chart Studio Cloud for values .
The 'valuessrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["valuessrc"]
@valuessrc.setter
def valuessrc(self, val):
self["valuessrc"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
branchvalues
Determines how the items in `values` are summed. When
set to "total", items in `values` are taken to be value
of all its descendants. When set to "remainder", items
in `values` corresponding to the root and the branches
sectors are taken to be the extra part not part of the
sum of the values at their leaves.
count
Determines default for `values` when it is not
provided, by inferring a 1 for each of the "leaves"
and/or "branches", otherwise 0.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note that
            "scatter" traces also append customdata items to the
            markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
domain
:class:`plotly.graph_objects.sunburst.Domain` instance
or dict with compatible properties
hoverinfo
            Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.sunburst.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
            appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available, as are the variables `currentPath`, `root`,
`entry`, `percentRoot`, `percentEntry` and
`percentParent`. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of string, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
insidetextorientation
Controls the orientation of the text inside chart
sectors. When set to "auto", text may be oriented in
any direction in order to be as big as possible in the
middle of a sector. The "horizontal" option orients
text to be parallel with the bottom of the chart, and
may make text smaller in order to achieve that goal.
The "radial" option orients text along the radius of
the sector. The "tangential" option orients text
perpendicular to the radius of the sector.
labels
Sets the labels of each of the sectors.
labelssrc
Sets the source reference on Chart Studio Cloud for
labels .
leaf
:class:`plotly.graph_objects.sunburst.Leaf` instance or
dict with compatible properties
legendgrouptitle
:class:`plotly.graph_objects.sunburst.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
            with *reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
level
Sets the level from which this trace hierarchy is
rendered. Set `level` to `''` to start from the root
node in the hierarchy. Must be an "id" if `ids` is
filled in, otherwise plotly attempts to find a matching
item in `labels`.
marker
:class:`plotly.graph_objects.sunburst.Marker` instance
or dict with compatible properties
maxdepth
Sets the number of rendered sectors from any given
`level`. Set `maxdepth` to "-1" to render all the
levels in the hierarchy.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
outsidetextfont
Sets the font used for `textinfo` lying outside the
sector. This option refers to the root of the hierarchy
presented at the center of a sunburst graph. Please
note that if a hierarchy has multiple root nodes, this
option won't have any effect and `insidetextfont` would
be used.
parents
Sets the parent sectors for each of the sectors. Empty
string items '' are understood to reference the root
node in the hierarchy. If `ids` is filled, `parents`
items are understood to be "ids" themselves. When `ids`
is not set, plotly attempts to find matching items in
`labels`, but beware they must be unique.
parentssrc
Sets the source reference on Chart Studio Cloud for
parents .
root
:class:`plotly.graph_objects.sunburst.Root` instance or
dict with compatible properties
rotation
Rotates the whole diagram counterclockwise by some
angle. By default the first slice starts at 3 o'clock.
sort
Determines whether or not the sectors are reordered
from largest to smallest.
stream
:class:`plotly.graph_objects.sunburst.Stream` instance
or dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
            Determines which trace information appears on the graph.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
            that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
            details on the date formatting syntax. Every attribute
            that can be specified per-point (the ones that are
            `arrayOk: true`) is available. Available variables are
            `currentPath`, `root`, `entry`, `percentRoot`,
            `percentEntry`, `percentParent`, `label` and `value`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values associated with each of the sectors.
Use with `branchvalues` to determine how the values are
summed.
valuessrc
Sets the source reference on Chart Studio Cloud for
values .
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
"""
def __init__(
self,
arg=None,
branchvalues=None,
count=None,
customdata=None,
customdatasrc=None,
domain=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextfont=None,
insidetextorientation=None,
labels=None,
labelssrc=None,
leaf=None,
legendgrouptitle=None,
legendrank=None,
level=None,
marker=None,
maxdepth=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
outsidetextfont=None,
parents=None,
parentssrc=None,
root=None,
rotation=None,
sort=None,
stream=None,
text=None,
textfont=None,
textinfo=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
uid=None,
uirevision=None,
values=None,
valuessrc=None,
visible=None,
**kwargs
):
"""
Construct a new Sunburst object
        Visualize hierarchical data spanning outward radially from root
to leaves. The sunburst sectors are determined by the entries
in "labels" or "ids" and in "parents".
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Sunburst`
branchvalues
Determines how the items in `values` are summed. When
set to "total", items in `values` are taken to be value
of all its descendants. When set to "remainder", items
in `values` corresponding to the root and the branches
sectors are taken to be the extra part not part of the
sum of the values at their leaves.
count
Determines default for `values` when it is not
provided, by inferring a 1 for each of the "leaves"
and/or "branches", otherwise 0.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
domain
:class:`plotly.graph_objects.sunburst.Domain` instance
or dict with compatible properties
hoverinfo
            Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.sunburst.Hoverlabel`
instance or dict with compatible properties
hovertemplate
            Template string used for rendering the information that
            appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. variables `currentPath`, `root`,
`entry`, `percentRoot`, `percentEntry` and
`percentParent`. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of string, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
insidetextorientation
Controls the orientation of the text inside chart
sectors. When set to "auto", text may be oriented in
any direction in order to be as big as possible in the
middle of a sector. The "horizontal" option orients
text to be parallel with the bottom of the chart, and
may make text smaller in order to achieve that goal.
The "radial" option orients text along the radius of
the sector. The "tangential" option orients text
perpendicular to the radius of the sector.
labels
Sets the labels of each of the sectors.
labelssrc
Sets the source reference on Chart Studio Cloud for
labels .
leaf
:class:`plotly.graph_objects.sunburst.Leaf` instance or
dict with compatible properties
legendgrouptitle
:class:`plotly.graph_objects.sunburst.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
level
Sets the level from which this trace hierarchy is
rendered. Set `level` to `''` to start from the root
node in the hierarchy. Must be an "id" if `ids` is
filled in, otherwise plotly attempts to find a matching
item in `labels`.
marker
:class:`plotly.graph_objects.sunburst.Marker` instance
or dict with compatible properties
maxdepth
Sets the number of rendered sectors from any given
`level`. Set `maxdepth` to "-1" to render all the
levels in the hierarchy.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
outsidetextfont
Sets the font used for `textinfo` lying outside the
sector. This option refers to the root of the hierarchy
presented at the center of a sunburst graph. Please
note that if a hierarchy has multiple root nodes, this
option won't have any effect and `insidetextfont` would
be used.
parents
Sets the parent sectors for each of the sectors. Empty
string items '' are understood to reference the root
node in the hierarchy. If `ids` is filled, `parents`
items are understood to be "ids" themselves. When `ids`
is not set, plotly attempts to find matching items in
`labels`, but beware they must be unique.
parentssrc
Sets the source reference on Chart Studio Cloud for
parents .
root
:class:`plotly.graph_objects.sunburst.Root` instance or
dict with compatible properties
rotation
Rotates the whole diagram counterclockwise by some
angle. By default the first slice starts at 3 o'clock.
sort
Determines whether or not the sectors are reordered
from largest to smallest.
stream
:class:`plotly.graph_objects.sunburst.Stream` instance
or dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
            Determines which trace information appears on the graph.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
            that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
            details on the date formatting syntax. Every attribute
            that can be specified per-point (the ones that are
            `arrayOk: true`) is available. Available variables are
            `currentPath`, `root`, `entry`, `percentRoot`,
            `percentEntry`, `percentParent`, `label` and `value`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values associated with each of the sectors.
Use with `branchvalues` to determine how the values are
summed.
valuessrc
Sets the source reference on Chart Studio Cloud for
values .
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Sunburst
"""
super(Sunburst, self).__init__("sunburst")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Sunburst
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Sunburst`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("branchvalues", None)
_v = branchvalues if branchvalues is not None else _v
if _v is not None:
self["branchvalues"] = _v
_v = arg.pop("count", None)
_v = count if count is not None else _v
if _v is not None:
self["count"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("domain", None)
_v = domain if domain is not None else _v
if _v is not None:
self["domain"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("insidetextfont", None)
_v = insidetextfont if insidetextfont is not None else _v
if _v is not None:
self["insidetextfont"] = _v
_v = arg.pop("insidetextorientation", None)
_v = insidetextorientation if insidetextorientation is not None else _v
if _v is not None:
self["insidetextorientation"] = _v
_v = arg.pop("labels", None)
_v = labels if labels is not None else _v
if _v is not None:
self["labels"] = _v
_v = arg.pop("labelssrc", None)
_v = labelssrc if labelssrc is not None else _v
if _v is not None:
self["labelssrc"] = _v
_v = arg.pop("leaf", None)
_v = leaf if leaf is not None else _v
if _v is not None:
self["leaf"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("level", None)
_v = level if level is not None else _v
if _v is not None:
self["level"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("maxdepth", None)
_v = maxdepth if maxdepth is not None else _v
if _v is not None:
self["maxdepth"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("outsidetextfont", None)
_v = outsidetextfont if outsidetextfont is not None else _v
if _v is not None:
self["outsidetextfont"] = _v
_v = arg.pop("parents", None)
_v = parents if parents is not None else _v
if _v is not None:
self["parents"] = _v
_v = arg.pop("parentssrc", None)
_v = parentssrc if parentssrc is not None else _v
if _v is not None:
self["parentssrc"] = _v
_v = arg.pop("root", None)
_v = root if root is not None else _v
if _v is not None:
self["root"] = _v
_v = arg.pop("rotation", None)
_v = rotation if rotation is not None else _v
if _v is not None:
self["rotation"] = _v
_v = arg.pop("sort", None)
_v = sort if sort is not None else _v
if _v is not None:
self["sort"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textinfo", None)
_v = textinfo if textinfo is not None else _v
if _v is not None:
self["textinfo"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("values", None)
_v = values if values is not None else _v
if _v is not None:
self["values"] = _v
_v = arg.pop("valuessrc", None)
_v = valuessrc if valuessrc is not None else _v
if _v is not None:
self["valuessrc"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
# Read-only literals
# ------------------
self._props["type"] = "sunburst"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
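
# ----------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the generated
# class). It assumes the public `plotly.graph_objects` entry point; the
# labels, parents and values below are made-up sample data. With
# `branchvalues="total"` each parent's value must equal the sum of its
# children's values, which is how the constructor argument documented
# above is meant to be used.
if __name__ == "__main__":
    import plotly.graph_objects as go

    fig = go.Figure(
        go.Sunburst(
            labels=["total", "A", "B", "A1", "A2"],
            parents=["", "total", "total", "A", "A"],
            values=[10, 6, 4, 3, 3],
            branchvalues="total",  # parents carry the totals of their leaves
        )
    )
    fig.show()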
| mit |
SpectralGroup/spectraplotpy | spectraplotpy/plotter_helper.py | 1 | 5528 | # -*- coding: utf-8 -*-
#
# This file is part of spectraplotpy.
#
# spectraplotpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# spectraplotpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with spectraplotpy. If not, see <http://www.gnu.org/licenses/>.
#
"""
Created on Wed March 19 22:36:41 2014
@author: lbressan
"""
"""
It plots multiple spectra.
"""
import custom_exceptions as ce
import warnings
import matplotlib.pyplot as plt
import spectraplotpy as spp
import numpy as np
def plot_spectra(*sp_list, **kwargs):
"""
    Function that takes a list of spectra as input and plots them
    by calling each spectrum's plot() method.
"""
axes = kwargs.get('axes', plt.gca())
for sp in sp_list:
# here we pass an axes named argument the value of the axes
# local variable
sp.plot(axes=axes, label=sp.dataset.name)
return axes
def average_spectra(*sp_list, **kwargs):
"""
Create average spectra from a list of spectra.
    Metadata is taken from the first spectrum. Existing y_errors are
    overwritten, existing x_errors are not modified.
    Parameters
    ----------
    sp_list : Array of Spectrum objects
    ddof : int = 1
        Delta Degrees of freedom. The standard deviation is calculated as
        s = sqrt(1/(N - ddof) * Sum((avg(x) - x_i)^2)), where N is the
        number of elements.
    error_type : {'st_dev', 'st_err'} = 'st_err'
        The type of error (uncertainty) returned.
        'st_dev' is the standard deviation of the sample.
        'st_err' is the standard error (st_dev/sqrt(N)).
"""
ddof = kwargs.get('ddof', 1)
error_type = kwargs.get('error_type', 'st_err')
    if error_type not in ('st_dev', 'st_err'):
        raise ValueError("error_type should be either 'st_dev' or 'st_err' "
                         "and not '" + error_type + "'!")
    N = len(sp_list)
if N == 0:
raise ValueError("sp_list can not be empty!")
spectrum = sp_list[0].copy()
if N == 1:
warnings.warn("Only one spectrum passed to average_spectra().")
return spectrum
#Check that all spectra are compatible (ie have same x-values, etc)
#TODO: Append to exception msg, which two spectra are incompatible
for sp in sp_list[1:]:
spp.check_compatible_x(spectrum, sp)
#get the mean
for sp in sp_list[1:]:
spectrum.dataset.y += sp.dataset.y
spectrum.dataset.y /= N
#get the standard deviation
st_dev=np.zeros_like(spectrum.dataset.y)
for sp in sp_list:
st_dev += np.square(sp.dataset.y - spectrum.dataset.y)
st_dev /= N - ddof
st_dev = np.sqrt(st_dev)
if error_type.lower() == 'st_err':
        st_dev /= np.sqrt(N)  # should this use N - ddof as well?
spectrum.dataset.y_errors = st_dev
return spectrum
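
# Worked example (comments only) of the error estimate computed above.
# For a single x position sampled by four spectra with y values
# 1.0, 1.2, 0.8 and 1.0:
#   mean                    = 1.0
#   st_dev (ddof=1)         = sqrt((0 + 0.04 + 0.04 + 0) / 3) ~= 0.163
#   st_err = st_dev/sqrt(N) = 0.163 / 2                       ~= 0.082
# so with the default error_type='st_err' the stored y_error is ~0.082.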
def get_poly_baseline(spectrum, indices, deg=3):
"""
    Return a spectrum that fits a polynomial of degree :deg: through
    the points given by indices. The polynomial is evaluated at all x
    positions of spectrum. The returned spectrum object can be directly
    subtracted from spectrum.
TODO: what to do with naming, metadata, errors...?
For now performs a deep copy and appends "Baseline of " to name
Parameters
----------
spectrum :
indices : array of indices or other index object.
deg = 3
The degree of the polynomial.
"""
x = spectrum.dataset.x[indices]
y = spectrum.dataset.y[indices]
#TODO give errors as weights.
poly = np.polyfit(x, y, deg=deg)
result_spectrum = spectrum.copy()
result_spectrum.dataset.name = "Baseline of " + result_spectrum.dataset.name
result_spectrum.dataset.y = np.polyval(poly, result_spectrum.dataset.x)
return result_spectrum
def minmax_normalize(spectrum):
"""Normalize a spectrum in place by dividing it by the max(abs(y)).
After normalization all the y values are between [-1 and 1].
"""
ymax = np.max(np.abs(spectrum.dataset.y))
spectrum /= ymax
def baseline_correct(spectrum, left_num=100, right_num=100, deg=3):
""" Substracts the baseline in place.
Currently only a polynomial baseline is supported.
Parameters
----------
spectrum :
spectrum-like object
    left_num = 100 :
        Number of points on the left side to take as baseline for fitting.
    right_num = 100 :
        Number of points on the right side to take as baseline for fitting.
deg = 3 :
The degree of the polynomial.
"""
left = range(0,left_num)
right = range(-right_num,0,1)
indices = np.append(left, right)
baseline = spp.get_poly_baseline(spectrum, indices, deg=deg)
spectrum -= baseline
return spectrum
def integrate(spectrum, inplace=True):
"""
Integrates a spectrum.
    inplace = True
        Whether the spectrum should be modified in place or not.
#Todo: what to do with errors?
"""
if not inplace:
spectrum = spectrum.copy()
yint = np.cumsum(spectrum.dataset.y)
spectrum.dataset.y = yint
return spectrum
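
# Minimal end-to-end sketch of how the helpers in this module fit together.
# Assumptions: `spp.Spectrum` and `spp.CSVImporter` are hypothetical names
# for the package's spectrum/importer classes, and the file names are made
# up; average_spectra, baseline_correct, minmax_normalize and plot_spectra
# are the functions defined above.
if __name__ == "__main__":
    spectra = [spp.Spectrum(spp.CSVImporter(name))   # hypothetical loading
               for name in ("a.csv", "b.csv", "c.csv")]
    avg = average_spectra(*spectra, error_type='st_err')
    baseline_correct(avg, left_num=50, right_num=50, deg=3)
    minmax_normalize(avg)
    plot_spectra(avg)
    plt.show()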
| gpl-3.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/covariance/tests/test_covariance.py | 2 | 10024 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD Style.
from numpy.testing import assert_almost_equal, assert_array_almost_equal, \
assert_equal, assert_raises
import numpy as np
import warnings
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
"""Tests Covariance module on a simple dataset.
"""
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print np.amin(mahal_dist), np.amax(mahal_dist)
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
with warnings.catch_warnings(record=True):
cov.fit(X_1sample)
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
"""Tests ShrunkCovariance module on a simple dataset.
"""
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
"""Tests LedoitWolf module on a simple dataset.
"""
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True, block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# (too) large data set
X_large = np.ones((20, 200))
assert_raises(MemoryError, ledoit_wolf, X_large, block_size=100)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
X_1sample = np.arange(5)
lw = LedoitWolf()
with warnings.catch_warnings(record=True):
lw.fit(X_1sample)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_oas():
"""Tests OAS module on a simple dataset.
"""
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
### Same tests without assuming centered data
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
X_1sample = np.arange(5)
oa = OAS()
with warnings.catch_warnings(record=True):
oa.fit(X_1sample)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| agpl-3.0 |
tony-hong/roleo | wsvt/Rollenverteilung/src/scripts/evaluation/CalculateOverlap.py | 1 | 2958 | #!/usr/bin/env python
import os
import sys
from argparse import ArgumentParser
import pandas as pd
from rv.structure.Tensor import Matricisation
def compute_jaccard_index(set_1, set_2):
n = len(set_1.intersection(set_2))
return n / float(len(set_1) + len(set_2) - n)
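
# Worked example (comments only): for set_1 = {a, b, c} and set_2 = {b, c, d}
# the intersection has n = 2 elements, so the value returned above is
# 2 / (3 + 3 - 2) = 0.5.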
if __name__ == "__main__":
parser = ArgumentParser(description="Measure the overlap (Jaccard index) between the candidate members of centroids across two tensors.")
parser.add_argument('word1afilename', metavar='word1a.h5', type=str, help='The word1 matricization on the left side of the comparison')
parser.add_argument('word1bfilename', metavar='word1b.h5', type=str, help='The word1 matricization on the right side of the comparison')
parser.add_argument('mappingafilename', metavar='mappinga.py', type=str, help='The mapping eval script for left-side role conversion')
parser.add_argument('mappingbfilename', metavar='mappingb.py', type=str, help='The mapping eval script for right-side role conversion')
parser.add_argument('evalfilename', metavar='evaldata', type=str, help='The evaluation file in columns')
parser.add_argument('-r', '--role', type=str, default=None, help='A role to filter by.')
args = parser.parse_args()
evaldata = pd.read_table(args.evalfilename, header=None)
evaldata = evaldata[[0,2]].sort().drop_duplicates()
if args.role:
evaldata = evaldata[evaldata[2] == args.role]
mappingafile = open(args.mappingafilename, "r")
mappingatext = mappingafile.readlines()
mappinga = eval(" ".join(mappingatext))
mappingbfile = open(args.mappingbfilename, "r")
mappingbtext = mappingbfile.readlines()
mappingb = eval(" ".join(mappingbtext))
matricisationa = Matricisation({'word1':args.word1afilename})
matricisationb = Matricisation({'word1':args.word1bfilename})
jaccards = []
for (index, verb, role) in evaldata.itertuples():
print "Verb = ", verb, "; Role = ", role
try:
responsea = matricisationa.getMemberVectors(verb, 'word1', 'word0', {'link':mappinga[role]})
#print responsea
(membersa, topsa) = responsea
(membersb, topsb) = matricisationb.getMemberVectors(verb, 'word1', 'word0', {'link':mappingb[role]})
print "From ", args.word1afilename, ":"
print str(sorted(topsa))
print "From ", args.word1bfilename, ":"
print str(sorted(topsb))
jaccard = compute_jaccard_index(set(topsa), set(topsb))
jaccards.append(jaccard)
print "Jaccard index = ", jaccard
except KeyError:
print "Role not found for one of the items. Skipping."
except ValueError:
print "One of the verb lookups failed. Possibly check mapping."
print "***"
print "Number of items processed = ", len(jaccards)
print "Average jaccard index = ", sum(jaccards)/float(len(jaccards))
| gpl-3.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/covariance/robust_covariance.py | 11 | 30680 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
dist = np.inf
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
# If the data already has singular covariance, calculate the precision,
# as the loop below will not be entered.
if np.isinf(det):
precision = linalg.pinvh(covariance)
previous_det = np.inf
while (det < previous_det and remaining_iterations > 0
and not np.isinf(det)):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Check if best fit already found (det => 0, logdet => -inf)
if np.isinf(det):
results = location, covariance, det, support, dist
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[RV]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [RV] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [RouseeuwVan]_,
see the MinCovDet object.
References
----------
.. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start] +
X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): shrink n_best_tot before re-allocating.
            n_best_tot = 10
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
            n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
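
# Illustrative sketch only (not part of scikit-learn): running fast_mcd on a
# small contaminated Gaussian sample. The sample sizes and the shift applied
# to the outliers below are arbitrary choices for the demonstration.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    X_demo = rng.randn(100, 2)
    X_demo[:10] += 10.  # shift ten points far away to act as outliers
    loc, cov, supp, dist = fast_mcd(X_demo, random_state=rng)
    print("robust location: %s" % loc)
    print("points kept in the support: %d" % supp.sum())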
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is almost, but not
        exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rousseeuw] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [ButlerDavies] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = linalg.pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [RVD]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
References
----------
.. [RVD] `A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS`
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates) described
in [RVDriessen]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
References
----------
.. [RVDriessen] `A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS`
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
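# ----------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module). A minimal, hypothetical example showing how MinCovDet recovers
# a robust location estimate from contaminated Gaussian data; the data and
# parameter values below are arbitrary assumptions for demonstration only.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.multivariate_normal([0., 0.], [[1., .3], [.3, 1.]], size=200)
    X_demo[:10] += 8.  # inject a few gross outliers
    mcd_demo = MinCovDet(random_state=0).fit(X_demo)
    print(mcd_demo.location_)   # robust location, stays near [0, 0]
    print(X_demo.mean(axis=0))  # classical mean, pulled toward the outliers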
| mit |
acimmarusti/isl_exercises | chap5/chap5lab.py | 1 | 6779 | from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
import scipy
import pandas as pd
#import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, LeaveOneOut, KFold
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score
from pandas.tools.plotting import scatter_matrix
import statsmodels.formula.api as smf
import statsmodels.api as sm
filename = '../Auto.csv'
#Load data to pandas dataframe and drop the missing values#
data = pd.read_csv(filename, na_values='?').dropna()
#Add non-linear terms#
data['horsepower2'] = np.power(data['horsepower'], 2)
data['horsepower3'] = np.power(data['horsepower'], 3)
#Random sampling#
data_train = data.sample(n=196, random_state=2)
data.loc[data.index, 'train'] = 'n'
data.loc[data_train.index, 'train'] = 'y'
data_test = data[data['train'] == 'n']
#Numeric columns#
numcols = list(data.columns)
numcols.remove('name')
numcols.remove('mpg')
numcols.remove('train')
print('\n\n### LINEAR REGRESSION WITH STATSMODELS###')
## Linear regression with statsmodels ##
lreg = smf.ols(formula='mpg~horsepower', data=data_train).fit()
print(np.mean(np.power(data_test['mpg'] - lreg.predict(data_test['horsepower']), 2)))
l2reg = smf.ols(formula='mpg~horsepower + np.power(horsepower, 2)', data=data_train).fit()
print(np.mean(np.power(data_test['mpg'] - l2reg.predict(data_test['horsepower']), 2)))
l3reg = smf.ols(formula='mpg~horsepower + np.power(horsepower, 2) + np.power(horsepower, 3)', data=data_train).fit()
print(np.mean(np.power(data_test['mpg'] - l3reg.predict(data_test['horsepower']), 2)))
"""
print(lreg.summary())
print(l2reg.summary())
print(l3reg.summary())
print('\nFit parameters')
print(lreg.params)
print(l2reg.params)
print(l3reg.params)
print('\n test vs prediction statsmodels (debug)')
print(data_test['mpg'].head())
print(lreg.predict(data_test['horsepower']).head())
"""
print('\n\n### LINEAR REGRESSION WITH SKLEARN###')
#Reshaping data into sklearn's preferred format#
train_size = len(data_train.index)
y_train = np.reshape(data_train['mpg'], (train_size, 1))
x_train = np.reshape(data_train['horsepower'], (train_size, 1))
x2_train = np.reshape(data_train[['horsepower', 'horsepower2']], (train_size, 2))
x3_train = np.reshape(data_train[['horsepower', 'horsepower2', 'horsepower3']], (train_size, 3))
test_size = len(data_test.index)
y_test = data_test['mpg']
x_test = np.reshape(data_test['horsepower'], (test_size, 1))
x2_test = np.reshape(data_test[['horsepower', 'horsepower2']], (test_size, 2))
x3_test = np.reshape(data_test[['horsepower', 'horsepower2', 'horsepower3']], (test_size, 3))
#X_train, X_test, Y_train, Y_test = train_test_split(x_data, y_data, test_size=0.5, random_state=2)
# Initiate linear regression object
lin_obj = LinearRegression()
lin2_obj = LinearRegression()
lin3_obj = LinearRegression()
# Fit model. Let X_train = matrix of predictors, Y_train = matrix of variables.
reslin_obj = lin_obj.fit(x_train, y_train)
reslin2_obj = lin2_obj.fit(x2_train, y_train)
reslin3_obj = lin3_obj.fit(x3_train, y_train)
#Predicted values for training set
pred_lin = reslin_obj.predict(x_test)
pred_lin2 = reslin2_obj.predict(x2_test)
pred_lin3 = reslin3_obj.predict(x3_test)
"""
print('\n test vs prediction sklearn (debug)')
print(y_test[:10])
print(pred_lin[:10])
"""
print(np.mean(np.power(y_test - pred_lin, 2)))
print(np.mean(np.power(y_test - pred_lin2, 2)))
print(np.mean(np.power(y_test - pred_lin3, 2)))
#Calculate mean error on validation sets#
def mean_cv_err(x_data, y_data, cvobj, regobj):
cv_errs = []
for train_idx, test_idx in cvobj.split(x_data):
xtrain, xtest = x_data[train_idx], x_data[test_idx]
ytrain, ytest = y_data[train_idx], y_data[test_idx]
res_reg = regobj.fit(xtrain, ytrain)
pred_reg = res_reg.predict(xtest)
#Reshape necessary because prediction produces a (1, n) numpy array, while ytest is (n, 1)#
cv_errs.append(np.mean(np.power(np.reshape(ytest, pred_reg.shape) - pred_reg, 2)))
mean_err_out = np.mean(cv_errs)
print('Mean error:')
print(mean_err_out)
return mean_err_out
#LOOCV strategy#
def loocv_err(x_data, y_data):
#Leave One Out Cross-validation#
loo = LeaveOneOut()
llreg = LinearRegression()
return mean_cv_err(x_data, y_data, loo, llreg)
#10-fold CV strategy#
def kfold_err(x_data, y_data):
#Kfold Cross-validation#
kfcv = KFold(n_splits=10)
klreg = LinearRegression()
return mean_cv_err(x_data, y_data, kfcv, klreg)
#Splitting the data for train/test#
data_size = len(data.index)
#Polynomial order#
poly_ord = 5
#Columns to use: polynomials#
poly_cols = ['horsepower']
order = 1
while True:
x_data = np.array(np.reshape(data[poly_cols], (data_size, order)))
y_data = np.array(np.reshape(data['mpg'], (data_size, 1)))
print('\n\nPolynomial order: ' + str(order))
print('LOOCV')
looerr = loocv_err(x_data, y_data)
print('\nKFold CV')
kfolderr = kfold_err(x_data, y_data)
order += 1
if poly_ord < 2 or order > poly_ord:
break
poly_hp = 'horsepower' + str(order)
data[poly_hp] = np.power(data['horsepower'], order)
poly_cols.append(poly_hp)
#NO PORTFOLIO DATA FOUND#
#Get linear fit parameter function#
def get_lreg_param(data, ylabel='y', xlabel='x', polyord=1):
bp_form = ylabel + '~' + xlabel
for order in range(2, polyord + 1):
bp_form += '+ np.power(' + xlabel + ', ' + str(order) + ')'
return smf.ols(formula=bp_form, data=data).fit().params
#print(get_lreg_param(data_train, ylabel='mpg', xlabel='horsepower', polyord=1))
def bootfn(data, target='y', predictor='x', order=1, repeat=1000):
boot_table = pd.DataFrame()
for ite in range(repeat):
data_boot = data.sample(n=len(data.index), replace=True)
boot_table[str(ite+1)] = get_lreg_param(data_boot, ylabel=target, xlabel=predictor, polyord=order)
results = pd.DataFrame()
boot_tab = boot_table.transpose()
results['estimate'] = boot_tab.mean()
results['stderr'] = boot_tab.std()
return results
print('\n\nBootstrapping coef estimation:')
print('\nOrder 1')
print(bootfn(data, target='mpg', predictor='horsepower', order=1))
print('\nOrder 2')
print(bootfn(data, target='mpg', predictor='horsepower', order=2))
print('\nOrder 3')
print(bootfn(data, target='mpg', predictor='horsepower', order=3))
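#Editor's note: illustrative cross-check only (not part of the original
#exercise). It assumes the x_data/y_data arrays left over from the loop
#above (the highest-order polynomial fit) are still in scope, and reuses
#the LinearRegression/LeaveOneOut imports already present.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(LinearRegression(), x_data, y_data.ravel(),
                            cv=LeaveOneOut(), scoring='neg_mean_squared_error')
print('\nLOOCV error for highest-order fit via cross_val_score:')
print(-cv_scores.mean())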
| gpl-3.0 |
rafaelmartins/rst2pdf | rst2pdf/math_flowable.py | 8 | 6239 | # -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
import tempfile
import os
import re
from reportlab.platypus import *
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfmetrics
from opt_imports import mathtext
from log import log
HAS_MATPLOTLIB = mathtext is not None
if HAS_MATPLOTLIB:
from matplotlib.font_manager import FontProperties
from matplotlib.colors import ColorConverter
fonts = {}
def enclose(s):
"""Enclose the string in $...$ if needed"""
if not re.match(r'.*\$.+\$.*', s, re.MULTILINE | re.DOTALL):
s = u"$%s$" % s
return s
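# Illustrative behaviour of enclose() (editor's note, inferred from the
# regex above): enclose(u'x^2') returns u'$x^2$', while an already
# delimited string such as u'$x^2$' is returned unchanged.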
class Math(Flowable):
def __init__(self, s, label=None, fontsize=12,color='black'):
self.s = s
self.label = label
self.fontsize = fontsize
self.color = color
if HAS_MATPLOTLIB:
self.parser = mathtext.MathTextParser("Pdf")
else:
log.error("Math support not available,"
" some parts of this document will be rendered incorrectly."
" Install matplotlib.")
Flowable.__init__(self)
self.hAlign='CENTER'
def wrap(self, aW, aH):
if HAS_MATPLOTLIB:
try:
width, height, descent, glyphs, \
rects, used_characters = self.parser.parse(
enclose(self.s), 72, prop=FontProperties(size=self.fontsize))
return width, height
except:
pass
# FIXME: report error
return 10, 10
def drawOn(self, canv, x, y, _sW=0):
if _sW and hasattr(self,'hAlign'):
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
a = self.hAlign
if a in ('CENTER','CENTRE', TA_CENTER):
x = x + 0.5*_sW
elif a in ('RIGHT',TA_RIGHT):
x = x + _sW
elif a not in ('LEFT',TA_LEFT):
raise ValueError, "Bad hAlign value "+str(a)
height = 0
if HAS_MATPLOTLIB:
global fonts
canv.saveState()
canv.translate(x, y)
try:
width, height, descent, glyphs, \
rects, used_characters = self.parser.parse(
enclose(self.s), 72, prop=FontProperties(size=self.fontsize))
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if not fontname in fonts:
fonts[fontname] = fontname
pdfmetrics.registerFont(TTFont(fontname, fontname))
canv.setFont(fontname, fontsize)
col_conv=ColorConverter()
rgb_color=col_conv.to_rgb(self.color)
canv.setFillColorRGB(rgb_color[0],rgb_color[1],rgb_color[2])
canv.drawString(ox, oy, unichr(num))
canv.setLineWidth(0)
canv.setDash([])
for ox, oy, width, height in rects:
canv.rect(ox, oy+2*height, width, height, fill=1)
except:
# FIXME: report error
col_conv=ColorConverter()
rgb_color=col_conv.to_rgb(self.color)
canv.setFillColorRGB(rgb_color[0],rgb_color[1],rgb_color[2])
canv.drawString(0,0,self.s)
canv.restoreState()
else:
canv.saveState()
canv.drawString(x, y, self.s)
canv.restoreState()
if self.label:
log.info('Drawing equation-%s'%self.label)
canv.bookmarkHorizontal('equation-%s'%self.label,0,height)
def descent(self):
"""Return the descent of this flowable,
useful to align it when used inline."""
if HAS_MATPLOTLIB:
width, height, descent, glyphs, rects, used_characters = \
self.parser.parse(enclose(self.s), 72, prop=FontProperties(size=self.fontsize))
return descent
return 0
def genImage(self):
"""Create a PNG from the contents of this flowable.
Required so we can put inline math in paragraphs.
Returns the file name.
The file is the caller's responsibility.
"""
dpi = 72
scale = 10
try:
import Image
import ImageFont
import ImageDraw
import ImageColor
except ImportError:
from PIL import (
Image,
ImageFont,
ImageDraw,
ImageColor,
)
if not HAS_MATPLOTLIB:
img = Image.new('RGBA', (120, 120), (255,255,255,0))
else:
width, height, descent, glyphs,\
rects, used_characters = self.parser.parse(
enclose(self.s), dpi, prop=FontProperties(size=self.fontsize))
img = Image.new('RGBA', (int(width*scale), int(height*scale)),(255,255,255,0))
draw = ImageDraw.Draw(img)
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
font = ImageFont.truetype(fontname, int(fontsize*scale))
tw, th = draw.textsize(unichr(num), font=font)
# No, I don't understand why that 4 is there.
# As we used to say in the pure math
# department, that was a numerical solution.
col_conv=ColorConverter()
fc=col_conv.to_rgb(self.color)
rgb_color=(int(fc[0]*255),int(fc[1]*255),int(fc[2]*255))
draw.text((ox*scale, (height - oy - fontsize + 4)*scale),
unichr(num), font=font,fill=rgb_color)
for ox, oy, w, h in rects:
x1 = ox*scale
x2 = x1 + w*scale
y1 = (height - oy)*scale
y2 = y1 + h*scale
draw.rectangle([x1, y1, x2, y2],(0,0,0))
fh, fn = tempfile.mkstemp(suffix=".png")
os.close(fh)
img.save(fn)
return fn
if __name__ == "__main__":
doc = SimpleDocTemplate("mathtest.pdf")
Story = [Math(r'\mathcal{R}\prod_{i=\alpha\mathcal{B}}'\
r'^\infty a_i\sin(2 \pi f x_i)')]
doc.build(Story)
| mit |
jkarnows/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
HeraclesHX/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
yipenggao/moose | python/peacock/tests/postprocessor_tab/test_FigurePlugin.py | 6 | 3010 | #!/usr/bin/env python
import sys
from PyQt5 import QtWidgets
from peacock.PostprocessorViewer.plugins.FigurePlugin import main
from peacock.utils import Testing
import mooseutils
class TestFigurePlugin(Testing.PeacockImageTestCase):
"""
Test class for FigureWidget.
"""
#: QApplication: The main App for QT, this must be static to work correctly.
qapp = QtWidgets.QApplication(sys.argv)
def setUp(self):
"""
Creates the GUI.
"""
# Read some data
filename = '../input/white_elephant_jan_2016.csv'
self._reader = mooseutils.PostprocessorReader(filename)
# Create the widget with FigurePlugin only
self._widget = main()
self._window = self._widget.currentWidget().FigurePlugin
def testEmpty(self):
"""
Test that an empty plot with two projection options gets created.
"""
self._window.draw()
self.assertImage('testEmpty.png')
def testPlotLeft(self):
"""
Draws on left axis.
"""
ax = self._window.axes()[0]
ax.plot(self._reader('air_temp_low_24_hour_set_1'), '-b')
self._window.draw()
self.assertImage('testPlotLeft.png')
def testPlotRight(self):
"""
Draws right axis.
"""
ax = self._window.axes()[1]
ax.plot(self._reader('air_temp_high_24_hour_set_1'), '-r')
self._window.draw()
self.assertImage('testPlotRight.png')
def testPlotDual(self):
"""
Draws on both.
"""
ax = self._window.axes()[0]
ax.plot(self._reader('air_temp_low_24_hour_set_1'), '-b')
ax = self._window.axes()[1]
ax.plot(self._reader('air_temp_high_24_hour_set_1'), '-r')
self._window.draw()
self.assertImage('testPlotDual.png')
def testClear(self):
"""
Test that a plot can be created and cleared.
"""
ax = self._window.axes()[0]
ax.plot(self._reader('snow_water_equiv_set_1'), '-b')
self._window.draw()
self.assertImage('testClearPlot.png')
ax.clear()
self._window.draw()
self.assertImage('testEmpty.png')
def testRepr(self):
"""
Test the "repr" script output.
"""
output, imports = self._window.repr()
self.assertIn('import matplotlib.pyplot as plt', imports)
self.assertIn("figure = plt.figure(facecolor='white')", output)
self.assertIn('axes0 = figure.add_subplot(111)', output)
# This only appears if data exists on axes2
ax1 = 'axes1 = axes0.twinx()'
self.assertNotIn(ax1, output)
# Plot data on right and make sure axes1 appears
ax = self._window.axes()[1]
ax.plot(self._reader('air_temp_high_24_hour_set_1'), '-r')
output, imports = self._window.repr()
self.assertIn(ax1, output)
if __name__ == '__main__':
import unittest
unittest.main(module=__name__, verbosity=2)
| lgpl-2.1 |
bikash/h2o-dev | h2o-py/tests/testdir_algos/kmeans/pyunit_iris_h2o_vs_sciKmeans.py | 1 | 1292 | import sys
sys.path.insert(1, "../../../")
import h2o
import numpy as np
from sklearn.cluster import KMeans
def iris_h2o_vs_sciKmeans(ip,port):
# Connect to a pre-existing cluster
h2o.init(ip,port) # connect to localhost:54321
iris_h2o = h2o.import_frame(path=h2o.locate("smalldata/iris/iris.csv"))
iris_sci = np.genfromtxt(h2o.locate("smalldata/iris/iris.csv"), delimiter=',')
iris_sci = iris_sci[:,0:4]
#TODO: implement row slicing
s =[[4.9,3.0,1.4,0.2],
[5.6,2.5,3.9,1.1],
[6.5,3.0,5.2,2.0]]
start = h2o.H2OFrame(s)
start_key = start.send_frame()
h2o_km = h2o.kmeans(x=iris_h2o[0:4], k=3, user_points=start_key, standardize=False)
sci_km = KMeans(n_clusters=3, init=np.asarray(s), n_init=1)
sci_km.fit(iris_sci)
# Log.info("Cluster centers from H2O:")
print "Cluster centers from H2O:"
h2o_centers = h2o_km.centers()
print h2o_centers
# Log.info("Cluster centers from scikit:")
print "Cluster centers from scikit:"
sci_centers = sci_km.cluster_centers_.tolist()
print sci_centers
for hcenter, scenter in zip(h2o_centers, sci_centers):
for hpoint, spoint in zip(hcenter,scenter):
assert abs(hpoint - spoint) < 1e-10, "expected centers to be the same"
if __name__ == "__main__":
h2o.run_test(sys.argv, iris_h2o_vs_sciKmeans)
| apache-2.0 |
Windy-Ground/scikit-learn | sklearn/semi_supervised/label_propagation.py | 71 | 15342 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse = ['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propgation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
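# ----------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module). A minimal, hypothetical example contrasting the 'rbf' and 'knn'
# kernels discussed in the module docstring; the dataset choice and
# parameter values are arbitrary assumptions for demonstration only.
if __name__ == '__main__':
    from sklearn import datasets
    iris = datasets.load_iris()
    rng = np.random.RandomState(42)
    labels = np.copy(iris.target)
    labels[rng.rand(len(labels)) < 0.7] = -1   # hide ~70% of the labels
    for kern in ('rbf', 'knn'):
        model = LabelSpreading(kernel=kern, gamma=20, n_neighbors=7)
        model.fit(iris.data, labels)
        acc = np.mean(model.transduction_ == iris.target)
        print('%s kernel, transductive accuracy: %.3f' % (kern, acc))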
| bsd-3-clause |
nipy/brainx | brainx/modularity.py | 1 | 52650 | # encoding: utf-8
"""Detect modules in a network.
Citation: He Y, Wang J, Wang L, Chen ZJ, Yan C, et al. (2009) Uncovering
Intrinsic Modular Organization of Spontaneous Brain Activity in Humans. PLoS
ONE 4(4): e5226. doi:10.1371/journal.pone.0005226
Comparing community structure identification
J. Stat. Mech. (2005) P0900
Leon Danon1,2, Albert Diaz-Guilera1, Jordi Duch2 and Alex Arenas
Online at stacks.iop.org/JSTAT/2005/P09008
doi:10.1088/1742-5468/2005/09/P09008
"""
from __future__ import print_function
# Modules from the stdlib
import math
import copy
# Third-party modules
import networkx as nx
import numpy as np
import numpy.testing as npt
import numpy.linalg as nl
import scipy.linalg as sl
from matplotlib import pyplot as plt
# Our own modules
from . import util
#-----------------------------------------------------------------------------
# Class declarations
#-----------------------------------------------------------------------------
class EmptyModuleError(ValueError):
pass
class GraphPartition(object):
"""Represent a graph partition.
The main object keeping track of the data is the .index attribute, a dict
that maps integer module labels to node sets. This dict's labels are
always assumed to start at 0 and not to point ever to empty modules. If
empty modules are created during any module manipulations, they will be
removed from the index and the remaining modules will be relabeled."""
def __init__(self, graph, index):
"""New partition, given a graph and a dict of module->nodes.
Parameters
----------
graph : network graph instance
Graph to which the partition index refers to.
index : dict
A dict of sets that maps module/partition labels to sets of
nodes, this describes the partition in full.
Note
----
The values in the index dict MUST be real sets, not lists.
"""
# Store references to the original graph and label dict
if not type(index) == type({}):
raise TypeError('index should be of type dict(),'\
'not %s'%type(index))
self.index = copy.deepcopy(index)
## add quick check to make sure the passed index is
## a dict of sets
self._check_index_contains_sets()
# We'll need the graph's adjacency matrix often, so store it once
self.graph_adj_matrix = nx.adj_matrix(graph).todense() # Must convert to dense matrix before making into a numpy array (line was previously: self.graph_adj_matrix = nx.adj_matrix(graph))
#make sure adj_matrix is binary otherwise raise exception
if not self.graph_adj_matrix.sum() == \
self.graph_adj_matrix.astype(bool).sum():
raise ValueError('Adjacency matrix is weighted, need binary matrix')
# Just to be sure, we don't want to count self-links, so we zero out the
# diagonal.
util.fill_diagonal(self.graph_adj_matrix, 0)
# Store statically a few things about the graph that don't change (as
# long as the graph does not change
self.num_nodes = graph.number_of_nodes()
self.num_edges = graph.number_of_edges()
if self.num_edges == 0:
raise ValueError("Cannot create a graph partition "\
"if graph has no edges")
# Store the nodes as a set of contiguous integers (indices into
#the adjacency_matrix), needed for many operations
self._node_set = set(range(self.num_nodes))
self._node_names = graph.nodes()
## raise useful error if index is missing nodes in graph
self._check_allnodes_in_index()
# Now, build the edge information used in modularity computations
self.mod_e, self.mod_a = self._edge_info()
def copy(self):
return copy.deepcopy(self)
def __len__(self):
return len(self.index)
def _check_index_contains_sets(self):
""" the index in a GraphPartition is a dict of node sets
validate that the values of this dict are all of type(set)"""
index_types = [ type(x) for x in self.index.values() ]
if not all([ x== type(set()) for x in index_types]):
raise TypeError('index values should be of type set():: %s'%(index_types))
def _check_allnodes_in_index(self):
"""Check that index contains all nodes in graph"""
sets = self.index.values()
indexnodes = set.union(*sets)
missing = self._node_set.difference(indexnodes)
if missing:
raise ValueError('index does not contain all graph nodes: missing %s'%missing)
def _edge_info(self, mod_e=None, mod_a=None, index=None):
"""Create the vectors of edge information.
Returns
-------
mod_e: diagonal of the edge matrix E
mod_a: sum of the rows of the E matrix
"""
num_mod = len(self)
if mod_e is None: mod_e = [0] * num_mod
if mod_a is None: mod_a = [0] * num_mod
if index is None: index = self.index
norm_factor = 1.0 / (2.0 * self.num_edges)
mat = self.graph_adj_matrix
node_set = self._node_set
for m, modnodes in index.items():
btwnnodes = list(node_set - modnodes)
modnodes = list(modnodes)
mat_within = mat[modnodes,:][:,modnodes]
mat_between = mat[modnodes,:][:,btwnnodes]
perc_within = mat_within.sum() * norm_factor
perc_btwn = mat_between.sum() * norm_factor
mod_e[m] = perc_within #all of the E's
mod_a[m] = perc_btwn+perc_within #all of the A's
if np.isnan(mod_e[m]) or np.isnan(mod_a[m]):
raise ArithmeticError('NAN found: mod_e=%s, mod_a=%s'%(mod_e[m], mod_a[m]))
return mod_e, mod_a
def modularity_newman(self):
""" Function using other version of expressing modularity, from the
Newman papers (2004 Physical Review)
Parameters:
g = graph
part = partition
Returns:
mod = modularity
"""
if np.isnan((np.array(self.mod_e) - (np.array(self.mod_a)**2)).sum()):
raise ArithmeticError('NAN found: mod_e=%s, mod_a=%s'%(self.mod_e, self.mod_a))
return (np.array(self.mod_e) - (np.array(self.mod_a)**2)).sum()
##TODO can we remove this?? CM
modularity = modularity_newman
def find_unconnected_nodes(self):
""" checks for nodes in graph with no edges """
graph = nx.from_numpy_matrix(self.graph_adj_matrix)
unconnected = [ n for n,d in graph.degree_iter() if d==0 ]
return unconnected
def compute_module_merge(self, m1, m2):
"""Merges two modules in a given partition.
This updates in place the mod_e and mod_a arrays (both of which lose a
row). The new, merged module will be identified as m1.
Parameters
----------
m1: name (i.e., index) of one module
m2: name (i.e., index) of the other module
Returns
-------
merged_module : set of merged nodes
e1[0] : element of e vector for merged module
a1[0] : element of a vector for merged module
-delta_q : change in modularity induced by this change
'merge' : string indicating the type of change that was applied
m1 : index of the first merged module
m2 : index of the second merged module
m2 : index of the second merged module
"""
# Below, we want to know that m1<m2, so we enforce that:
if m1>m2:
m1, m2 = m2, m1
# Pull from m2 the nodes and merge them into m1
merged_module = self.index[m1] | self.index[m2]
#make an empty matrix for computing "modularity" level values
e1 = [0]
a1 = [0]
e0, a0 = self.mod_e, self.mod_a
# The values that change: _edge_info with arguments will update the e,
# a vectors only for the modules in index
e1, a1 = self._edge_info(e1, a1, {0:merged_module})
# Compute the change in modularity
delta_q = (e1[0]-a1[0]**2) - \
( (e0[m1]-a0[m1]**2) + (e0[m2]-a0[m2]**2) )
#print 'NEW: ',e1,a1,e0[m1],a0[m1],e0[m2],a0[m2]
return merged_module, e1[0], a1[0], -delta_q, 'merge',m1,m2,m2
def apply_module_merge(self, m1, m2, merged_module, e_new, a_new):
"""Merges two modules in a given partition.
This updates in place the mod_e and mod_a arrays (both of which lose a
row). The new, merged module will be identified as m1.
Parameters
----------
m1: name (i.e., index) of one module
m2: name (i.e., index) of the other module
merged_module: set of all nodes from m1 and m2
e_new: mod_e of merged_module
a_new: mod_a of merged_module
Returns
-------
Does not return anything -- operates on self.mod_e and self.mod_a in
place
"""
# Below, we want to know that m1<m2, so we enforce that:
if m1>m2:
m1, m2 = m2, m1
# Pull from m2 the nodes and merge them into m1
self.index[m1] = merged_module
del self.index[m2]
# We need to shift the keys to account for the fact that we popped out
# m2
rename_keys(self.index,m2)
self.mod_e[m1] = e_new
self.mod_a[m1] = a_new
self.mod_e.pop(m2)
self.mod_a.pop(m2)
def compute_module_split(self, m, n1, n2):
"""Splits a module into two new ones.
This updates in place the mod_e and mod_a arrays (both of which lose a
row). The new, merged module will be identified as m1.
Parameters
----------
m : module identifier
n1, n2 : sets of nodes
The two sets of nodes in which the nodes originally in module m will
be split. Note: It is the responsibility of the caller to ensure
that the set n1+n2 is the full set of nodes originally in module m.
Returns
-------
The change in modularity resulting from the change
(Q_final-Q_initial)"""
# FIXME : docstring is wrong (the partition is modified in-place), and
# we shouldn't be returning split_modules at all from this.
# create a dict that contains the new modules 0 and 1 that have the
# sets n1 and n2 of nodes from module m.
split_modules = {0: n1, 1: n2}
#make an empty matrix for computing "modularity" level values
e1 = [0,0]
a1 = [0,0]
e0, a0 = self.mod_e, self.mod_a
# The values that change: _edge_info with arguments will update the e,
# a vectors only for the modules in index
e1, a1 = self._edge_info(e1, a1, split_modules)
# Compute the change in modularity
delta_q = ( (e1[0]-a1[0]**2) + (e1[1]- a1[1]**2) ) - (e0[m]-a0[m]**2)
return split_modules, e1, a1, -delta_q,'split',m,n1,n2
def apply_module_split(self, m, n1, n2, split_modules, e_new, a_new):
"""Splits a module into two new ones.
This updates in place the mod_e and mod_a arrays (both of which lose a
row). The new, merged module will be identified as m1.
Parameters
----------
m : module identifier
n1, n2 : sets of nodes
The two sets of nodes in which the nodes originally in module m will
be split. Note: It is the responsibility of the caller to ensure
that the set n1+n2 is the full set of nodes originally in module m.
split_modules : dict DEPRECATED - will be removed soon.
The dict ``{0: n1, 1: n2}``.
e_new : array
The e vector for the resulting partition after the split has been applied.
a_new : array
The a vector for the resulting partition after the split has been applied.
Returns
-------
None : the partition is modified in-place.
"""
# To reuse slicing code, use m1/m2 lables like in merge code
m1 = m
m2 = len(self)
# Add a new module to the end of the index dictionary
self.index[m1] = n1
self.index[m2] = n2
self.mod_e[m1] = e_new[0]
self.mod_a[m1] = a_new[0]
self.mod_e.insert(m2,e_new[1])
self.mod_a.insert(m2,a_new[1])
#self.mod_e[m2] = e_new[1]
#self.mod_a[m2] = a_new[1]
#EN: Not sure if this is necessary, but sometimes it finds a partition
#with an empty module...
## CG: should not be necessary... but may mess things up by renaming
#keys in dictionary but not updating mod_e and mod_a. Maybe we should
#take care of this case earlier to ensure that it can not happen?
#Otherwise need to create a new function to update/recompute mod_e and
#mod_a.
# If there are empty modules after the operation, remove them from the
# index and rename the partition labels
if len(self.index[m1])<1:
EmptyModuleError('Empty module after module split, old mod')
if len(self.index[m2])<1:
EmptyModuleError('Empty module after module split, old mod')
def node_update(self, n, m1, m2):
"""Moves a single node within or between modules
Parameters
----------
n : node identifier
The node that will be moved from module m1 to module m2
m1 : module identifier
The module that n used to belong to.
m2 : module identifier
The module that n will now belong to.
Returns
-------
The change in modularity resulting from the change
(Q_final-Q_initial)"""
#Update the index with the change
index = self.index
index[m1].remove(n)
index[m2].add(n)
# This checks whether there is an empty module. If so, renames the keys.
if len(self.index[m1])<1:
self.index.pop(m1)
rename_keys(self.index,m1)
# Before we overwrite the mod vectors, compute the contribution to
# modularity from before the change
e0, a0 = self.mod_e, self.mod_a
mod_old = (e0[m1]-a0[m1]**2) + (e0[m2]-a0[m2]**2)
# Update in place mod vectors with new index
self._edge_info(self.mod_e, self.mod_a, {m1:index[m1], m2:index[m2]})
e1, a1 = self.mod_e, self.mod_a
#Compute the change in modularity
return (e1[m1]-a1[m1]**2) + (e1[m2]-a1[m2]**2) - mod_old
def compute_node_update(self, n, m1, m2):
"""Moves a single node within or between modules
Parameters
----------
n : node identifier
The node that will be moved from module m1 to module m2
m1 : module identifier
The module that n used to belong to.
m2 : module identifier
The module that n will now belong to.
Returns
-------
The change in modularity resulting from the change
(Q_final-Q_initial)"""
n1 = self.index[m1]
n2 = self.index[m2]
node_moved_mods = {0: n1 - set([n]),1: n2 | set([n])}
# Before we overwrite the mod vectors, compute the contribution to
# modularity from before the change
e1 = [0,0]
a1 = [0,0]
e0, a0 = self.mod_e, self.mod_a
# The values that change: _edge_info with arguments will update the e,
# a vectors only for the modules in index
e1, a1 = self._edge_info(e1, a1, node_moved_mods)
#Compute the change in modularity
delta_q = ( (e1[0]-a1[0]**2) + (e1[1]-a1[1]**2)) - \
( (e0[m1]-a0[m1]**2) + (e0[m2]-a0[m2]**2) )
#print n,m1,m2,node_moved_mods,n1,n2
return node_moved_mods, e1, a1, -delta_q, n, m1, m2
def apply_node_update(self, n, m1, m2, node_moved_mods, e_new, a_new):
"""Moves a single node within or between modules
Parameters
----------
n : node identifier
The node that will be moved from module m1 to module m2
m1 : module identifier
The module that n used to belong to.
m2 : module identifier
The module that n will now belong to.
node_moved_mods : tuple
The two sets of modules for modules m1 and m2.
e_new : 2-tuple of arrays
The E arrays for m1 and m2
a_new : 2-tuple of arrays
The A arrays for m1 and m2
Returns
-------
The change in modularity resulting from the change
(Q_final-Q_initial)"""
self.index[m1] = node_moved_mods[0]
self.index[m2] = node_moved_mods[1]
# If we end up with an empty module, we need to remove it from the
# partition, and store the information only for the new one.
# This checks whether there is an empty module. If so, renames the keys.
if len(self.index[m1]) < 1:
#EmptyModuleError('Empty module after node move')
self.index.pop(m1)
rename_keys(self.index, m1)
# Once the index structure changes, the labeling of E and A arrays
# will need to be recomputed (we could propagate the changes
# throughout, but it's extremely brittle and easy to make a very
# hard to debug error. Safer to just recompute the arrays in this
# case).
self.mod_e, self.mod_a = self._edge_info()
#if m1 < m2:
# m2 = m2 - 1 #only need to rename this index if m1 is before m2
else:
self.mod_e[m1] = e_new[0]
self.mod_a[m1] = a_new[0]
self.mod_e[m2] = e_new[1]
self.mod_a[m2] = a_new[1]
return m2
def random_mod(self):
"""Makes a choice whether to merge or split modules in a partition
Returns:
-------
if splitting: m1, n1, n2
m1: the module to split
n1: the set of nodes to put in the first output module
n2: the set of nodes to put in the second output module
if merging: m1, m2
m1: module 1 to merge
m2: module 2 to merge
"""
# number of modules in the partition
num_mods=len(self)
# Make a random choice bounded between 0 and 1,
# less than 0.5 means we will split the modules
# greater than 0.5 means we will merge the modules.
if num_mods >= self.num_nodes-1: ### CG: why are we subtracting 1 here?
coin_flip = 1 #always merge if each node is in a separate module
elif num_mods <= 2: ### Why 2 and not 1?
coin_flip = 0 #always split if there's only one module
else:
coin_flip = np.random.random()
#randomly select two modules to operate on
rand_mods = np.random.permutation(range(num_mods))
m1 = rand_mods[0]
m2 = rand_mods[1]
if coin_flip > 0.5:
#merge
return self.compute_module_merge(m1,m2)
else:
#split
# cannot have a module with less than 1 node
while len(self.index[m1]) <= 1:
#reselect the first module
rand_mods = np.random.permutation(range(num_mods))
m1 = rand_mods[0]
#m1 = random.randint(0,num_mods)
### CG: why not just work your way through the list?
n1,n2 = self.determine_node_split(m1)
#We may want to return output of merging/splitting directly, but
#for now we're returning inputs for those modules.
return self.compute_module_split(m1,n1,n2)
def determine_node_split(self,m1):
""" Determine how to split nodes within a module
"""
# list of nodes within that module
list_nods = list(self.index[m1])
# randomly partition the list of nodes into 2
nod_split_ind = np.random.randint(1,len(list_nods)) #can't pick the first node as the division
### CG: but it's ok to put up to the last because
## np.random.randint is exclusive on the second number
n1 = set(list_nods[:nod_split_ind]) #at least 1 large
n2 = set(list_nods[nod_split_ind:]) #at least 1 large
return n1,n2
def random_node(self):
""" Randomly reassign one node from one module to another
Returns:
-------
n: node to move
m1: module node is currently in
m2: module node will be moved to """
# number of modules in the partition
num_mods=len(self)
if num_mods < 2:
raise ValueError("Can not reassign node with only one module")
# initialize a variable so we can search the modules to find one with
# at least 1 node
node_len = 0
# select 2 random modules (the first must have at least 2 nodes in it)
while node_len <= 1:
# randomized list of modules
rand_mods=np.random.permutation(range(num_mods))
node_len = len(self.index[rand_mods[0]])
m1 = rand_mods[0]
m2 = rand_mods[1]
# select a random node within one module
node_list = list(self.index[m1])
rand_perm = np.random.permutation(node_list)
n = rand_perm[0]
return self.compute_node_update(n,m1,m2)
def store_best(self):
""" Keeps the best partition stored for later. It should 'refresh' each time. """
#attempting to initialize this every time this function is called...make sure this works
self.bestindex = dict()
#Store references to the original graph and label dict
self.bestindex = copy.deepcopy(self.index)
def index_as_node_names(self):
""" index by default contains references to integers represented the
nodes as indexed in the adjacency matrix defined in the original graph.
This will return the index (partition) using the graph node names"""
named_part = []
for nmod, part in self.index.items():
named_part.append( [self._node_names[x] for x in part] )
return named_part
def check_integrity(self, partition):
""" Raises error if partition structure contains
empty partitions or Nan values"""
for tmpset in partition.values():
if tmpset == set([]):
raise ValueError("Partition has empty key : %s"%partition)
if any([np.isnan(x) for x in tmpset]):
raise ValueError("Partition contains NaN value(s)")
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def random_modular_graph(nnod, nmod, av_degree, between_fraction=0.0):
"""
Parameters
----------
nnod : int
Total number of nodes in the graph.
nmod : int
Number of modules. Note that nmod must divide nnod evenly.
av_degree : int
Average degree of the nodes.
between_fraction : float
A number in [0,1], indicating the fraction of edges in each module which
are wired to go between modules.
"""
# sanity checks:
if nnod%nmod:
raise ValueError("nmod must divide nnod evenly")
# Compute the number of nodes per module
nnod_mod = nnod // nmod
# The average degree requested can't be more than what the graph can
# support if it were to be fully dense
if av_degree > nnod_mod - 1:
e = "av_degree can not be larger than (nnod_mod-1) = %i" % (nnod_mod-1)
raise ValueError(e)
# Compute the probabilities to generate the graph with, both for
# within-module (p_in) and between-modules (p_out). See [1] L. Danon,
# A. Díaz-Guilera, J. Duch, and A. Arenas, “Comparing community structure
# identification," Journal of Statistical Mechanics: Theory and Experiment,
# 2005. for definitions of these quantities.
z_out = between_fraction*av_degree
p_in = (av_degree-z_out)/(nnod_mod-1.0)
p_out = float(z_out)/(nnod-nnod_mod)
# Some sanity checks
assert 0 <= p_in <=1, "Invalid p_in=%s, not in [0,1]" % p_in
assert 0 <= p_out <=1, "Invalid p_out=%s, not in [0,1]" % p_out
# Create initial matrix with uniform random numbers in the 0-1 interval.
mat = util.symm_rand_arr(nnod)
# Create the masking matrix
blocks = [np.ones((nnod_mod, nnod_mod))] * nmod
mask = util.diag_stack(blocks)
# Threshold the random matrix to create an actual adjacency graph.
# Emi's trick: we need to use thresholding in only certain parts of the
# matrix, corresponding to where the mask is 0 or 1. Rather than having a
# complex indexing operation, we'll just multiply the numbers in one region
# by -1, and then we can do the thresholding over negative and positive
# values. As long as we correct for this, it's a much simpler approach.
mat[mask==1] *= -1
adj = np.zeros((nnod, nnod))
# Careful to flip the sign of the thresholding for p_in, since we used the
# -1 trick above
adj[np.logical_and(-p_in < mat, mat <= 0)] = 1
adj[np.logical_and(0 < mat, mat < p_out)] = 1
# no self-links
util.fill_diagonal(adj, 0)
# Our return object is a graph, not the adjacency matrix
return nx.from_numpy_matrix(adj)
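# Illustrative usage sketch (an addition, not original code; parameter values
# are arbitrary): build a small planted-modular graph and verify its average
# degree is roughly the requested one.
def _example_random_modular_graph():
    g = random_modular_graph(nnod=120, nmod=4, av_degree=10, between_fraction=0.1)
    av_degree = 2.0 * g.number_of_edges() / g.number_of_nodes()
    print('average degree ~ %.2f (requested 10)' % av_degree)
    return g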
def rename_keys(dct, key):
"""This function reads in a partition and a single module to be
    removed, pops out the value(s), and shifts the key names accordingly.
Parameters
----------
dct : dict
Input dict with all integer keys.
key : int
Key after which all other keys are downshifted by one.
Returns
-------
None. The input dict is modified in place.
"""
for m in range(key, len(dct)):
try:
dct[m] = dct.pop(m+1)
except KeyError:
# If we can't pop a key, it's simply missing from the dict and we
# can safely ignore it. This is likely to happen at the edge of
# the dict, if the function is called on the last key.
pass
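# Illustrative sketch (an addition, not original code): after popping module 1
# from a partition dict, rename_keys shifts the higher keys down so the keys
# stay contiguous (0, 1, 2, ...).
def _example_rename_keys():
    part = {0: set([0, 1]), 1: set([2]), 2: set([3, 4]), 3: set([5])}
    part.pop(1)           # drop module 1
    rename_keys(part, 1)  # shift keys 2 and 3 down to 1 and 2
    print(part)           # -> {0: {0, 1}, 1: {3, 4}, 2: {5}}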
def rand_partition(g, num_mods=None):
"""This function takes in a graph and returns a dictionary of labels for
each node. Eventually it needs to be part of the simulated annealing
program, but for now it will just make a random partition.
Parameters
----------
g : graph
Graph for which the partition is to be computed.
num_mods : optional, int
If given, the random partition will have these many modules. If not
given, the number of modules in the partition will be chosen as at
random, up to the number of nodes in the graph."""
num_nodes = g.number_of_nodes()
# randomly select a number of modules
if num_mods is None:
num_mods = np.random.randint(1, num_nodes)
# randomize the order of nodes into a list
rand_nodes = np.random.permutation(num_nodes)
# We'll use this twice below, don't re-generate it.
mod_range = range(num_mods)
# set up a dictionary containing each module and the nodes under it.
# Note: the following loop *does* cover the entire range, even if it
# doesn't appear obvious immediately. The easiest way to see this is to
# write the execution of the loop row-wise, assuming an ordered permutation
# (rand_nodes), and then to read it column-wise. It will be then obvious
# that when each column ends at the last row, the next column starts with
# the next node in the list, and no node is ever skipped.
out = [set(rand_nodes[i::num_mods]) for i in mod_range]
## # a simpler version of the partitioning
## # We need to split the list of nodes into (num_mods) partitions which means we need (num_mods-1) slices.
## # The slices need to be in increasing order so we can use them as indices
## rand_slices=sort(np.random.permutation(rand_nodes)[:num_mods-1])
## # initialize a dictionary
## out = dict()
## # initialize the first element of the node list
## init_node=0
## for m in range_mods:
## #length of the current module
## len_mod=rand_slices[s]-init_node
## out[mod_ind] = rand_nodes[init_node:len_mod+init_node]
## init_node=rand_slices[m]
# The output is the final partition
return dict(zip(mod_range,out))
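# Small sanity-check sketch (an addition, not original code): the interleaved
# slicing above assigns every node to exactly one module, so the modules are
# pairwise disjoint and their union covers the whole graph.
def _check_rand_partition_covers_graph(g, num_mods=3):
    part = rand_partition(g, num_mods)
    seen = set()
    for nodes in part.values():
        assert seen.isdisjoint(nodes)
        seen |= set(nodes)
    assert len(seen) == g.number_of_nodes()
    return part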
def perfect_partition(nmod,nnod_mod):
"""This function takes in the number of modules and number of nodes per module
and returns the perfect partition depending on the number of modules
where the module number is fixed according to random_modular_graph()"""
#empty dictionary to fill with the correct partition
part=dict()
#set up a dictionary containing each module and the nodes under it
for m in range(nmod):
part[m]=set(np.arange(nnod_mod)+m*nnod_mod) #dict([(nmod,nnod)])# for x in range(num_mods)])
#print 'Part ' + str(m) + ': '+ str(part[m])
return part
def plot_partition(g,part,title,fname='figure',nod_labels = None, pos = None,
within_mod = 'none', part_coeff = 'none',les_dam='none'):
"""This function takes in a graph and a partition and makes a figure that
has each node labeled according to its partition assignment"""
write_labels = False
nnod = g.number_of_nodes()
    if nod_labels is None:
nod_labels = dict(zip(range(nnod),range(nnod)))
else:
nod_labels = dict(zip(range(nnod),nod_labels))
plt.figure()
plt.subplot(111)
plt.axis('off')
    if pos is None:
pos=nx.circular_layout(g)
#col=colors.cnames.keys()
col = ['r','g','b','m','c','y']
col2 = ['#000066','#000099','#660000','#CC6633','#FF0099','#FF00FF','#33FFFF','#663366','#FFCC33','#CCFF66','#FFCC99','#33CCCC','#FF6600','#FFCCFF','#CCFFFF','#CC6699','#CC9900','#FF6600','#99FF66','#CC0033','#99FFFF','#CC00CC','#CC99CC','#660066','#33CC66','#336699','#3399FF','#339900','#003300','#00CC00','#330033','#333399','#0033CC','#333333','#339966','#333300']
niter = 0
edge_list_between = []
for m,val in part.items():
if niter <len(col):
if within_mod == 'none': #note: assumes part_coeff also there
for v in val:
if les_dam != 'none':
plt.scatter(pos[v][0],pos[v][1],s=100*les_dam[v],c='orange',marker=(10,1,0))
nx.draw_networkx_nodes(g,pos,nodelist=list(val),node_color=col[niter],node_size=50)
else:
for v in val:
if les_dam != 'none':
plt.scatter(pos[v][0],pos[v][1],s=500*les_dam[v],c='orange',marker=(10,1,0))
if within_mod[v] > 1:
nx.draw_networkx_nodes(g,pos,nodelist=[v],node_color=col[niter],node_size=part_coeff[v] * 500+50,node_shape='s',linewidths=2)
else:
nx.draw_networkx_nodes(g,pos,nodelist=[v],node_color=col[niter],node_size=part_coeff[v] * 500+50,node_shape='o',linewidths=0.5)
else:
#print 'out of colors!!'
if within_mod == 'none': #note: assumes part_coeff also there
for v in val:
if les_dam != 'none':
plt.scatter(pos[v][0],pos[v][1],s=100*les_dam[v],c='orange',marker=(10,1,0))
nx.draw_networkx_nodes(g,pos,nodelist=list(val),node_color=col2[niter],node_size=50)
else:
for v in val:
if les_dam != 'none':
plt.scatter(pos[v][0],pos[v][1],s=500*les_dam[v],c='orange',marker=(10,1,0))
if within_mod[v] > 1:
nx.draw_networkx_nodes(g,pos,nodelist=[v],node_color=col2[niter],node_size=part_coeff[v] * 500+50,node_shape='s',linewidths=2)
else:
nx.draw_networkx_nodes(g,pos,nodelist=[v],node_color=col2[niter],node_size=part_coeff[v] * 500+50,node_shape='o',linewidths=0.5)
val_array = np.array(val)
edge_list_within = []
for edg in g.edges():
#temp = np.array(edge_list_between)
n1_ind = np.where(val_array == edg[0])[0]
n2_ind = np.where(val_array == edg[1])[0]
#edg_ind = np.where(temp == edg)
if len(n1_ind) > 0 and len(n2_ind) > 0:
#add on the edge if it is within the partition
edge_list_within.append(edg)
elif len(n1_ind)>0 and len(n2_ind) == 0:
#add on the edge if it hasn't been seen before
edge_list_between.append(edg)
elif len(n2_ind)>0 and len(n1_ind) == 0:
edge_list_between.append(edg)
if niter <len(col):
nx.draw_networkx_edges(g,pos,edgelist=edge_list_within,edge_color=col[niter])
else:
nx.draw_networkx_edges(g,pos,edgelist=edge_list_within,edge_color=col2[niter])
niter += 1
#nx.draw_networkx_edges(g,pos,edgelist=nx.edges(g))
nx.draw_networkx_edges(g,pos,edgelist=edge_list_between,edge_color='k')
if write_labels:
nx.draw_networkx_labels(g,pos,nod_labels,font_size=6)
#add loop for damage labels
if les_dam != 'none':
for m,val in part.items():
for v in val:
if les_dam[v] > 0:
plt.scatter(pos[v][0],pos[v][1],s=500*les_dam[v]+100,c='orange',marker=(10,1,0))
plt.title(title)
#plt.savefig(fname)
#plt.close()
#plt.show()
def confusion_matrix(d1, d2):
"""Return the confusion matrix for two graph partitions.
See Danon et al, 2005, for definition details.
Parameters
----------
d1 : dict
dictionary with first partition.
d2 : dict
dictionary with second partition.
Returns
-------
N : numpy 2d array.
Confusion matrix for d1 and d2.
"""
# define a 'confusion matrix' where rows = 'real communities' and columns =
# 'found communities' The element of N (Nij) = the number of nodes in the
# real community i that appear in the found community j
# Compute the sets of the values of d1/d2 only once, to avoid quadratic
# recomputation.
rows = len(d1)
cols = len(d2)
sd1 = [set(d1[i]) for i in range(rows)]
sd2 = [set(d2[j]) for j in range(cols)]
N = np.empty((rows,cols))
for i, sd1i in enumerate(sd1):
for j, sd2j in enumerate(sd2):
N[i,j] = len(sd1i & sd2j)
return N
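# Illustrative sketch (an addition, not original code): for two partitions of
# the nodes 0..5, entry (i, j) counts the nodes shared by module i of the first
# partition and module j of the second.
def _example_confusion_matrix():
    d1 = {0: set([0, 1, 2]), 1: set([3, 4, 5])}
    d2 = {0: set([0, 1]), 1: set([2, 3]), 2: set([4, 5])}
    N = confusion_matrix(d1, d2)
    print(N)  # [[ 2.  1.  0.]
              #  [ 0.  1.  2.]]
    return N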
def mutual_information(d1, d2):
"""Mutual information between two graph partitions.
Read in two dictionaries of sets (i.e. a graph partition) and assess how
similar they are using mutual information as in Danon, Diaz-Guilera, Duch &
Arenas, J Statistical Mechanics 2005.
Parameters
----------
d1 : dict
dictionary of 'real communities'
d2 : dict
dictionary of 'found communities'
Returns
-------
mi : float
Value of mutual information between the two partitions.
"""
log = np.log
nansum = np.nansum
N = confusion_matrix(d1, d2)
nsum_row = N.sum(0)[np.newaxis, :]
nsum_col = N.sum(1)[:, np.newaxis]
# Sanity checks: a zero in either of these can only happen if there was an
# empty module in one of the input partitions. Rather than manually check
# the entire partitions, we look for this problem at this stage, and bail
# if there was an empty module.
## if (nsum_row==0).any():
## EmptyModuleError("Empty module in second partition.")
## if (nsum_col==0).any():
## EmptyModuleError("Empty module in first partition.")
# nn is the total number of nodes
nn = nsum_row.sum()
num = nansum(N*log(N*nn/(nsum_row*nsum_col)))
den = nansum(nsum_row*log(nsum_row/nn)) + nansum(nsum_col*log(nsum_col/nn))
return -2*num/den
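# Illustrative sketch (an addition, not original code): identical partitions
# give a normalized mutual information of exactly 1.0; the value decreases as
# the partitions disagree.
def _example_mutual_information():
    d1 = {0: set([0, 1, 2]), 1: set([3, 4, 5])}
    print(mutual_information(d1, d1))  # 1.0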
def decide_if_keeping(dE,temperature):
"""Function which uses the rule from Guimera & Amaral (2005) Nature paper to decide whether or not to keep new partition
Parameters:
dE = delta energy
    temperature = current annealing temperature
    Returns:
    keep = True if the new partition should be kept, False otherwise """
if dE <= 0:
return True
else:
return np.random.random() < math.exp(-dE/temperature)
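# Illustrative sketch (an addition, not original code): downhill moves (dE <= 0)
# are always kept; uphill moves are kept with probability exp(-dE/temperature),
# which shrinks as the system cools.
def _example_decide_if_keeping(dE=0.05, temperature=1.0, trials=10000):
    kept = sum(decide_if_keeping(dE, temperature) for _ in range(trials))
    print('empirical accept rate:   %.3f' % (float(kept) / trials))
    print('theoretical accept rate: %.3f' % math.exp(-dE / temperature))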
def simulated_annealing(g, p0=None, temperature = 50, temp_scaling = 0.995, tmin=1e-5,
bad_accept_mod_ratio_max = 0.8 ,
bad_accept_nod_ratio_max = 0.8, accept_mod_ratio_min =
0.05, accept_nod_ratio_min = 0.05,
extra_info = False,
debug = False):
""" This function does simulated annealing on a graph
Parameters:
g = graph #to anneal over
temperature = 5777 #temperature of the sun in Kelvin, where we're starting
tmin = 0.0 # minimum temperature
n_nochanges = 25 # number of times to allow no change in modularity before
breaking out of loop search
Return:
part = final partition
M = final modularity """
#Make a random partition for the graph
nnod = g.number_of_nodes()
part = dict()
#check if there is only one module or nnod modules
while (len(part) <= 1) or (len(part) == nnod):
part = rand_partition(g)
# make a graph partition object
if p0 is None:
graph_partition = GraphPartition(g,part)
else:
graph_partition = p0
# The number of times we switch nodes in a partition and the number of
# times we modify the partition, at each temperature. These values were
# suggested by Guimera and Amaral, Nature 443, p895. This is achieved
# simply by running two nested loops of length nnod
nnod = graph_partition.num_nodes
rnod = range(nnod)
#initialize some counters
count = 0
#Initialize empty lists for keeping track of values
energy_array = []#negative modularity
temp_array = []
energy_best = 0
energy = -graph_partition.modularity()
energy_array.append(energy)
while temperature > tmin:
# Initialize counters
bad_accept_mod = 0
accept_mod = 0
reject_mod = 0
count_mod = 0
count_bad_mod = 0.0001 # small offset to avoid occasional 1/0 errors
for i_mod in rnod:
# counters for module change attempts
count_mod+=1
count+=1
# Assess energy change of a new partition without changing the partition
calc_dict,e_new,a_new,delta_energy,movetype,p1,p2,p3 = graph_partition.random_mod()
# Increase the 'count_bad_mod' if the new partition increases the energy
if delta_energy > 0:
count_bad_mod += 1
# Decide whether the new partition is better than the old
keep = decide_if_keeping(delta_energy,temperature)
# Append the current temperature to the temp list
temp_array.append(temperature)
if keep:
# this applies changes in place if energy decreased; the
# modules will either be merged or split depending on a random
# coin flip
if movetype=='merge':
graph_partition.apply_module_merge(p1,p2,calc_dict,e_new,a_new)
else:
graph_partition.apply_module_split(p1,p2,p3,calc_dict,e_new,a_new)
# add the change in energy to the total energy
energy += delta_energy
accept_mod += 1 #counts times accept mod because lower energy
# Increase the 'bad_accept_mod' if the new partition increases
# the energy and was accepted
if delta_energy > 0 :
bad_accept_mod += 1
if debug:
debug_partition = GraphPartition(g, graph_partition.index)
npt.assert_almost_equal(debug_partition.modularity(),
graph_partition.modularity(), 11)
for mod in graph_partition.index:
if len(graph_partition.index[mod]) < 1:
                            raise EmptyModuleError('Empty module after module %s, SA' % movetype)
#maybe store the best one here too?
#graph_partition.store_best()
#else:
#make a new graph partition with the last partition
#reject_mod += 1
#graph_partition = GraphPartition(g,graph_partition.index)
if energy < energy_best:
energy_best = energy
graph_partition.store_best()
energy_array.append(energy)
#break out if we are accepting too many "bad" options (early on)
#break out if we are accepting too few options (later on)
if count_mod > 10:
# Only compute this quantity after enough steps for these
# ratios to make any sense (they are ~1 at the first step).
bad_accept_mod_ratio = float(bad_accept_mod)/(count_bad_mod)
accept_mod_ratio = float(accept_mod)/(count_mod)
#print 'ba_mod_r', bad_accept_mod_ratio # dbg
if (bad_accept_mod_ratio > bad_accept_mod_ratio_max) \
or (accept_mod_ratio < accept_mod_ratio_min):
#print 'MOD BREAK'
break
# Second loop over node changes
bad_accept_nod = 0
accept_nod = 0
count_nod = 0
        count_bad_nod = 0.0001 # small offset to avoid occasional 1/0 errors
for i_nod in rnod:
count_nod+=1
count+=1
#if (np.mod(count,10000)==0) and (temperature < 1e-1):
# plot_partition(g,part,'../SA_graphs2/try'+str(count)+'.png')
# Assess energy change of a new partition
calc_dict,e_new,a_new,delta_energy,p1,p2,p3 = graph_partition.random_node()
if delta_energy > 0:
count_bad_nod += 1
temp_array.append(temperature)
keep = decide_if_keeping(delta_energy,temperature)
if keep:
nnn = graph_partition.apply_node_update(p1,p2,p3,calc_dict,e_new,a_new)
energy += delta_energy
accept_nod += 1
if delta_energy > 0 :
bad_accept_nod += 1
#maybe store the best one here too?
#graph_partition.store_best()
if debug:
debug_partition = GraphPartition(g,
graph_partition.index)
npt.assert_almost_equal(debug_partition.modularity(),
graph_partition.modularity(), 11)
for mod in graph_partition.index:
if len(graph_partition.index[mod]) < 1:
                            raise EmptyModuleError('Empty module after node move, SA')
#else:
#graph_partition = GraphPartition(g,graph_partition.index)
if energy < energy_best:
energy_best = energy
graph_partition.store_best()
energy_array.append(energy)
#break out if we are accepting too many "bad" options (early on)
#break out if we are accepting too few options (later on)
if count_nod > 10:
bad_accept_nod_ratio = float(bad_accept_nod)/count_bad_nod
accept_nod_ratio = float(accept_nod)/(count_nod)
# if (bad_accept_nod_ratio > bad_accept_nod_ratio_max) \
# or (accept_nod_ratio < accept_nod_ratio_min):
# print 'nod BREAK'
# break
if (bad_accept_nod_ratio > bad_accept_nod_ratio_max):
#print 'too many accept'
break
if (accept_nod_ratio < accept_nod_ratio_min):
#print 'too many reject'
break
if 0: #for debugging. 0 suppresses this for now.
print('T: %.2e' % temperature,
'accept nod ratio: %.2e ' %accept_nod_ratio,
'bad accept nod ratio: %.2e' % bad_accept_nod_ratio,
'energy: %.2e' % energy)
#print 'T: %.2e' % temperature, \
# 'accept mod ratio: %.2e ' %accept_mod_ratio, \
# 'bad accept mod ratio: %.2e' % bad_accept_mod_ratio, \
# 'energy: %.2e' %energy, 'best: %.2e' %energy_best
print('T: %.2e' % temperature,
'energy: %.2e' %energy, 'best: %.2e' %energy_best)
temperature *= temp_scaling
#NEED TO APPLY THE BEST PARTITION JUST IN CASE...
#make a new graph object, apply the best partition
#graph_partition.index = graph_partition.bestindex
print(graph_partition.modularity())
graph_part_final = GraphPartition(g,graph_partition.bestindex)
if debug:
debug_partition = GraphPartition(g, graph_part_final.index)
npt.assert_almost_equal(debug_partition.modularity(),
graph_part_final.modularity(), 11)
debug_partition.check_integrity(graph_part_final.index)
if extra_info:
extra_dict = dict(energy = energy_array, temp = temp_array)
graph_part_final.check_integrity(graph_part_final.index)
#return graph_partition, extra_dict
return graph_part_final, extra_dict
else:
#return graph_partition
#print graph_part_final.modularity()
#check that the energy matches the computed modularity value of the partition
finalmodval = graph_part_final.modularity()
print(finalmodval)
print(-energy_best)
print(graph_part_final.index)
print(np.abs(finalmodval - (-energy_best)))
if np.abs(finalmodval - (-energy_best)) > 0.000001: #to account for float error
raise ValueError('mismatch in energy and modularity')
return graph_part_final, graph_part_final.modularity()
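# End-to-end usage sketch (an addition, not original code; parameter values are
# arbitrary): anneal a small planted-modular graph and compare the recovered
# partition against the planted one with normalized mutual information.
def _example_simulated_annealing():
    nmod, nnod_mod = 4, 16
    g = random_modular_graph(nmod * nnod_mod, nmod, av_degree=8,
                             between_fraction=0.1)
    best_part, extra = simulated_annealing(g, temperature=10, tmin=1e-2,
                                           extra_info=True)
    truth = perfect_partition(nmod, nnod_mod)
    print('modularity: %.3f' % best_part.modularity())
    print('NMI vs planted partition: %.3f'
          % mutual_information(truth, best_part.index))
    return best_part, extra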
def modularity_matrix(g):
"""Modularity matrix of the graph.
Parameters
----------
g : NetworkX graph
input graph
Returns
-------
    B : numpy array
        modularity matrix, B = A - k k.T / (2 m)
"""
A = np.asarray(nx.adjacency_matrix(g).todense()) # Must convert to dense matrix before making into a numpy array (line was previously: A = np.asarray(nx.adjacency_matrix(g)))
k = np.sum(A, axis=0) #vertex degree
M = np.sum(k) # 2x number of edges
return A - ((k * k[:, None]) / float(M))
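# Quick property-check sketch (an addition, not original code): every row of
# the modularity matrix sums to zero, because the row sums of A and of
# k k.T / (2m) both equal the node degrees.
def _check_modularity_matrix(g):
    B = modularity_matrix(g)
    assert np.allclose(B.sum(axis=1), 0.0)
    return B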
def newman_partition(g, max_div=np.inf):
"""Greedy estimation of optimal partition of a graph, using
Newman (2006) spectral method.
Parameters
----------
g : NetworkX Graph
Input graph.
max_div : int
Maximum number of times to sub-divide partitions.
Returns
-------
p : GraphPartition
Estimated optimal partitioning.
"""
A = np.asarray(nx.adjacency_matrix(g).todense()) # Must convert to dense matrix before making into a numpy array (line was previously:(A = np.asarray(nx.adjacency_matrix(g)))
if not A.sum() == A.astype(bool).sum():
raise ValueError('Adjacency matrix is weighted, need binary matrix')
## add line to binarize adj_matrix if not binary
## warning?
nedges = g.number_of_edges()
k = np.sum(A, axis=0)
M = np.sum(A) # 2x number of edges
B = modularity_matrix(g)
p = range(len(g))
def _divide_partition(p, max_div=np.inf):
"""
Parameters
----------
p : array of ints
Node labels.
max_div : int
maximum number of divisions (default np.inf)
Returns
-------
out : list of ints
Partitioning of node labels.
"""
p = np.asarray(p)
if max_div <= 0 or p.size == 1:
return [p]
# Construct the subgraph modularity matrix
A_ = A[p, p[:, None]]
graph_A_ = nx.from_numpy_matrix(A_, nx.Graph())
# make sure partition has edges
if graph_A_.number_of_edges() <= 1:
return [p]
        ## grab the relevant part of the modularity matrix
Bij = B[p, p[:,None]]
w, v = sl.eigh(Bij, eigvals=(len(Bij) - 2, len(Bij) - 1))
# Find the maximum eigenvalue of the modularity matrix
# If it is smaller than zero, then we won't be able to
# increase the modularity any further by partitioning.
n = np.argsort(w)[-1]
if w[n] <= 0:
return [p]
# Construct the partition vector s, that has value -1 corresponding
# to nodes in the first partition and 1 for nodes in the second
v_max = v[:, n]
mask = (v_max < 0)
# if the mask is all True or all False, this will not split the partition
# and would create an empty partition
# catch by checking max vector contains pos and neg values
is_positive = np.sign(v_max) >= 0
is_negative = np.sign(v_max) <= 0
if np.all(is_positive) or np.all(is_negative):
return [p]
s = np.ones_like(v_max)
s[mask] = -1
# Compute the increase in modularity due to this partitioning.
# If it is less than zero, we should rather not have partitioned.
Bg = Bij - np.diag(Bij.sum(axis=1))
deltaq = s[None,:].dot(Bg).dot(s) / (4.0 * nedges)
if deltaq <= 0:
return [p]
# Make the partitioning, and subdivide each
# partition in turn.
out = []
for pp in (p[mask], p[~mask]):
out.extend(_divide_partition(pp, max_div - 1))
return out
p = _divide_partition(p, max_div)
index = {}
for k, nodes in enumerate(p):
index[k] = set(nodes)
return GraphPartition(g, index)
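# Usage sketch (an addition, not original code; parameter values are arbitrary):
# run the spectral partitioning on a small planted-modular graph and report the
# modularity of the result.
def _example_newman_partition():
    g = random_modular_graph(64, 4, av_degree=8, between_fraction=0.1)
    p = newman_partition(g)
    print('modules found: %d' % len(p.index))
    print('modularity: %.3f' % p.modularity())
    return p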
def adjust_partition(g, partition, max_iter=None):
"""Adjust partition, using the heuristic method described in Newman (2006),
to have higher modularity.
## TODO BROKEN FIX ME
Parameters
----------
g : NetworkX graph
Input graph.
partition : GraphPartition
Existing partitioning.
max_iter : int, optional
Maximum number of improvement iterations. By default,
continue until 10 iterations without any improvement.
Returns
-------
improved_partition : GraphPartition
Partition with higher modularity.
"""
# Static copy of the entire list of nodes in the graph
    nodes = list(g.nodes())
# Set of module labels in the initial partition
P = set(range(len(partition)))
# Create a dict that maps nodes to the partition label they belong to.
# This is effectively a reverse of the partition.index.
node_map = {}
for p in P:
for node in partition.index[p]:
node_map[node] = p
L = len(nodes)
no_improvement = 0
iterations = 0
max_iter = max_iter or np.inf
best_modularity = partition.modularity()
while nodes and no_improvement < 10 and iterations <= max_iter:
moves = []
move_modularity = []
iterations += 1
for n in nodes:
for p in P.difference([node_map[n]]):
moves.append((n, node_map[n], p))
M = -partition.compute_node_update(n, node_map[n], p)[3]
move_modularity.append(M)
(n, p0, p1) = moves[np.argmax(move_modularity)]
split_modules, e_new, a_new = partition.compute_node_update(n, p0, p1)[:3]
partition.apply_node_update(n, p0, p1, split_modules, e_new, a_new)
node_map[n] = p1
nodes.remove(n)
print('[%d/%d] -> %.4f' % (len(nodes), L, partition.modularity()))
M = partition.modularity()
if M > best_modularity:
gp_best = copy.deepcopy(partition)
best_modularity = M
no_improvement = 0
else:
no_improvement += 1
return gp_best
| bsd-3-clause |
h2educ/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and can let any indexable datastructure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
person142/scipy | scipy/misc/common.py | 20 | 9678 | """
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from numpy import arange, newaxis, hstack, prod, array, frombuffer, load
__all__ = ['central_diff_weights', 'derivative', 'ascent', 'face',
'electrocardiogram']
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
    derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+ho*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Returns
-------
w : ndarray
Weights for an Np-point central derivative. Its size is `Np`.
Notes
-----
Can be inaccurate for a large number of points.
Examples
--------
We can calculate a derivative value of a function.
>>> from scipy.misc import central_diff_weights
>>> def f(x):
... return 2 * x**2 + 3
>>> x = 3.0 # derivative point
>>> h = 0.1 # differential step
>>> Np = 3 # point number for central derivative
>>> weights = central_diff_weights(Np) # weights for first derivative
>>> vals = [f(x + (i - Np/2) * h) for i in range(Np)]
>>> sum(w * v for (w, v) in zip(weights, vals))/h
11.79999999999998
This value is close to the analytical solution:
f'(x) = 4x, so f'(3) = 12
References
----------
.. [1] https://en.wikipedia.org/wiki/Finite_difference
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = prod(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the nth derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the nth derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which the nth derivative is found.
dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
Decreasing the step size too small can result in round-off error.
Examples
--------
>>> from scipy.misc import derivative
>>> def f(x):
... return x**3 + x**2
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / prod((dx,)*n,axis=0)
def ascent():
"""
    Get an 8-bit grayscale, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
If True return 8-bit grey-scale image, otherwise return a color image
Returns
-------
face : ndarray
        image of a raccoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
255
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = frombuffer(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
def electrocardiogram():
"""
Load an electrocardiogram as an example for a 1-D signal.
The returned signal is a 5 minute long electrocardiogram (ECG), a medical
recording of the heart's electrical activity, sampled at 360 Hz.
Returns
-------
ecg : ndarray
The electrocardiogram in millivolt (mV) sampled at 360 Hz.
Notes
-----
The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_
(lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on
PhysioNet [2]_. The excerpt includes noise induced artifacts, typical
heartbeats as well as pathological changes.
.. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208
.. versionadded:: 1.1.0
References
----------
.. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
(PMID: 11446209); :doi:`10.13026/C2F305`
.. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
PhysioToolkit, and PhysioNet: Components of a New Research Resource
for Complex Physiologic Signals. Circulation 101(23):e215-e220;
:doi:`10.1161/01.CIR.101.23.e215`
Examples
--------
>>> from scipy.misc import electrocardiogram
>>> ecg = electrocardiogram()
>>> ecg
array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385])
>>> ecg.shape, ecg.mean(), ecg.std()
((108000,), -0.16510875, 0.5992473991177294)
As stated the signal features several areas with a different morphology.
E.g., the first few seconds show the electrical activity of a heart in
normal sinus rhythm as seen below.
>>> import matplotlib.pyplot as plt
>>> fs = 360
>>> time = np.arange(ecg.size) / fs
>>> plt.plot(time, ecg)
>>> plt.xlabel("time in s")
>>> plt.ylabel("ECG in mV")
>>> plt.xlim(9, 10.2)
>>> plt.ylim(-1, 1.5)
>>> plt.show()
After second 16, however, the first premature ventricular contractions, also
called extrasystoles, appear. These have a different morphology compared to
typical heartbeats. The difference can easily be observed in the following
plot.
>>> plt.plot(time, ecg)
>>> plt.xlabel("time in s")
>>> plt.ylabel("ECG in mV")
>>> plt.xlim(46.5, 50)
>>> plt.ylim(-2, 1.5)
>>> plt.show()
At several points large artifacts disturb the recording, e.g.:
>>> plt.plot(time, ecg)
>>> plt.xlabel("time in s")
>>> plt.ylabel("ECG in mV")
>>> plt.xlim(207, 215)
>>> plt.ylim(-2, 3.5)
>>> plt.show()
Finally, examining the power spectrum reveals that most of the biosignal is
made up of lower frequencies. At 60 Hz the noise induced by the mains
electricity can be clearly observed.
>>> from scipy.signal import welch
>>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum")
>>> plt.semilogy(f, Pxx)
>>> plt.xlabel("Frequency in Hz")
>>> plt.ylabel("Power spectrum of the ECG in mV**2")
>>> plt.xlim(f[[0, -1]])
>>> plt.show()
"""
import os
file_path = os.path.join(os.path.dirname(__file__), "ecg.dat")
with load(file_path) as file:
ecg = file["ecg"].astype(int) # np.uint16 -> int
# Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain
ecg = (ecg - 1024) / 200.0
return ecg
| bsd-3-clause |
wzbozon/statsmodels | statsmodels/sandbox/examples/ex_mixed_lls_0.py | 34 | 5233 | # -*- coding: utf-8 -*-
"""Example using OneWayMixed
Created on Sat Dec 03 10:15:55 2011
Author: Josef Perktold
This example constructs a linear model with individual specific random
effects and random coefficients, and uses OneWayMixed to estimate it.
"""
from __future__ import print_function
import numpy as np
from statsmodels.sandbox.panel.mixed import OneWayMixed, Unit
examples = ['ex1']
if 'ex1' in examples:
#np.random.seed(54321)
np.random.seed(978326)
nsubj = 2000
units = []
nobs_i = 4 #number of observations per unit, changed below
    nx = 4 # number of fixed effects
    nz = 2 # number of random effects
beta = np.ones(nx)
gamma = 0.5 * np.ones(nz) #mean of random effect
gamma[0] = 0
gamma_re_true = []
for i in range(nsubj):
#create data for one unit
#random effect/coefficient
gamma_re = gamma + 0.2 * np.random.standard_normal(nz)
#store true parameter for checking
gamma_re_true.append(gamma_re)
#for testing unbalanced case, let's change nobs per unit
if i > nsubj//4:
nobs_i = 6
#generate exogenous variables
X = np.random.standard_normal((nobs_i, nx))
Z = np.random.standard_normal((nobs_i, nz-1))
Z = np.column_stack((np.ones(nobs_i), Z))
noise = 0.1 * np.random.randn(nobs_i) #sig_e = 0.1
#generate endogenous variable
Y = np.dot(X, beta) + np.dot(Z, gamma_re) + noise
#add random effect design matrix also to fixed effects to
#capture the mean
#this seems to be necessary to force mean of RE to zero !?
#(It's not required for estimation but interpretation of random
#effects covariance matrix changes - still need to check details.
X = np.hstack((X,Z))
#create units and append to list
unit = Unit(Y, X, Z)
units.append(unit)
m = OneWayMixed(units)
import time
t0 = time.time()
m.initialize()
res = m.fit(maxiter=100, rtol=1.0e-5, params_rtol=1e-6, params_atol=1e-6)
t1 = time.time()
print('time for initialize and fit', t1-t0)
print('number of iterations', m.iterations)
#print(dir(m)
#print(vars(m)
print('\nestimates for fixed effects')
print(m.a)
print(m.params)
bfixed_cov = m.cov_fixed()
print('beta fixed standard errors')
print(np.sqrt(np.diag(bfixed_cov)))
print(m.bse)
b_re = m.params_random_units
print('RE mean:', b_re.mean(0))
print('RE columns std', b_re.std(0))
print('np.cov(b_re, rowvar=0), sample statistic')
print(np.cov(b_re, rowvar=0))
print('std of above')
print(np.sqrt(np.diag(np.cov(b_re, rowvar=0))))
print('m.cov_random()')
print(m.cov_random())
print('std of above')
print(res.std_random())
print(np.sqrt(np.diag(m.cov_random())))
print('\n(non)convergence of llf')
print(m.history['llf'][-4:])
print('convergence of parameters')
#print(np.diff(np.vstack(m.history[-4:])[:,1:],axis=0)
print(np.diff(np.vstack(m.history['params'][-4:]),axis=0))
print('convergence of D')
print(np.diff(np.array(m.history['D'][-4:]), axis=0))
#zdotb = np.array([np.dot(unit.Z, unit.b) for unit in m.units])
zb = np.array([(unit.Z * unit.b[None,:]).sum(0) for unit in m.units])
'''if Z is not included in X:
>>> np.dot(b_re.T, b_re)/100
array([[ 0.03270611, -0.00916051],
[-0.00916051, 0.26432783]])
>>> m.cov_random()
array([[ 0.0348722 , -0.00909159],
[-0.00909159, 0.26846254]])
>>> #note cov_random doesn't subtract mean!
'''
print('\nchecking the random effects distribution and prediction')
gamma_re_true = np.array(gamma_re_true)
print('mean of random effect true', gamma_re_true.mean(0))
print('mean from fixed effects ', m.params[-2:])
print('mean of estimated RE ', b_re.mean(0))
print('')
absmean_true = np.abs(gamma_re_true).mean(0)
mape = ((m.params[-2:] + b_re) / gamma_re_true - 1).mean(0)*100
mean_abs_perc = np.abs((m.params[-2:] + b_re) - gamma_re_true).mean(0) \
/ absmean_true*100
median_abs_perc = np.median(np.abs((m.params[-2:] + b_re) - gamma_re_true), 0) \
/ absmean_true*100
rmse_perc = ((m.params[-2:] + b_re) - gamma_re_true).std(0) \
/ absmean_true*100
print('mape ', mape)
print('mean_abs_perc ', mean_abs_perc)
print('median_abs_perc', median_abs_perc)
print('rmse_perc (std)', rmse_perc)
from numpy.testing import assert_almost_equal
#assert is for n_units=100 in original example
#I changed random number generation, so this won't work anymore
#assert_almost_equal(rmse_perc, [ 34.14783884, 11.6031684 ], decimal=8)
#now returns res
print(res.llf) #based on MLE, does not include constant
print(res.tvalues)
print(res.pvalues)
print(res.t_test([1,-1,0,0,0,0]))
print('test mean of both random effects variables is zero')
print(res.f_test([[0,0,0,0,1,0], [0,0,0,0,0,1]]))
plots = res.plot_random_univariate(bins=50)
fig = res.plot_scatter_pairs(0, 1)
import matplotlib.pyplot as plt
plt.show()
| bsd-3-clause |
mik01aj/corthus | corthus/toolkit/PairManager.py | 1 | 2716 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
A class for managing large sets of translation pairs.
"""
from __future__ import unicode_literals
import codecs
import re
from NewAlignment import NewAlignment
class PairManager:
def __init__(self):
self.pairs = {}
self.pairs_by_prob = []
self.hapax_prob = None
@classmethod
def from_file(cls, file_path):
m = re.match('.*(\w\w)-(\w\w)$', file_path)
lang1 = m.group(1)
lang2 = m.group(2)
pm = PairManager()
with codecs.open(file_path) as f:
na = NewAlignment.read(f)
first = True
for row in na:
if first:
pm.hapax_prob = float(row['_f'].split()[-1])
first = False
continue
count = int(row['_f'].split()[0])
prob = float(row['_f'].split()[-1])
pm.pairs[row[lang1], row[lang2]] = (count, prob)
pm.pairs_by_prob.append((prob, row[lang1], row[lang2]))
pm.pairs_by_prob.sort(reverse=True)
return pm
def iter_best_pairs(self, threshold=None, count=None):
"""Iterate over `count` best pairs. If there are more than
`count` with probability estimated above `threshold`, it will
yield more."""
for i, (prob, s1, s2) in enumerate(self.pairs_by_prob):
if threshold and prob < threshold:
if not count or i > count:
break
yield prob, s1, s2
def get_pair_prob(self, s1, s2):
try:
return self.pairs[s1, s2][1]
except KeyError:
return self.hapax_prob
def has_pair(self, s1, s2):
return (s1, s2) in self.pairs
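# Usage sketch (an addition, not original code). It assumes a pair file such as
# 'data/pairs.pl-cu' (as used in the __main__ block below) is available.
def _example_pair_manager(path='data/pairs.pl-cu'):
    pm = PairManager.from_file(path)
    # the best pairs: all above the threshold, and at least `count` overall
    for prob, s1, s2 in pm.iter_best_pairs(threshold=0.5, count=10):
        print('%.4f\t%s\t%s' % (prob, s1, s2))
    # a pair never seen in the file falls back to the hapax probability
    print('unseen pair prob: %s' % pm.get_pair_prob('foo', 'bar'))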
if __name__ == '__main__':
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
# size = 20
# fig = plt.figure(figsize=(size, size))
fig = plt.figure()
ax = fig.add_subplot(111)
pms = [PairManager.from_file('data/pairs.pl-cu'),
PairManager.from_file('data/pairs.cu-el'),
PairManager.from_file('data/pairs.pl-el')]
styles = ['D', 'o', 's']
for pm, st in zip(pms, styles):
histogram = { i : 0 for i in range(100) }
for count, _ in pm.pairs.itervalues():
try:
histogram[count] += 1
except KeyError:
histogram[count] = 1
xs, ys = zip(*histogram.items())
ax.plot(xs, ys, c=(0, 0, 0, 1))
ax.set_xlabel(r'liczba wystąpień pary', fontsize=20)
ax.set_ylabel(r'liczba par', fontsize=20)
ax.set_xlim([0, 100])
ax.set_ylim([0, 100])
ax.grid(True)
plt.show()
| apache-2.0 |
rseubert/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
wangmiao1981/spark | python/pyspark/pandas/window.py | 11 | 53852 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
from functools import partial
from typing import ( # noqa: F401 (SPARK-34943)
Any,
Callable,
Generic,
List,
Optional,
)
from pyspark.sql import Window
from pyspark.sql import functions as F
from pyspark.pandas.missing.window import (
MissingPandasLikeRolling,
MissingPandasLikeRollingGroupby,
MissingPandasLikeExpanding,
MissingPandasLikeExpandingGroupby,
)
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas._typing import FrameLike
from pyspark.pandas.groupby import GroupBy
from pyspark.pandas.internal import NATURAL_ORDER_COLUMN_NAME, SPARK_INDEX_NAME_FORMAT
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.utils import scol_for
from pyspark.sql.column import Column
from pyspark.sql.window import WindowSpec
class RollingAndExpanding(Generic[FrameLike], metaclass=ABCMeta):
def __init__(self, window: WindowSpec, min_periods: int):
self._window = window
        # This unbounded Window is used below to handle 'min_periods' for now.
self._unbounded_window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(
Window.unboundedPreceding, Window.currentRow
)
self._min_periods = min_periods
@abstractmethod
def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:
"""
Wraps a function that handles Spark column in order
to support it in both pandas-on-Spark Series and DataFrame.
Note that the given `func` name should be same as the API's method name.
"""
pass
@abstractmethod
def count(self) -> FrameLike:
pass
def sum(self) -> FrameLike:
def sum(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.sum(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(sum)
def min(self) -> FrameLike:
def min(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.min(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(min)
def max(self) -> FrameLike:
def max(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.max(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(max)
def mean(self) -> FrameLike:
def mean(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.mean(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(mean)
def std(self) -> FrameLike:
def std(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.stddev(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(std)
def var(self) -> FrameLike:
def var(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.variance(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(var)
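# Illustrative sketch (an addition, not part of the pandas-on-Spark API; `spark`
# is assumed to be an existing SparkSession): the aggregations above share one
# pattern -- compute the aggregate over the bounded window, but null it out until
# at least `min_periods` rows have been seen, using row_number() over an
# unbounded-preceding window.
def _example_min_periods_masking(spark, min_periods=3, window_size=3):
    from pyspark.sql import Window
    from pyspark.sql import functions as F
    df = spark.range(5).withColumnRenamed("id", "v")
    order = Window.orderBy("v")
    bounded = order.rowsBetween(-(window_size - 1), Window.currentRow)
    unbounded = order.rowsBetween(Window.unboundedPreceding, Window.currentRow)
    masked_sum = F.when(
        F.row_number().over(unbounded) >= min_periods, F.sum("v").over(bounded)
    ).otherwise(F.lit(None))
    # rows 0 and 1 get NULL; rows 2, 3, 4 get 3, 6, 9
    return df.withColumn("rolling_sum", masked_sum)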
class RollingLike(RollingAndExpanding[FrameLike]):
def __init__(
self,
window: int,
min_periods: Optional[int] = None,
):
if window < 0:
raise ValueError("window must be >= 0")
if (min_periods is not None) and (min_periods < 0):
raise ValueError("min_periods must be >= 0")
if min_periods is None:
# TODO: 'min_periods' is not equivalent in pandas because it does not count NA as
# a value.
min_periods = window
window_spec = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(
Window.currentRow - (window - 1), Window.currentRow
)
super().__init__(window_spec, min_periods)
def count(self) -> FrameLike:
def count(scol: Column) -> Column:
return F.count(scol).over(self._window)
return self._apply_as_series_or_frame(count).astype("float64") # type: ignore
class Rolling(RollingLike[FrameLike]):
def __init__(
self,
psdf_or_psser: FrameLike,
window: int,
min_periods: Optional[int] = None,
):
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import Series
super().__init__(window, min_periods)
if not isinstance(psdf_or_psser, (DataFrame, Series)):
raise TypeError(
"psdf_or_psser must be a series or dataframe; however, got: %s"
% type(psdf_or_psser)
)
self._psdf_or_psser = psdf_or_psser
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeRolling, item):
property_or_func = getattr(MissingPandasLikeRolling, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError(item)
def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:
return self._psdf_or_psser._apply_series_op(
lambda psser: psser._with_new_scol(func(psser.spark.column)), # TODO: dtype?
should_resolve=True,
)
def count(self) -> FrameLike:
"""
The rolling count of any non-NaN observations inside the window.
.. note:: the current implementation of this API uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
        See Also
        --------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.count : Count of the full Series.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 3, float("nan"), 10])
>>> s.rolling(1).count()
0 1.0
1 1.0
2 0.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.to_frame().rolling(1).count()
0
0 1.0
1 1.0
2 0.0
3 1.0
>>> s.to_frame().rolling(3).count()
0
0 1.0
1 2.0
2 2.0
3 2.0
"""
return super().count()
def sum(self) -> FrameLike:
"""
Calculate rolling summation of given DataFrame or Series.
.. note:: the current implementation of this API uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
rolling summation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s
0 4
1 3
2 5
3 2
4 6
dtype: int64
>>> s.rolling(2).sum()
0 NaN
1 7.0
2 8.0
3 7.0
4 8.0
dtype: float64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 12.0
3 10.0
4 13.0
dtype: float64
For DataFrame, each rolling summation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df
A B
0 4 16
1 3 9
2 5 25
3 2 4
4 6 36
>>> df.rolling(2).sum()
A B
0 NaN NaN
1 7.0 25.0
2 8.0 34.0
3 7.0 29.0
4 8.0 40.0
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 12.0 50.0
3 10.0 38.0
4 13.0 65.0
"""
return super().sum()
def min(self) -> FrameLike:
"""
Calculate the rolling minimum.
.. note:: the current implementation of this API uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with a Series.
DataFrame.rolling : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s
0 4
1 3
2 5
3 2
4 6
dtype: int64
>>> s.rolling(2).min()
0 NaN
1 3.0
2 3.0
3 2.0
4 2.0
dtype: float64
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
For DataFrame, each rolling minimum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df
A B
0 4 16
1 3 9
2 5 25
3 2 4
4 6 36
>>> df.rolling(2).min()
A B
0 NaN NaN
1 3.0 9.0
2 3.0 9.0
3 2.0 4.0
4 2.0 4.0
>>> df.rolling(3).min()
A B
0 NaN NaN
1 NaN NaN
2 3.0 9.0
3 2.0 4.0
4 2.0 4.0
"""
return super().min()
def max(self) -> FrameLike:
"""
Calculate the rolling maximum.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all of the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid using this method on very large datasets.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.rolling : Series rolling.
DataFrame.rolling : DataFrame rolling.
Series.max : Similar method for Series.
DataFrame.max : Similar method for DataFrame.
Examples
--------
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s
0 4
1 3
2 5
3 2
4 6
dtype: int64
>>> s.rolling(2).max()
0 NaN
1 4.0
2 5.0
3 5.0
4 6.0
dtype: float64
>>> s.rolling(3).max()
0 NaN
1 NaN
2 5.0
3 5.0
4 6.0
dtype: float64
For DataFrame, each rolling maximum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df
A B
0 4 16
1 3 9
2 5 25
3 2 4
4 6 36
>>> df.rolling(2).max()
A B
0 NaN NaN
1 4.0 16.0
2 5.0 25.0
3 5.0 25.0
4 6.0 36.0
>>> df.rolling(3).max()
A B
0 NaN NaN
1 NaN NaN
2 5.0 25.0
3 5.0 25.0
4 6.0 36.0
"""
return super().max()
def mean(self) -> FrameLike:
"""
Calculate the rolling mean of the values.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all of the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid using this method on very large datasets.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s
0 4
1 3
2 5
3 2
4 6
dtype: int64
>>> s.rolling(2).mean()
0 NaN
1 3.5
2 4.0
3 3.5
4 4.0
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 4.000000
3 3.333333
4 4.333333
dtype: float64
For DataFrame, each rolling mean is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df
A B
0 4 16
1 3 9
2 5 25
3 2 4
4 6 36
>>> df.rolling(2).mean()
A B
0 NaN NaN
1 3.5 12.5
2 4.0 17.0
3 3.5 14.5
4 4.0 20.0
>>> df.rolling(3).mean()
A B
0 NaN NaN
1 NaN NaN
2 4.000000 16.666667
3 3.333333 12.666667
4 4.333333 21.666667
"""
return super().mean()
def std(self) -> FrameLike:
"""
Calculate rolling standard deviation.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all of the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid using this method on very large datasets.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the rolling calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Examples
--------
>>> s = ps.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
For DataFrame, each rolling standard deviation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.rolling(2).std()
A B
0 NaN NaN
1 0.000000 0.000000
2 0.707107 7.778175
3 0.707107 9.192388
4 1.414214 16.970563
5 0.000000 0.000000
6 0.000000 0.000000
"""
return super().std()
def var(self) -> FrameLike:
"""
Calculate unbiased rolling variance.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all of the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid using this method on very large datasets.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the rolling calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Examples
--------
>>> s = ps.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
For DataFrame, each unbiased rolling variance is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.rolling(2).var()
A B
0 NaN NaN
1 0.0 0.0
2 0.5 60.5
3 0.5 84.5
4 2.0 288.0
5 0.0 0.0
6 0.0 0.0
"""
return super().var()
class RollingGroupby(RollingLike[FrameLike]):
def __init__(
self,
groupby: GroupBy[FrameLike],
window: int,
min_periods: Optional[int] = None,
):
super().__init__(window, min_periods)
self._groupby = groupby
self._window = self._window.partitionBy(*[ser.spark.column for ser in groupby._groupkeys])
self._unbounded_window = self._unbounded_window.partitionBy(
*[ser.spark.column for ser in groupby._groupkeys]
)
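    # Partitioning the window (and the unbounded window) by the group keys makes
    # each rolling computation restart at every group boundary, so values never
    # mix across groups and Spark can evaluate the groups in parallel.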
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeRollingGroupby, item):
property_or_func = getattr(MissingPandasLikeRollingGroupby, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError(item)
def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:
"""
        Wraps a function that handles a Spark column in order
        to support it in both pandas-on-Spark Series and DataFrame.
        Note that the given `func` name should be the same as the API's method name.
"""
from pyspark.pandas import DataFrame
groupby = self._groupby
psdf = groupby._psdf
# Here we need to include grouped key as an index, and shift previous index.
# [index_column0, index_column1] -> [grouped key, index_column0, index_column1]
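        # For example (illustrative), df.groupby(df.A).rolling(2).sum() comes back
        # indexed by (A, original index), as the doctests further below show.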
new_index_scols = [] # type: List[Column]
new_index_spark_column_names = []
new_index_names = []
new_index_fields = []
for groupkey in groupby._groupkeys:
index_column_name = SPARK_INDEX_NAME_FORMAT(len(new_index_scols))
new_index_scols.append(groupkey.spark.column.alias(index_column_name))
new_index_spark_column_names.append(index_column_name)
new_index_names.append(groupkey._column_label)
new_index_fields.append(groupkey._internal.data_fields[0].copy(name=index_column_name))
for new_index_scol, index_name, index_field in zip(
psdf._internal.index_spark_columns,
psdf._internal.index_names,
psdf._internal.index_fields,
):
index_column_name = SPARK_INDEX_NAME_FORMAT(len(new_index_scols))
new_index_scols.append(new_index_scol.alias(index_column_name))
new_index_spark_column_names.append(index_column_name)
new_index_names.append(index_name)
new_index_fields.append(index_field.copy(name=index_column_name))
if groupby._agg_columns_selected:
agg_columns = groupby._agg_columns
else:
agg_columns = [
psdf._psser_for(label)
for label in psdf._internal.column_labels
if label not in groupby._column_labels_to_exlcude
]
applied = []
for agg_column in agg_columns:
applied.append(agg_column._with_new_scol(func(agg_column.spark.column))) # TODO: dtype?
# Seems like pandas filters out when grouped key is NA.
cond = groupby._groupkeys[0].spark.column.isNotNull()
for c in groupby._groupkeys[1:]:
cond = cond | c.spark.column.isNotNull()
sdf = psdf._internal.spark_frame.filter(cond).select(
new_index_scols + [c.spark.column for c in applied]
)
internal = psdf._internal.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in new_index_spark_column_names],
index_names=new_index_names,
index_fields=new_index_fields,
column_labels=[c._column_label for c in applied],
data_spark_columns=[
scol_for(sdf, c._internal.data_spark_column_names[0]) for c in applied
],
data_fields=[c._internal.data_fields[0] for c in applied],
)
return groupby._cleanup_and_return(DataFrame(internal))
def count(self) -> FrameLike:
"""
The rolling count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
            Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.count : Count of the full Series.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).rolling(3).count().sort_index()
2 0 1.0
1 2.0
3 2 1.0
3 2.0
4 3.0
4 5 1.0
6 2.0
7 3.0
8 3.0
5 9 1.0
10 2.0
dtype: float64
For DataFrame, each rolling count is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).rolling(2).count().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
A
2 0 1.0 1.0
1 2.0 2.0
3 2 1.0 1.0
3 2.0 2.0
4 2.0 2.0
4 5 1.0 1.0
6 2.0 2.0
7 2.0 2.0
8 2.0 2.0
5 9 1.0 1.0
10 2.0 2.0
"""
return super().count()
def sum(self) -> FrameLike:
"""
The rolling summation of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.sum : Sum of the full Series.
DataFrame.sum : Sum of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).rolling(3).sum().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 9.0
4 5 NaN
6 NaN
7 12.0
8 12.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each rolling summation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).rolling(2).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
A
2 0 NaN NaN
1 4.0 8.0
3 2 NaN NaN
3 6.0 18.0
4 6.0 18.0
4 5 NaN NaN
6 8.0 32.0
7 8.0 32.0
8 8.0 32.0
5 9 NaN NaN
10 10.0 50.0
"""
return super().sum()
def min(self) -> FrameLike:
"""
The rolling minimum of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.min : Min of the full Series.
DataFrame.min : Min of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).rolling(3).min().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each rolling minimum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).rolling(2).min().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
A
2 0 NaN NaN
1 2.0 4.0
3 2 NaN NaN
3 3.0 9.0
4 3.0 9.0
4 5 NaN NaN
6 4.0 16.0
7 4.0 16.0
8 4.0 16.0
5 9 NaN NaN
10 5.0 25.0
"""
return super().min()
def max(self) -> FrameLike:
"""
The rolling maximum of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.max : Max of the full Series.
DataFrame.max : Max of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).rolling(3).max().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each rolling maximum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).rolling(2).max().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
A
2 0 NaN NaN
1 2.0 4.0
3 2 NaN NaN
3 3.0 9.0
4 3.0 9.0
4 5 NaN NaN
6 4.0 16.0
7 4.0 16.0
8 4.0 16.0
5 9 NaN NaN
10 5.0 25.0
"""
return super().max()
def mean(self) -> FrameLike:
"""
The rolling mean of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.mean : Mean of the full Series.
DataFrame.mean : Mean of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).rolling(3).mean().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each rolling mean is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).rolling(2).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
A
2 0 NaN NaN
1 2.0 4.0
3 2 NaN NaN
3 3.0 9.0
4 3.0 9.0
4 5 NaN NaN
6 4.0 16.0
7 4.0 16.0
8 4.0 16.0
5 9 NaN NaN
10 5.0 25.0
"""
return super().mean()
def std(self) -> FrameLike:
"""
Calculate rolling standard deviation.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the rolling calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
"""
return super().std()
def var(self) -> FrameLike:
"""
Calculate unbiased rolling variance.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the rolling calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
"""
return super().var()
class ExpandingLike(RollingAndExpanding[FrameLike]):
def __init__(self, min_periods: int = 1):
if min_periods < 0:
raise ValueError("min_periods must be >= 0")
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(
Window.unboundedPreceding, Window.currentRow
)
super().__init__(window, min_periods)
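    # count() below masks out positions reached before `min_periods` rows have
    # accumulated: F.row_number() over the unbounded window gives the running row
    # position, and earlier positions are replaced with null (NaN in pandas terms),
    # mirroring pandas' min_periods handling.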
def count(self) -> FrameLike:
def count(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.count(scol).over(self._window),
).otherwise(F.lit(None))
return self._apply_as_series_or_frame(count).astype("float64") # type: ignore
class Expanding(ExpandingLike[FrameLike]):
def __init__(self, psdf_or_psser: FrameLike, min_periods: int = 1):
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import Series
super().__init__(min_periods)
if not isinstance(psdf_or_psser, (DataFrame, Series)):
raise TypeError(
"psdf_or_psser must be a series or dataframe; however, got: %s"
% type(psdf_or_psser)
)
self._psdf_or_psser = psdf_or_psser
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeExpanding, item):
property_or_func = getattr(MissingPandasLikeExpanding, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError(item)
# TODO: when add 'center' and 'axis' parameter, should add to here too.
def __repr__(self) -> str:
return "Expanding [min_periods={}]".format(self._min_periods)
_apply_as_series_or_frame = Rolling._apply_as_series_or_frame
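    # Expanding reuses Rolling's column-wise application unchanged; what differs
    # is the window specification set up in ExpandingLike.__init__.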
def count(self) -> FrameLike:
"""
The expanding count of any non-NaN observations inside the window.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all of the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid using this method on very large datasets.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.count : Count of the full Series.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 3, float("nan"), 10])
>>> s.expanding().count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
>>> s.to_frame().expanding().count()
0
0 1.0
1 2.0
2 2.0
3 3.0
"""
return super().count()
def sum(self) -> FrameLike:
"""
Calculate expanding summation of given DataFrame or Series.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all of the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid using this method on very large datasets.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
expanding summation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
For DataFrame, each expanding summation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.expanding(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 10.0 30.0
4 15.0 55.0
"""
return super().sum()
def min(self) -> FrameLike:
"""
Calculate the expanding minimum.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all of the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid using this method on very large datasets.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with a Series.
DataFrame.expanding : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
        Performing an expanding minimum with a minimum of 3 periods.
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s.expanding(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
return super().min()
def max(self) -> FrameLike:
"""
Calculate the expanding maximum.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all of the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid using this method on very large datasets.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.max : Similar method for Series.
DataFrame.max : Similar method for DataFrame.
Examples
--------
        Performing an expanding maximum with a minimum of 3 periods.
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s.expanding(3).max()
0 NaN
1 NaN
2 5.0
3 5.0
4 6.0
dtype: float64
"""
return super().max()
def mean(self) -> FrameLike:
"""
Calculate the expanding mean of the values.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all of the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid using this method on very large datasets.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
        The examples below show expanding mean calculations with minimum periods of
        two and three, respectively.
>>> s = ps.Series([1, 2, 3, 4])
>>> s.expanding(2).mean()
0 NaN
1 1.5
2 2.0
3 2.5
dtype: float64
>>> s.expanding(3).mean()
0 NaN
1 NaN
2 2.0
3 2.5
dtype: float64
"""
return super().mean()
def std(self) -> FrameLike:
"""
Calculate expanding standard deviation.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all of the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid using this method on very large datasets.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the expanding calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Examples
--------
>>> s = ps.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
        For DataFrame, each expanding standard deviation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.expanding(2).std()
A B
0 NaN NaN
1 0.000000 0.000000
2 0.577350 6.350853
3 0.957427 11.412712
4 0.894427 10.630146
5 0.836660 9.928075
6 0.786796 9.327379
"""
return super().std()
def var(self) -> FrameLike:
"""
Calculate unbiased expanding variance.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all of the data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid using this method on very large datasets.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the expanding calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Examples
--------
>>> s = ps.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
For DataFrame, each unbiased expanding variance is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.expanding(2).var()
A B
0 NaN NaN
1 0.000000 0.000000
2 0.333333 40.333333
3 0.916667 130.250000
4 0.800000 113.000000
5 0.700000 98.566667
6 0.619048 87.000000
"""
return super().var()
class ExpandingGroupby(ExpandingLike[FrameLike]):
def __init__(self, groupby: GroupBy[FrameLike], min_periods: int = 1):
super().__init__(min_periods)
self._groupby = groupby
self._window = self._window.partitionBy(*[ser.spark.column for ser in groupby._groupkeys])
self._unbounded_window = self._window.partitionBy(
*[ser.spark.column for ser in groupby._groupkeys]
)
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeExpandingGroupby, item):
property_or_func = getattr(MissingPandasLikeExpandingGroupby, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError(item)
_apply_as_series_or_frame = RollingGroupby._apply_as_series_or_frame
def count(self) -> FrameLike:
"""
The expanding count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.count : Count of the full Series.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).expanding(3).count().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 3.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each expanding count is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).expanding(2).count().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
A
2 0 NaN NaN
1 2.0 2.0
3 2 NaN NaN
3 2.0 2.0
4 3.0 3.0
4 5 NaN NaN
6 2.0 2.0
7 3.0 3.0
8 4.0 4.0
5 9 NaN NaN
10 2.0 2.0
"""
return super().count()
def sum(self) -> FrameLike:
"""
Calculate expanding summation of given DataFrame or Series.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
expanding summation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).expanding(3).sum().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 9.0
4 5 NaN
6 NaN
7 12.0
8 16.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each expanding summation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).expanding(2).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
A
2 0 NaN NaN
1 4.0 8.0
3 2 NaN NaN
3 6.0 18.0
4 9.0 27.0
4 5 NaN NaN
6 8.0 32.0
7 12.0 48.0
8 16.0 64.0
5 9 NaN NaN
10 10.0 50.0
"""
return super().sum()
def min(self) -> FrameLike:
"""
Calculate the expanding minimum.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with a Series.
DataFrame.expanding : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).expanding(3).min().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each expanding minimum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).expanding(2).min().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
A
2 0 NaN NaN
1 2.0 4.0
3 2 NaN NaN
3 3.0 9.0
4 3.0 9.0
4 5 NaN NaN
6 4.0 16.0
7 4.0 16.0
8 4.0 16.0
5 9 NaN NaN
10 5.0 25.0
"""
return super().min()
def max(self) -> FrameLike:
"""
Calculate the expanding maximum.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.max : Similar method for Series.
DataFrame.max : Similar method for DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).expanding(3).max().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each expanding maximum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).expanding(2).max().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
A
2 0 NaN NaN
1 2.0 4.0
3 2 NaN NaN
3 3.0 9.0
4 3.0 9.0
4 5 NaN NaN
6 4.0 16.0
7 4.0 16.0
8 4.0 16.0
5 9 NaN NaN
10 5.0 25.0
"""
return super().max()
def mean(self) -> FrameLike:
"""
Calculate the expanding mean of the values.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).expanding(3).mean().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each expanding mean is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).expanding(2).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
A
2 0 NaN NaN
1 2.0 4.0
3 2 NaN NaN
3 3.0 9.0
4 3.0 9.0
4 5 NaN NaN
6 4.0 16.0
7 4.0 16.0
8 4.0 16.0
5 9 NaN NaN
10 5.0 25.0
"""
return super().mean()
def std(self) -> FrameLike:
"""
Calculate expanding standard deviation.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the expanding calculation.
See Also
--------
        Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
"""
return super().std()
def var(self) -> FrameLike:
"""
Calculate unbiased expanding variance.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the expanding calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
"""
return super().var()
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.window
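    # Assumes the SPARK_HOME environment variable is set; the doctests are run
    # from that directory.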
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.window.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.window tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.window,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
markchil/bayesimp | bayesimp.py | 1 | 413747 | # Copyright 2016 Mark Chilenski
# This program is distributed under the terms of the GNU General Purpose License (GPL).
# Refer to http://www.gnu.org/licenses/gpl.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from signal import signal, SIGPIPE, SIG_DFL
#Ignore SIG_PIPE and don't throw exceptions on it... (http://docs.python.org/library/signal.html)
signal(SIGPIPE, SIG_DFL)
import sys
# Override the system eqtools:
sys.path.insert(0, "/home/markchil/codes/efit/development/EqTools")
import os
from distutils.dir_util import copy_tree
import subprocess
import scipy
import scipy.io
import scipy.interpolate
import scipy.optimize
import numpy.random
import numpy.polynomial
import MDSplus
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
plt.ion()
import matplotlib.gridspec as mplgs
import matplotlib.widgets as mplw
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm
import profiletools
import profiletools.gui
import gptools
import eqtools
import Tkinter as tk
import re
import glob
import copy
import cPickle as pkl
import collections
import pexpect
import time as time_
import shutil
import tempfile
import multiprocessing
import emcee
from emcee.interruptible_pool import InterruptiblePool
import warnings
import TRIPPy
import TRIPPy.XTOMO
from gptools.splines import spev
import itertools
# try:
# import PyGMO
# _have_PyGMO = True
# except ImportError:
# warnings.warn("Could not import PyGMO!", RuntimeWarning)
# _have_PyGMO = False
import pymultinest
import periodictable
import lines
import nlopt
import pymysql as MS
import traceback
from connect import get_connection, send_email
import fcntl
import sobol
import numdifftools as nd
from sklearn.preprocessing import PolynomialFeatures
# Store the PID of the main thread:
MAIN_PID = os.getpid()
# List of directories for use in main thread:
MAIN_THREAD_DIRS = [None]
# Lock object to handle threads with PyGMO:
DIR_POOL_LOCK = multiprocessing.Lock()
# Regex used to split lists up. This will let the list be delimted by any
# non-numeric characters, where the decimal point and minus sign are NOT
# considered numeric.
LIST_REGEX = r'([0-9]+)[^0-9]*'
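# Illustrative example: re.findall(LIST_REGEX, "1, 2, 3") returns ['1', '2', '3'];
# note that "1.5" splits into ['1', '5'] because '.' is treated as a delimiter.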
# Region of interest for Ca lines on XEUS (in nanometers):
XEUS_ROI = (1.8, 2.2)
# Locations of Ca 17+ lines (in nanometers, taken from pue#ca17.dat):
CA_17_LINES = (
1.8683, 30.2400, 1.9775, 1.8727, 34.4828, 1.9632, 2.0289, 1.4080, 2.0122,
1.4091, 1.4736, 1.2642, 1.9790, 1.4652, 1.4852, 1.2647, 1.3181, 1.4763,
5.4271, 1.3112, 1.3228, 5.7753, 1.4739, 5.4428, 3.7731, 5.7653, 5.8442,
5.6682, 1.3157, 3.9510, 5.6332, 3.7770, 5.8015, 3.9457, 3.8990, 1.3182,
3.9404, 5.8320, 3.8812, 3.9210, 5.8370, 11.8177, 12.4743, 5.6734, 3.9644,
12.6866, 12.4557, 11.8555, 5.7780, 12.2669, 3.9627, 3.9002, 12.6020, 12.6091,
3.9517, 12.2001, 5.8190, 12.6265, 12.4970, 12.4883, 3.9585, 12.2793, 12.4807,
12.5836, 12.5252, 12.5256, 12.5007, 107.5003, 12.5127, 124.3039, 260.0374,
301.5136, 229.1056, 512.7942, 286.7219, 595.2381, 321.8228, 545.2265,
682.1748, 1070.0909, 1338.6881, 766.2248, 1505.1174, 3374.9577, 4644.6817,
6583.2783, 9090.9089, 7380.0736, 14430.0141
)
# Locations of Ca 16+ lines (in nanometers):
CA_16_LINES = (19.2858,)
# Combined Ca 16+, 17+ lines (in nanometers):
CA_LINES = scipy.asarray(CA_17_LINES + CA_16_LINES)
# Threshold for size of HiReX-SR errorbar to reject:
HIREX_THRESH = 0.05
# Template for the Ca.atomdat file:
CA_ATOMDAT_TEMPLATE = """cv main ion brems SXR spectral brems thermal CX
0 0 0 0
cv diagnostic lines
1
cc begin atomicData
acd:acd85_ca.dat recombination
scd:scd85_ca.dat ionisation
prb:prb00_ca.dat continuum radiation
plt:plt00_ca.dat line radiation
cc end atomic Data
********************Diagnostic Lines********************
cd excitation recombination charge exchange
1 0 0
cd # of lines
{num_lines:d}
cd charge of ion wavelength(A) half width of window(A) file extension
{line_spec}
"""
# Template for each line of the line_spec in Ca.atomdat:
LINE_SPEC_TEMPLATE = " {charge:d} {wavelength:.3f} {halfwidth:.3f} 'pue'\n"
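# Illustrative use (placeholder values): LINE_SPEC_TEMPLATE.format(charge=17,
# wavelength=18.683, halfwidth=0.1) produces one row of the diagnostic-line table
# (charge, wavelength [A], window half-width [A], 'pue' file extension), and the
# concatenated rows fill the {line_spec} field of CA_ATOMDAT_TEMPLATE above.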
# Global variable to keep track of the currently-running IDL session. Handled
# with global variables so that it works with threading and provides
# persistence of the IDL session.
IDL_SESSION = None
# Global variable to keep track of the working directory for the current thread.
WORKING_DIR = None
# Global variable to keep track of the master directory to copy from.
MASTER_DIR = None
# Global variable to keep track of number of calls to STRAHL.
NUM_STRAHL_CALLS = 0
# Translate status codes into human-readable strings:
OPT_STATUS = {
1: "success",
2: "stopval reached",
3: "ftol reached",
4: "xtol reached",
5: "unconverged: maxeval reached",
6: "unconverged: maxtime reached",
-1: "failure",
-2: "failure: invalid args",
-3: "failure: out of memory",
-4: "failure: roundoff limited",
-5: "failure: forced stop",
-100: "unconverged: no work done",
0: "unconverged: in progress",
10: "bayesimp failure"
}
# def test_lock(*args, **kwargs):
# """Test the thread-safety of putting the lock in the module namespace.
#
# Looks like it works!
# """
# with DIR_POOL_LOCK:
# time_.sleep(5)
# print("done.")
#
# return 0
# POS vector for XEUS:
XEUS_POS = [2.561, 0.2158, 0.196, 0.1136]
# POS vector for LoWEUS:
LOWEUS_POS = [2.561, -0.2158, 0.196, -0.1136]
# Default time grid specification:
DEFAULT_TIME_SPEC = (
" {time_1:.5f} 0.00010 1.001 10\n"
" {time_2:.5f} 0.00010 1.001 10\n"
)
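# DEFAULT_TIME_SPEC is a template: it is filled in with the simulation start and
# end times via time_spec.format(time_1=..., time_2=...) in Run.__init__ below.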
def get_idl_session():
"""Launch an IDL session and store it in the global variable IDL_SESSION.
The global variable is used to provide persistence and support for
multiprocessing.
If there is no active IDL session when called, it launches IDL, compiles
execute_strahl.pro and restores run_data to the interactive workspace. It
also checks to see if view_data.sav exists and, if so, loads that.
"""
global IDL_SESSION
if IDL_SESSION is None:
IDL_SESSION = pexpect.spawn('idl70')
# Use the following line for debugging:
# IDL_SESSION.logfile = sys.stdout
IDL_SESSION.expect('IDL> ')
IDL_SESSION.sendline('.compile execute_strahl')
IDL_SESSION.expect('IDL> ')
IDL_SESSION.sendline('run_data = load_run_data()')
IDL_SESSION.expect('IDL> ')
IDL_SESSION.sendline('if file_test("view_data.sav") then restore, "view_data.sav"')
IDL_SESSION.expect('IDL> ')
return IDL_SESSION
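# Illustrative use of the persistent session (the command shown is a placeholder):
# idl = get_idl_session()
# idl.sendline('print, 1 + 1')
# idl.expect('IDL> ')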
def acquire_working_dir(lockmode='pool'):
"""Get the first available working directory. If none is available, create one.
Parameters
----------
lockmode : {'pool', 'file'}
Type of lock to use. Default is to use `DIR_POOL_LOCK`. If 'file', will
attempt to use `flock` to acquire an exclusive lock on the working dirs
file.
"""
# Some clever trickery is needed to deal with execution in the main thread
# for connected topologies, since PaGMO seems to spawn way too many at once
# and there are concurrent access issues. The solution adopted is to use
# MAIN_THREAD_DIRS as a sentinel. If it is empty, the main directory is in
# use. If it has one element, the main directory can be used. So, to acquire
# the directory you pop (an atomic operation) and to release it you append
# (also an atomic operation).
global WORKING_DIR
global MASTER_DIR
if os.getpid() == MAIN_PID and lockmode == 'pool':
# This will hammer away indefinitely until the directory is acquired,
# trying every 10ms.
while True:
try:
status = MAIN_THREAD_DIRS.pop()
except IndexError:
time_.sleep(1e-2)
else:
WORKING_DIR = MASTER_DIR
os.chdir(WORKING_DIR)
return
else:
if lockmode == 'pool':
global DIR_POOL_LOCK
with DIR_POOL_LOCK:
with open(os.path.join(MASTER_DIR, 'working_dirs.txt'), 'r') as f:
lines = f.read().splitlines(True)
# Handle case where all directories are already taken:
if len(lines) == 0:
WORKING_DIR = tempfile.mkdtemp(prefix='bayesimp')
os.chdir(WORKING_DIR)
copy_tree(MASTER_DIR, WORKING_DIR)
else:
WORKING_DIR = lines[0][:-1]
os.chdir(WORKING_DIR)
with open(os.path.join(MASTER_DIR, 'working_dirs.txt'), 'w') as f:
f.writelines(lines[1:])
elif lockmode == 'file':
# Need to open for writing for this to work on Linux:
fl = open(os.path.join(MASTER_DIR, 'lockfile'), 'a')
fcntl.flock(fl.fileno(), fcntl.LOCK_EX)
try:
with open(os.path.join(MASTER_DIR, 'working_dirs.txt'), 'r') as f:
lines = f.read().splitlines(True)
# Handle case where all directories are already taken:
if len(lines) == 0:
WORKING_DIR = tempfile.mkdtemp(prefix='bayesimp')
os.chdir(WORKING_DIR)
copy_tree(MASTER_DIR, WORKING_DIR)
else:
WORKING_DIR = lines[0][:-1]
os.chdir(WORKING_DIR)
with open(os.path.join(MASTER_DIR, 'working_dirs.txt'), 'w') as f:
f.writelines(lines[1:])
finally:
fcntl.flock(fl.fileno(), fcntl.LOCK_UN)
fl.close()
def release_working_dir(lockmode='pool'):
"""Release the current working directory, adding its name back onto the list of available ones.
Parameters
----------
lockmode : {'pool', 'file'}
Type of lock to use. Default is to use `DIR_POOL_LOCK`. If 'file', will
attempt to use `flock` to acquire an exclusive lock on the working dirs
file.
"""
global WORKING_DIR
global MASTER_DIR
# Some clever trickery is needed to deal with execution in the main thread
# for connected topologies, since PaGMO seems to spawn way too many at once
# and there are concurrent access issues. The solution adopted is to use
# MAIN_THREAD_DIRS as a sentinel. If it is empty, the main directory is in
# use. If it has one element, the main directory can be used. So, to acquire
# the directory you pop (an atomic operation) and to release it you append
# (also an atomic operation).
if os.getpid() == MAIN_PID and lockmode == 'pool':
# Append is atomic, so I can safely just push this back on:
MAIN_THREAD_DIRS.append(None)
else:
if lockmode == 'pool':
global DIR_POOL_LOCK
with DIR_POOL_LOCK:
with open(os.path.join(MASTER_DIR, 'working_dirs.txt'), 'a') as f:
f.write(WORKING_DIR + '\n')
os.chdir(MASTER_DIR)
elif lockmode == 'file':
# Need to open for writing for this to work on Linux:
fl = open(os.path.join(MASTER_DIR, 'lockfile'), 'a')
fcntl.flock(fl.fileno(), fcntl.LOCK_EX)
try:
with open(os.path.join(MASTER_DIR, 'working_dirs.txt'), 'a') as f:
f.write(WORKING_DIR + '\n')
os.chdir(MASTER_DIR)
finally:
fcntl.flock(fl.fileno(), fcntl.LOCK_UN)
fl.close()
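# Illustrative pairing of the two helpers above: acquire, do work inside
# WORKING_DIR, then release in a finally block so the directory is always
# returned to the pool.
#
# acquire_working_dir(lockmode='file')
# try:
#     pass  # run STRAHL / evaluate the model in WORKING_DIR
# finally:
#     release_working_dir(lockmode='file')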
def setup_working_dir(*args, **kwargs):
"""Setup a temporary working directory, and store its name in WORKING_DIR.
"""
global WORKING_DIR
global MASTER_DIR
global DIR_POOL_LOCK
# global IDL_SESSION
global NUM_STRAHL_CALLS
assert WORKING_DIR is None
MASTER_DIR = os.getcwd()
WORKING_DIR = tempfile.mkdtemp(prefix='bayesimp')
NUM_STRAHL_CALLS = 0
print("Setting up %s..." % (WORKING_DIR,))
os.chdir(WORKING_DIR)
copy_tree(MASTER_DIR, WORKING_DIR)
with DIR_POOL_LOCK:
with open(os.path.join(MASTER_DIR, 'working_dirs.txt'), 'a') as f:
f.write(WORKING_DIR + '\n')
# Also open an IDL session:
# print("Launching IDL session...")
# assert IDL_SESSION is None
# idl = get_idl_session()
print("Ready to work!")
def cleanup_working_dir(*args, **kwargs):
"""Remove the WORKING_DIR. This should be called in each worker when closing out a pool.
"""
global WORKING_DIR
global MASTER_DIR
if WORKING_DIR is not None:
print("Cleaning up %s..." % (WORKING_DIR,))
# Also quit the active IDL session:
# get_idl_session().sendline('exit')
os.chdir(MASTER_DIR)
shutil.rmtree(WORKING_DIR)
WORKING_DIR = None
def finalize_pool(pool):
"""Have each worker in a pool clean up its working directory.
"""
# Use a factor of 4 to ensure each worker gets called, since I don't
# know a better way to ensure this.
dum = [1] * (4 * pool._processes)
pool.map(cleanup_working_dir, dum)
# Blank out the list of available directories:
with DIR_POOL_LOCK:
f = open(os.path.join(MASTER_DIR, 'working_dirs.txt'), 'w')
f.close()
def make_pool(num_proc=None):
"""Create and return a pool.
"""
global MASTER_DIR
global DIR_POOL_LOCK
MASTER_DIR = os.getcwd()
# Blank out the list of available directories:
with DIR_POOL_LOCK:
f = open(os.path.join(MASTER_DIR, 'working_dirs.txt'), 'w')
f.close()
if num_proc is None:
num_proc = multiprocessing.cpu_count()
# Close out the IDL session before creating the pools, since this
# seems to cause some issues.
global IDL_SESSION
if IDL_SESSION is not None:
IDL_SESSION.sendline('exit')
IDL_SESSION = None
pool = InterruptiblePool(processes=num_proc, initializer=setup_working_dir)
return pool
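# Illustrative lifecycle (`work_func` and `tasks` are placeholders supplied by the
# caller):
# pool = make_pool(num_proc=4)
# try:
#     results = pool.map(work_func, tasks)
# finally:
#     finalize_pool(pool)
#     pool.close()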
class Run(object):
"""Class to load and run bayesimp/STRAHL.
Must be run from the directory containing bayesimp!
If the directory strahl_<SHOT>_<VERSION> does not exist, creates it.
Most of the parameters are required, but are used with keywords to make
setup simpler.
Parameters
----------
shot : int
The shot number to analyze.
version : int
The version of the analysis to perform. Default is 0.
time_1 : float
The start time of the simulation.
time_2 : float
The end time of the simulation.
injections : list of :py:class:`Injection`
Objects describing the injections.
tht : int
The THT to use. Default is 0.
line : int
The HiReX-SR line to use. Default is 6. OTHER LINES ARE NOT SUPPORTED
YET!
Te_args : list of str
List of command-line arguments to pass to gpfit when fitting the Te
profile.
ne_args : list of str
List of command-line arguments to pass to gpfit when fitting the ne
profile.
debug_plots : int
Set to 0 to suppress superfluous plots. Set to 1 to enable some plots.
Set to 2 to enable all plots. Default is 0.
num_eig_D : int
The number of eigenvalues/free coefficients to use for the D profile.
num_eig_V : int
The number of eigenvalues/free coefficients to use for the V profile.
roa_grid : array of float
r/a grid to evaluate the ne, Te profiles on.
roa_grid_DV : array of float
r/a grid to evaluate the D, V profiles on.
source_file : str
If present, this is a path to a properly-formatted source file to use
instead of the source model. This overrides the other options related
to sources (though note that source_prior still acts as the prior for
the temporal shift applied to the data, and hence should be univariate).
method : {'linterp', 'spline'}
The method to use when evaluating the D, V profiles.
* If 'linterp', (the default) piecewise linear functions will be used
for the D and V profiles. `num_eig_D` and `num_eig_V` will then be the
number of free values which are then linearly-interpolated. Because
there is a slope constraint on D and a value constraint on V, the
total number of knots is `num_eig + 1`. But the first and last knots
should usually be at the edges of the domain, so there are only
`num_eig - 1` free knots.
* If 'spline', B-splines will be used for the D and V profiles.
`num_eig_D` and `num_eig_V` will then be the number of free
coefficients in the respective splines. Because there is a slope
constraint on D and a value constraint on V, this is one fewer than
the actual number of spline coefficients. The total number of knots is
then `num_eig - k + 2`, or `num_eig - k` free knots.
free_knots : bool
If True, the (internal) knot locations will be taken to be free
parameters included in the inference. There will always be a knot at 0
and a knot at 1. Default is to use fixed knots. This does not have an
effect if `fixed_params` is provided.
spline_k_D : int
The spline order to use for the D profile with method = 'spline'.
Default is 3 (cubic spline).
spline_k_V : int
The spline order to use for the V profile with method = 'spline'.
Default is 3 (cubic spline).
include_loweus : bool
If True, the data from the LoWEUS spectrometer will be included in the
likelihood. Otherwise, LoWEUS will only be evaluated to compare the
brightness.
use_scaling : bool
If True, a scale factor applied to each diagnostic signal will be
included as a free parameter. This can help deal with issues in the
normalization. Note that the signals are still normalized, so this
factor should end up being very close to unity for each signal. Default
is False (only normalize). This does not have an effect if `fixed_params`
is provided.
use_shift : bool
If True, a temporal shift applied to each diagnostic will be included as
a free parameter. This can help deal with issues of synchronization.
Default is False (do not shift). This does not have an effect if
`fixed_params` is provided.
sort_knots : bool
If True, the knots will be sorted when splitting the params. Default is
False (don't sort knots, unsorted knots are treated as infeasible cases).
params_true : array of float, (`num_params`,), optional
If provided, these are used to construct synthetic data (using the
equilibrium and temperature/density profiles from the specified shot).
Default is to use actual experimental data.
synth_li_lines : array of int, optional
The indices of the Li-like Ca lines to use when constructing the
synthetic data. The default is to use the ones normally seen on XEUS.
synth_be_lines : array of int, optional
The indices of the Be-like Ca lines to use when constructing the
synthetic data. The default is to use the one normally seen on LoWEUS.
hirex_time_res : float, optional
The time resolution (in seconds) to use for synthetic HiReX-SR data.
Default is 1e-3.
vuv_time_res : float, optional
The time resolution (in seconds) to use for synthetic VUV data. Default
is 1e-3.
xtomo_time_res : float, optional
The time resolution (in seconds) to use for the synthetic XTOMO data.
Default is 2e-6.
local_time_res : float, optional
The time resolution (in seconds) to use for the synthetic local data.
Default is 1e-4.
num_local_space : int, optional
Number of spatial channels to use for the synthetic local data. Default
is 20.
local_cs : List of int, optional
Charge state(s) to include in the synthetic local data. Include `None`
to use the total impurity density. Default is `[None,]`.
presampling_time : float, optional
The time (in seconds) to keep from before `time_1` when generating
synthetic data. Default is 15e-3.
synth_noises : array of float, optional
The relative noise levels to use with the synthetic data. Default is
[0.03, 0.03, 0.1].
local_synth_noise : float, optional
The relative noise level to use with the synthetic data. Default is 5e-3.
normalize : bool, optional
If True, normalized signals will be used when comparing to STRAHL output.
Default is True.
time_spec : str, optional
The time grid specification to use when writing the param file. Default
is `DEFAULT_TIME_SPEC`.
fixed_params : array of bool, (`num_params`,), optional
An array with True wherever a parameter should be held fixed. Default is
for all parameters to be free.
    initial_params : array of float, (`num_params`,), optional
Initial values for all of the parameters (both free and fixed). Default
is to use `params_true` if it is provided, or benign defaults. This
exists primarily as a mechanism to set the values of fixed parameters.
D_lb : float, optional
The lower bound on the diffusion coefficient parameters. Default is 0.0.
D_ub : float, optional
The upper bound on the diffusion coefficient parameters. Default is 30.0.
V_lb : float, optional
The lower bound on the convective velocity coefficient parameters.
Default is -100.0.
V_ub : float, optional
The upper bound on the convective velocity coefficient parameters.
Default is 50.0.
V_lb_outer : float, optional
The lower bound on the outermost convective velocity coefficient
parameter. Default is -200.0.
V_ub_outer : float, optional
The upper bound on the outermost convective velocity coefficient
parameter. Default is 0.0.
num_eig_ne : int, optional
Number of eigenvalues to use when sampling the ne profile. Default is 5.
num_eig_Te : int, optional
Number of eigenvalues to use when sampling the Te profile. Default is 3.
free_ne : bool, optional
If True, the ne profile eigenvalues will be free to vary. Default is
False.
free_Te : bool, optional
If True, the Te profile eigenvalues will be free to vary. Default is
False.
signal_mask : array of bool, optional
Indicates which synthetic signals should be generated/used. Order is:
[HiReX-SR, VUV, XTOMO]. If passed when creating the run for the first
time, only the indicated synthetic signals will be created. If passed
when restoring a run, will simply remove the synthetic signals from the
inference.
    noise_type : {'proportional Gaussian', 'Poisson'}
The noise type to use. Options are:
* 'proportional Gaussian': Gaussian noise for which the standard
deviation is equal to the relative noise level times the value.
* 'Poisson' : Gaussian noise for which the standard deviation is
equal to the relative noise level times the value divided by the
square root of the ratio of the value to the max value. This
simulates Poisson noise.
shift_prior : :py:class:`gptools.JointPrior` instance, optional
The prior distribution to use for the time shifts for each diagnostic.
Default is zero-mean Gaussian priors with 2ms standard deviation.
explicit_D : array of float, (`M`,), optional
Explicit values of D to construct the truth data with. Overrides the D
coefficient parts of `params_true`.
explicit_D_grid : array of float, (`M`,), optional
The grid the explicit values of D are given on.
explicit_V : array of float, (`M`,), optional
Explicit values of V to construct the truth data with. Overrides the V
coefficient parts of `params_true`.
explicit_V_grid : array of float, (`M`,), optional
The grid the explicit values of V are given on.
use_PMMCMC : bool, optional
If True, use pseudo-marginal nested sampling to handle the time shift
parameters. Default is False (either hold fixed or sample full
posterior).
num_pts_PMMCMC : int, optional
Number of points to use when evaluating the marginal distribution with
pseudo-marginal nested sampling. Default is 10.
method_PMMCMC : {'QMC', 'GHQ'}, optional
Method to use when computing the marginal distribution with
pseudo-marginal nested sampling. Default is 'QMC' (use quasi Monte Carlo
sampling with a Sobol sequence). The other valid option is 'GHQ' (use
Gauss-Hermite quadrature with a dense tensor product grid).
"""
def __init__(
self,
shot=0,
version=0,
time_1=0.0,
time_2=0.0,
injections=[],
tht=0,
line=6,
Te_args=[],
ne_args=[],
debug_plots=0,
num_eig_D=5,
num_eig_V=5,
roa_grid=scipy.linspace(0, 1.2, 100),
roa_grid_DV=scipy.linspace(0, 1.05, 100),
source_file=None,
method='linterp',
free_knots=False,
spline_k_D=3,
spline_k_V=3,
include_loweus=False,
use_scaling=False,
use_shift=False,
sort_knots=False,
params_true=None,
synth_li_lines=[0, 3, 5, 2, 12, 8, 6],
synth_be_lines=[0,],
hirex_time_res=1e-3,
vuv_time_res=1e-3,
xtomo_time_res=2e-6,
local_time_res=1e-4,
num_local_space=20,
inner_local_space=0.0,
outer_local_space=1.0,
local_cs=[None,],
presampling_time=15e-3,
synth_noises=[0.03, 0.03, 0.1],
local_synth_noise=5e-3,
use_line_integral=True,
use_local=True,
normalize=True,
time_spec=DEFAULT_TIME_SPEC,
fixed_params=None,
initial_params=None,
D_lb=0.0,
D_ub=30.0,
V_lb=-100.0,
V_ub=50.0,
V_lb_outer=-200.0,
V_ub_outer=0.0,
num_eig_ne=5,
num_eig_Te=3,
free_ne=False,
free_Te=False,
signal_mask=[True, True, True],
noise_type='proportional Gaussian',
shift_prior=None,
explicit_D=None,
explicit_D_grid=None,
explicit_V=None,
explicit_V_grid=None,
use_PMMCMC=False,
num_pts_PMMCMC=10,
method_PMMCMC='QMC'
):
global MASTER_DIR
if shift_prior is None:
self.shift_prior = gptools.NormalJointPrior([0.0,] * 3, [2e-3,] * 3)
else:
self.shift_prior = shift_prior
self._ll_normalization_local = None
self._ll_normalization = None
self._ar_ll_normalization = None
self.use_PMMCMC = bool(use_PMMCMC)
self.signal_mask = scipy.asarray(signal_mask, dtype=bool)
self.shot = int(shot)
self.version = int(version)
self.time_1 = float(time_1)
self.time_2 = float(time_2)
self.time_spec = time_spec.format(time_1=time_1, time_2=time_2)
self.injections = injections
self.tht = int(tht)
self.line = int(line)
self.debug_plots = bool(debug_plots)
self.include_loweus = bool(include_loweus)
self.normalize = bool(normalize)
self.params_true = scipy.asarray(params_true, dtype=float) if params_true is not None else None
self.explicit_D = scipy.asarray(explicit_D, dtype=float) if explicit_D is not None else None
self.explicit_D_grid = scipy.asarray(explicit_D_grid, dtype=float) if explicit_D_grid is not None else None
self.explicit_V = scipy.asarray(explicit_V, dtype=float) if explicit_V is not None else None
self.explicit_V_grid = scipy.asarray(explicit_V_grid, dtype=float) if explicit_V_grid is not None else None
self.sort_knots = bool(sort_knots)
self.noise_type = noise_type
self.D_lb = float(D_lb)
self.D_ub = float(D_ub)
self.V_lb = float(V_lb)
self.V_ub = float(V_ub)
self.V_lb_outer = float(V_lb_outer)
self.V_ub_outer = float(V_ub_outer)
self.num_eig_ne = num_eig_ne
self.num_eig_Te = num_eig_Te
if method == 'spline':
self.spline_k_D = int(spline_k_D)
self.spline_k_V = int(spline_k_V)
elif method == 'linterp':
self.spline_k_D = 1
self.spline_k_V = 1
else:
raise ValueError("Unknown method: %s" % (method,))
self.method = str(method)
if num_eig_D < 1:
raise ValueError("Must have at least one free coefficient for D!")
if num_eig_V < 1:
raise ValueError("Must have at least one free coefficient for V!")
self.num_eig_D = int(num_eig_D)
self.num_eig_V = int(num_eig_V)
self.source_file = str(source_file)
self.roa_grid = scipy.asarray(roa_grid, dtype=float)
self.roa_grid_DV = scipy.asarray(roa_grid_DV, dtype=float)
# Convert the psinorm grids to r/a:
self.efit_tree = eqtools.CModEFITTree(self.shot)
self.psinorm_grid = self.efit_tree.roa2psinorm(
self.roa_grid,
(self.time_1 + self.time_2) / 2.0
)
# In case there is a NaN:
self.psinorm_grid[0] = 0.0
self.psinorm_grid_DV = self.efit_tree.roa2psinorm(
self.roa_grid_DV,
(self.time_1 + self.time_2) / 2.0
)
# In case there is a NaN:
self.psinorm_grid_DV[0] = 0.0
# If a STRAHL directory doesn't exist yet, create one and set it up:
current_dir = os.getcwd()
strahl_dir = os.path.join(current_dir, self.working_dir)
if not (os.path.isdir(strahl_dir) and os.path.exists(strahl_dir)):
self.setup_files()
else:
print("STRAHL directory %s is already in place." % (strahl_dir,))
os.chdir(strahl_dir)
MASTER_DIR = os.getcwd()
# Create the strahl.control file:
self.write_control()
# Load run data into Python, save the processed data for later use:
print("Loading background profile data...")
try:
with open('run_data.pkl', 'rb') as f:
self.run_data = pkl.load(f)
print("Loaded profile data from run_data.pkl.")
except IOError:
self.run_data = RunData(self.shot, self.time_1, self.time_2, self.roa_grid, Te_args, ne_args)
with open('run_data.pkl', 'wb') as f:
pkl.dump(self.run_data, f, protocol=pkl.HIGHEST_PROTOCOL)
self._PEC = None
self.load_PEC()
self._Ar_PEC = None
self.load_Ar_PEC()
self.atomdat = read_atomdat('Ca.atomdat')
self.Ar_atomdat = read_atomdat('Ar.atomdat')
self.atdata = lines.read_atdata()
self.sindat = lines.read_sindat()
with open('Be_50_um_PEC.pkl', 'rb') as f:
self.filter_trans = pkl.load(f)
if not os.path.isfile('nete/Caflx%d.dat' % (self.shot,)):
print("Fetching source data...")
shutil.copyfile(
self.source_file,
'nete/Caflx%d.dat' % (self.shot,)
)
print("Loading signals...")
self.signals = []
self.ar_signal = None
self.local_signals = []
try:
if use_line_integral:
with open('signals.pkl', 'rb') as f:
self.signals = pkl.load(f)
with open('ar_signal.pkl', 'rb') as f:
self.ar_signal = pkl.load(f)
if use_local:
with open('local_signals.pkl', 'rb') as f:
self.local_signals = pkl.load(f)
if os.path.isfile('truth_data.pkl'):
with open('truth_data.pkl', 'rb') as f:
self.truth_data = pkl.load(f)
print("Loaded signals from signals.pkl.")
except IOError:
if self.params_true is None:
if use_line_integral:
if self.signal_mask[0]:
hirex_data = HirexData(self.injections, debug_plots=self.debug_plots)
else:
hirex_data = None
                    self.signals.append(hirex_data.signal if hirex_data is not None else None)
if self.signal_mask[1]:
vuv_data = VUVData(self.shot, self.injections, debug_plots=self.debug_plots)
else:
vuv_data = None
                    self.signals.append(vuv_data.signal if vuv_data is not None else None)
if self.signal_mask[2]:
xtomo_data = XTOMOData(self.shot, self.injections)
else:
xtomo_data = None
                    self.signals.append(xtomo_data.signal if xtomo_data is not None else None)
ar_data = HirexData(self.injections, ar=True, debug_plots=self.debug_plots)
self.ar_signal = ar_data.signal
# Write the atomdat file:
self.write_atomdat(vuv_data)
# Read it back in to ensure consistency:
self.atomdat = read_atomdat('Ca.atomdat')
# Need to put dummy values in for this to work:
# The prior won't work until the signals are created, so
# this must be done here.
self.params = self.get_prior().random_draw()
self.fixed_params = scipy.zeros_like(self.params, dtype=bool)
self.compute_view_data()
else:
# Generate the synthetic data:
# Write the atomdat file:
self.write_atomdat(None, li_like=synth_li_lines, be_like=synth_be_lines)
# Read it back in to ensure consistency:
self.atomdat = read_atomdat('Ca.atomdat')
if use_local:
# Local:
npts = int(scipy.ceil((self.time_2 - self.time_1 + presampling_time) / local_time_res))
t = scipy.linspace(-presampling_time, -presampling_time + local_time_res * (npts - 1), npts)
roa = scipy.linspace(inner_local_space, outer_local_space, num_local_space)
sqrtpsinorm_local = self.efit_tree.roa2psinorm(roa, (self.time_1 + self.time_2) / 2.0, sqrt=True)
for idx in local_cs:
self.local_signals.append(
LocalSignal(
scipy.zeros((len(t), len(sqrtpsinorm_local))),
scipy.zeros((len(t), len(sqrtpsinorm_local))),
scipy.zeros((len(t), len(sqrtpsinorm_local))),
scipy.zeros((len(t), len(sqrtpsinorm_local))),
t,
sqrtpsinorm_local,
idx
)
)
if use_line_integral:
# Load the HiReX-SR POS vectors:
data = scipy.io.readsav('run_data.sav')
ar_pos = scipy.asarray(data.ar_data.pos[0], dtype=float)
hirex_pos = scipy.asarray(data.hirex_data.pos[0], dtype=float)
# HiReX-SR:
if self.signal_mask[0]:
npts = int(scipy.ceil((self.time_2 - self.time_1 + presampling_time) / hirex_time_res))
t = scipy.linspace(-presampling_time, -presampling_time + hirex_time_res * (npts - 1), npts)
self.signals.append(
Signal(
scipy.zeros((len(t), hirex_pos.shape[0])),
scipy.zeros((len(t), hirex_pos.shape[0])),
scipy.zeros((len(t), hirex_pos.shape[0])),
scipy.zeros((len(t), hirex_pos.shape[0])),
t,
'HiReX-SR',
0,
pos=hirex_pos
)
)
else:
self.signals.append(None)
# HiReX-SR argon:
self.ar_signal = Signal(
scipy.zeros((len(t), ar_pos.shape[0])),
scipy.zeros((len(t), ar_pos.shape[0])),
scipy.zeros((len(t), ar_pos.shape[0])),
scipy.zeros((len(t), ar_pos.shape[0])),
t,
'HiReX-SR (Ar)',
0,
pos=ar_pos
)
# VUV:
if self.signal_mask[1]:
npts = int(scipy.ceil((self.time_2 - self.time_1 + presampling_time) / vuv_time_res))
t = scipy.linspace(-presampling_time, -presampling_time + vuv_time_res * (npts - 1), npts)
self.signals.append(
Signal(
scipy.zeros((len(t), len(synth_li_lines) + len(synth_be_lines))),
scipy.zeros((len(t), len(synth_li_lines) + len(synth_be_lines))),
scipy.zeros((len(t), len(synth_li_lines) + len(synth_be_lines))),
scipy.zeros((len(t), len(synth_li_lines) + len(synth_be_lines))),
t,
['XEUS',] * len(synth_li_lines) + ['LoWEUS',] * len(synth_be_lines),
range(1, len(synth_li_lines) + len(synth_be_lines) + 1),
pos=scipy.vstack([XEUS_POS,] * len(synth_li_lines) + [LOWEUS_POS,] * len(synth_be_lines)),
blocks=range(0, len(synth_li_lines) + len(synth_be_lines))
)
)
else:
self.signals.append(None)
# XTOMO:
if self.signal_mask[2]:
npts = int(scipy.ceil((self.time_2 - self.time_1 + presampling_time) / xtomo_time_res))
t = scipy.linspace(-presampling_time, -presampling_time + xtomo_time_res * (npts - 1), npts)
self.signals.append(
Signal(
scipy.zeros((len(t), 38 * 2)),
scipy.zeros((len(t), 38 * 2)),
scipy.zeros((len(t), 38 * 2)),
scipy.zeros((len(t), 38 * 2)),
t,
['XTOMO 1',] * 38 + ['XTOMO 3',] * 38,
-1,
blocks=[1,] * 38 + [3,] * 38
)
)
self.signals[-1].weight_idxs = scipy.hstack((range(0, 38), range(0, 38)))
else:
self.signals.append(None)
# Compute the view data:
self.params = self.params_true.copy()
self.fixed_params = scipy.zeros_like(self.params, dtype=bool)
self.compute_view_data()
# Get the base cs_den truth data:
# The prior won't work until the signals are created, so
# this must be done here.
self.params = self.params_true.copy()
self.fixed_params = scipy.zeros_like(self.params, dtype=bool)
cs_den, sqrtpsinorm, time, ne, Te = self.DV2cs_den(
debug_plots=debug_plots,
explicit_D=self.explicit_D,
explicit_D_grid=self.explicit_D_grid,
explicit_V=self.explicit_V,
explicit_V_grid=self.explicit_V_grid
)
# Temporarily override normalization so we can get the
# absolute and relative signals:
# First, absolute:
self.normalize = False
# First for (time-dependent) CaF2:
self.params = self.params_true.copy()
dlines = self.cs_den2dlines(cs_den, sqrtpsinorm, time, ne, Te, debug_plots=debug_plots)
sig_abs = self.dlines2sig(dlines, time, debug_plots=debug_plots)
for s, ss in zip(sig_abs, self.signals):
if ss is not None:
ss.y = s
# Now for Ar:
cs_den_ar, sqrtpsinorm, time_ar, ne, Te = self.DV2cs_den(
params=self.params_true,
debug_plots=debug_plots,
steady_ar=1e15,
explicit_D=self.explicit_D,
explicit_D_grid=self.explicit_D_grid,
explicit_V=self.explicit_V,
explicit_V_grid=self.explicit_V_grid
)
dlines_ar = self.cs_den2dlines(cs_den_ar, sqrtpsinorm, time_ar, ne, Te, debug_plots=debug_plots, steady_ar=1e15)
sig_abs_ar = self.dlines2sig(dlines_ar, time_ar, debug_plots=debug_plots, steady_ar=1e15)
self.ar_signal.y[:, :] = sig_abs_ar
# Now, normalized:
self.normalize = True
# First for (time-dependent) CaF2:
sig_norm = self.dlines2sig(dlines, time, debug_plots=debug_plots)
for s, ss in zip(sig_norm, self.signals):
if ss is not None:
ss.y_norm = s
# Now for Ar:
sig_norm_ar = self.dlines2sig(dlines_ar, time_ar, debug_plots=debug_plots, steady_ar=1e15)
self.ar_signal.y_norm[:, :] = sig_norm_ar
# Now set it back:
self.normalize = normalize
# Write a file with the truth values:
self.truth_data = TruthData(
params_true,
cs_den,
time,
sqrtpsinorm,
dlines=dlines,
sig_abs=sig_abs,
sig_norm=sig_norm,
cs_den_ar=cs_den_ar,
dlines_ar=dlines_ar,
sig_abs_ar=sig_abs_ar,
sig_norm_ar=sig_norm_ar,
time_ar=time_ar,
explicit_D=self.explicit_D,
explicit_D_grid=self.explicit_D_grid,
explicit_V=self.explicit_V,
explicit_V_grid=self.explicit_V_grid
)
else:
# Get the base cs_den truth data:
# The prior won't work until the signals are created, so
# this must be done here.
self.params = self.params_true.copy()
self.fixed_params = scipy.zeros_like(self.params, dtype=bool)
cs_den, sqrtpsinorm, time, ne, Te = self.DV2cs_den(
debug_plots=debug_plots,
explicit_D=self.explicit_D,
explicit_D_grid=self.explicit_D_grid,
explicit_V=self.explicit_V,
explicit_V_grid=self.explicit_V_grid
)
                    self.ar_signal = None
self.truth_data = TruthData(
params_true,
cs_den,
time,
sqrtpsinorm,
explicit_D=self.explicit_D,
explicit_D_grid=self.explicit_D_grid,
explicit_V=self.explicit_V,
explicit_V_grid=self.explicit_V_grid
)
with open('truth_data.pkl', 'wb') as f:
pkl.dump(self.truth_data, f, protocol=pkl.HIGHEST_PROTOCOL)
# Apply noise:
self.apply_noise(noises=synth_noises, local_noise=local_synth_noise, noise_type=self.noise_type)
if use_line_integral:
with open('signals.pkl', 'wb') as f:
pkl.dump(self.signals, f, protocol=pkl.HIGHEST_PROTOCOL)
with open('ar_signal.pkl', 'wb') as f:
pkl.dump(self.ar_signal, f, protocol=pkl.HIGHEST_PROTOCOL)
if use_local:
with open('local_signals.pkl', 'wb') as f:
pkl.dump(self.local_signals, f, protocol=pkl.HIGHEST_PROTOCOL)
# Sort the time axes:
for s in self.signals:
if s is not None:
s.sort_t()
if self.ar_signal is not None:
self.ar_signal.sort_t()
# Handle the initial params:
if initial_params is not None:
self.params = scipy.asarray(initial_params, dtype=float)
elif params_true is not None:
self.params = self.params_true.copy()
else:
# Get the structure with a random draw (this sets D, V):
self.params = self.get_prior().random_draw()
# TODO: This is pretty hackish, can probably be done more cleanly...
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# Number of diagnostics (determines number of time shifts):
nDiag = len(self.signals)
knots_D = scipy.linspace(0, 1.05, nkD + 2)[1:-1]
knots_V = scipy.linspace(0, 1.05, nkV + 2)[1:-1]
# Set each class of param to a benign default:
# Knots for D:
self.params[nD + nV:nD + nV + nkD] = knots_D
# Knots for V:
self.params[nD + nV + nkD:nD + nV + nkD + nkV] = knots_V
# Scaling factors:
self.params[nD + nV + nkD + nkV:nD + nV + nkD + nkV + nS] = 1.0
# Time shifts:
self.params[nD + nV + nkD + nkV + nS:nD + nV + nkD + nkV + nS + nDiag] = 0.0
# ne eigenvalues:
self.params[
nD + nV + nkD + nkV + nS + nDiag:
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne
] = 0.0
# Te eigenvalues:
self.params[
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne:
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne + self.num_eig_Te
] = 0.0
# Handle the fixed params:
if fixed_params is not None:
self.fixed_params = scipy.asarray(fixed_params, dtype=bool)
else:
self.fixed_params = scipy.zeros_like(self.params, dtype=bool)
# Set these here:
self.free_knots = free_knots
self.use_scaling = use_scaling
self.use_shift = use_shift
self.free_ne = free_ne
self.free_Te = free_Te
# Set up the grids for PMMCMC:
# Here I will use quasi Monte Carlo importance sampling, but the
# implementation is done in a way that makes it trivial to switch to
# sparse grid quadrature at a later date.
if self.use_PMMCMC:
self.method_PMMCMC = str(method_PMMCMC)
num_pts_PMMCMC = int(num_pts_PMMCMC)
if self.method_PMMCMC == 'QMC':
self.dt_quad_arr = scipy.zeros((num_pts_PMMCMC, self.signal_mask.sum()))
for i in range(0, num_pts_PMMCMC):
# start from 1 to not include the -inf point:
q, dum = sobol.i4_sobol(self.signal_mask.sum(), i + 1)
                    # Pad this out so that the full shift_prior can be used:
u = 0.5 * scipy.ones(len(self.signals))
u[self.signal_mask] = q
p = self.shift_prior.sample_u(u)
self.dt_quad_arr[i, :] = p[self.signal_mask]
# Mask out the inf/nan values:
mask = (scipy.isinf(self.dt_quad_arr).any(axis=1)) | (scipy.isnan(self.dt_quad_arr).any(axis=1))
self.dt_quad_arr = self.dt_quad_arr[~mask, :]
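                # Note: each row of dt_quad_arr is a quasi-random draw of the
                # active time shifts, obtained by pushing a Sobol point through
                # the shift prior via sample_u; the rows are re-used in
                # dlines2ln_prob_marg to marginalize over the shifts.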
elif self.method_PMMCMC == 'GHQ':
if not isinstance(self.shift_prior, gptools.NormalJointPrior):
raise ValueError("PMMCMC method GHQ only works for normal priors on the time shifts!")
mu = self.shift_prior.mu[self.signal_mask]
sigma = self.shift_prior.sigma[self.signal_mask]
pts, wts = numpy.polynomial.hermite.hermgauss(num_pts_PMMCMC)
self.dt_quad_arr = scipy.sqrt(2.0) * sigma * pts[:, None] + mu
self.ln_dt_quad_wts = scipy.log(1.0 / (scipy.sqrt(2.0 * scipy.pi) * sigma) * wts[:, None])
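                # Gauss-Hermite sketch: int f(t) N(t; mu, sigma^2) dt is
                # approximately sum_i w_i f(sqrt(2) * sigma * x_i + mu) / sqrt(pi).
                # dt_quad_arr holds the mapped abscissae and ln_dt_quad_wts the
                # log-weights (up to a constant factor, which does not affect
                # the MCMC sampling).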
else:
raise ValueError("Unknown method for PMMCMC marginalization!")
@property
def num_params(self):
return len(self.fixed_params)
@property
def param_bounds(self):
return self.get_prior().bounds
@param_bounds.setter
def param_bounds(self, value):
self.get_prior().bounds = value
@property
def num_free_params(self):
"""Returns the number of free parameters.
"""
return sum(~self.fixed_params)
@property
def free_param_idxs(self):
"""Returns the indices of the free parameters in the main arrays of parameters, etc.
"""
return scipy.arange(0, self.num_params)[~self.fixed_params]
@property
def free_params(self):
"""Returns the values of the free parameters.
Returns
-------
free_params : :py:class:`Array`
Array of the free parameters, in order.
"""
return gptools.MaskedBounds(self.params, self.free_param_idxs)
@free_params.setter
def free_params(self, value):
self.params[self.free_param_idxs] = scipy.asarray(value, dtype=float)
@property
def free_param_bounds(self):
"""Returns the bounds of the free parameters.
Returns
-------
free_param_bounds : :py:class:`Array`
Array of the bounds of the free parameters, in order.
"""
return scipy.asarray(self.get_prior().bounds[:], dtype=float)[self.free_param_idxs, :]
@free_param_bounds.setter
def free_param_bounds(self, value):
# Need to use a loop since self.get_prior().bounds is NOT guaranteed to support fancy indexing.
p = self.get_prior()
for i, v in zip(self.free_param_idxs, value):
            p.bounds[i] = v
@property
def free_param_names(self):
"""Returns the names of the free parameters.
Returns
-------
free_param_names : :py:class:`Array`
Array of the names of the free parameters, in order.
"""
return gptools.MaskedBounds(self.param_names, self.free_param_idxs)
# The following won't work because of how I build the array of labels with
# get_labels().
# @free_param_names.setter
# def free_param_names(self, value):
# # Cast to array in case it hasn't been done already:
# self.param_names = scipy.asarray(self.param_names, dtype=str)
# self.param_names[~self.fixed_params] = value
@property
def param_names(self):
return scipy.asarray(self.get_labels(), dtype=str)
@property
def free_knots(self):
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
return (~self.fixed_params[nD + nV:nD + nV + nkD + nkV]).any()
@free_knots.setter
def free_knots(self, val):
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
self.fixed_params[nD + nV:nD + nV + nkD + nkV] = not val
@property
def use_scaling(self):
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# # Number of diagnostics (determines number of time shifts):
# nDiag = len(self.signals)
return (~self.fixed_params[nD + nV + nkD + nkV:nD + nV + nkD + nkV + nS]).any()
@use_scaling.setter
def use_scaling(self, val):
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
self.fixed_params[nD + nV + nkD + nkV:nD + nV + nkD + nkV + nS] = not val
@property
def use_shift(self):
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# Number of diagnostics (determines number of time shifts):
nDiag = len(self.signals)
return (~self.fixed_params[nD + nV + nkD + nkV + nS:nD + nV + nkD + nkV + nS + nDiag]).any()
@use_shift.setter
def use_shift(self, val):
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# Number of diagnostics (determines number of time shifts):
nDiag = len(self.signals)
self.fixed_params[nD + nV + nkD + nkV + nS:nD + nV + nkD + nkV + nS + nDiag] = not val
@property
def free_ne(self):
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# Number of diagnostics (determines number of time shifts):
nDiag = len(self.signals)
return (
~self.fixed_params[
nD + nV + nkD + nkV + nS + nDiag:
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne
]
).any()
@free_ne.setter
def free_ne(self, val):
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# Number of diagnostics (determines number of time shifts):
nDiag = len(self.signals)
self.fixed_params[
nD + nV + nkD + nkV + nS + nDiag:
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne
] = not val
@property
def free_Te(self):
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# Number of diagnostics (determines number of time shifts):
nDiag = len(self.signals)
return (
~self.fixed_params[
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne:
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne + self.num_eig_Te
]
).any()
@free_Te.setter
def free_Te(self, val):
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# Number of diagnostics (determines number of time shifts):
nDiag = len(self.signals)
self.fixed_params[
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne:
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne + self.num_eig_Te
] = not val
@property
def knotgrid_D(self):
"""Grid of knots to use when evaluating the D profile.
Takes the (internal) knots given in :py:attr:`self.params` and puts the
boundary knots given by the extreme values of :py:attr:`self.roa_grid_DV`
at either end.
"""
eig_D, eig_V, knots_D, knots_V, param_scaling, param_source, eig_ne, eig_Te = self.split_params()
return scipy.concatenate(([self.roa_grid_DV[0],], knots_D, [self.roa_grid_DV[-1],]))
@property
def knotgrid_V(self):
"""Grid of knots to use when evaluating the V profile.
Takes the (internal) knots given in :py:attr:`self.params` and puts the
boundary knots given by the extreme values of :py:attr:`self.roa_grid_DV`
at either end.
"""
eig_D, eig_V, knots_D, knots_V, param_scaling, param_source, eig_ne, eig_Te = self.split_params()
return scipy.concatenate(([self.roa_grid_DV[0],], knots_V, [self.roa_grid_DV[-1],]))
def apply_noise(self, noises=[0.03, 0.03, 0.1], local_noise=5e-3, noise_type='proportional Gaussian'):
"""Apply random noise to the data.
Parameters
----------
noises : array of float, optional
The relative noise level to apply to each signal in
:py:attr:`self.signals`. The first element is also used for the
argon data. Default is [0.03, 0.03, 0.1].
local_noise : float, optional
The noise level to use for the local signals. Default is 5e-3.
noise_type: {'proportional Gaussian', 'Poisson'}
The noise type to use. Options are:
* 'proportional Gaussian': Gaussian noise for which the standard
deviation is equal to the relative noise level times the value.
* 'Poisson' : Gaussian noise for which the standard deviation is
equal to the relative noise level times the value divided by the
square root of the ratio of the value to the max value. This
simulates Poisson noise.
"""
for i, (n, s) in enumerate(zip(noises, self.signals)):
if s is not None:
if noise_type == 'proportional Gaussian':
s.y = self.truth_data.sig_abs[i] * (1.0 + n * scipy.randn(*self.truth_data.sig_abs[i].shape))
s.y[s.y < 0.0] = 0.0
s.std_y = n * self.truth_data.sig_abs[i]
s.std_y[s.std_y < 1e-4 * s.y.max()] = 1e-4 * s.y.max()
s.y_norm = self.truth_data.sig_norm[i] * (1.0 + n * scipy.randn(*self.truth_data.sig_norm[i].shape))
                    s.y_norm[s.y_norm < 0.0] = 0.0
s.std_y_norm = n * self.truth_data.sig_norm[i]
s.std_y_norm[s.std_y_norm < 1e-4 * s.y_norm.max()] = 1e-4 * s.y_norm.max()
elif noise_type == 'Poisson':
sig_max = self.truth_data.sig_abs[i].max()
s.std_y = n * scipy.sqrt(sig_max * self.truth_data.sig_abs[i])
s.std_y[s.std_y < 1e-4 * sig_max] = 1e-4 * sig_max
s.y = self.truth_data.sig_abs[i] + s.std_y * scipy.randn(*self.truth_data.sig_abs[i].shape)
s.y[s.y < 0.0] = 0.0
sig_max_norm = self.truth_data.sig_norm[i].max()
s.std_y_norm = n * scipy.sqrt(sig_max_norm * self.truth_data.sig_norm[i])
s.std_y_norm[s.std_y_norm < 1e-4 * sig_max_norm] = 1e-4 * sig_max_norm
s.y_norm = self.truth_data.sig_norm[i] + s.std_y_norm * scipy.randn(*self.truth_data.sig_norm[i].shape)
s.y_norm[s.y_norm < 0.0] = 0.0
else:
raise ValueError("Unknown noise type!")
n = local_noise
for sls in self.local_signals:
if sls.cs_den_idx is None:
spl = scipy.interpolate.RectBivariateSpline(
self.truth_data.time - self.time_1,
self.truth_data.sqrtpsinorm,
self.truth_data.cs_den.sum(axis=1)
)
cs_den_norm = self.truth_data.cs_den.sum(axis=1)
else:
spl = scipy.interpolate.RectBivariateSpline(
self.truth_data.time - self.time_1,
self.truth_data.sqrtpsinorm,
self.truth_data.cs_den[:, sls.cs_den_idx, :]
)
cs_den_norm = self.truth_data.cs_den[:, sls.cs_den_idx, :]
if noise_type == 'proportional Gaussian':
sls.y = spl(sls.t, sls.sqrtpsinorm)
sls.std_y = n * sls.y
sls.y *= (1.0 + n * scipy.randn(*sls.y.shape))
sls.y[sls.y < 0.0] = 0.0
sls.std_y[sls.std_y < 1e-4 * sls.y.max()] = 1e-4 * sls.y.max()
# Just normalize to the innermost chord:
cs_den_norm = cs_den_norm / cs_den_norm[:, 0].max()
spl = scipy.interpolate.RectBivariateSpline(self.truth_data.time - self.time_1, self.truth_data.sqrtpsinorm, cs_den_norm)
sls.y_norm = spl(sls.t, sls.sqrtpsinorm)
sls.std_y_norm = n * sls.y_norm
sls.y_norm *= (1.0 + n * scipy.randn(*sls.y_norm.shape))
sls.y_norm[sls.y_norm < 0.0] = 0.0
sls.std_y_norm[sls.std_y_norm < 1e-4 * sls.y_norm.max()] = 1e-4 * sls.y_norm.max()
else:
raise NotImplementedError("Not done!")
# Needs special handling since sig_*_ar just has a single timepoint:
if self.ar_signal is not None:
if noise_type == 'proportional Gaussian':
self.ar_signal.y[:, :] = self.truth_data.sig_abs_ar * (1.0 + noises[0] * scipy.randn(*self.ar_signal.y.shape))
self.ar_signal.y[self.ar_signal.y < 0.0] = 0.0
self.ar_signal.std_y[:, :] = noises[0] * self.truth_data.sig_abs_ar
self.ar_signal.std_y[self.ar_signal.std_y < 1e-4 * self.ar_signal.y.max()] = 1e-4 * self.ar_signal.y.max()
self.ar_signal.y_norm[:, :] = self.truth_data.sig_norm_ar * (1.0 + noises[0] * scipy.randn(*self.ar_signal.y.shape))
self.ar_signal.y_norm[self.ar_signal.y_norm < 0.0] = 0.0
self.ar_signal.std_y_norm[:, :] = noises[0] * self.truth_data.sig_norm_ar
self.ar_signal.std_y_norm[self.ar_signal.std_y_norm < 1e-4 * self.ar_signal.y_norm.max()] = 1e-4 * self.ar_signal.y_norm.max()
elif noise_type == 'Poisson':
sig_max_ar = self.truth_data.sig_abs_ar.max()
self.ar_signal.std_y[:, :] = noises[0] * scipy.sqrt(sig_max_ar * self.truth_data.sig_abs_ar)
self.ar_signal.std_y[self.ar_signal.std_y < 1e-4 * sig_max_ar] = 1e-4 * sig_max_ar
self.ar_signal.y[:, :] = self.truth_data.sig_abs_ar + self.ar_signal.std_y * scipy.randn(*self.truth_data.sig_abs_ar.shape)
self.ar_signal.y[self.ar_signal.y < 0.0] = 0.0
sig_max_ar_norm = self.truth_data.sig_norm_ar.max()
self.ar_signal.std_y_norm[:, :] = noises[0] * scipy.sqrt(sig_max_ar_norm * self.truth_data.sig_norm_ar)
self.ar_signal.std_y_norm[self.ar_signal.std_y_norm < 1e-4 * sig_max_ar_norm] = 1e-4 * sig_max_ar_norm
self.ar_signal.y_norm[:, :] = self.truth_data.sig_norm_ar + self.ar_signal.std_y_norm * scipy.randn(*self.truth_data.sig_norm_ar.shape)
self.ar_signal.y_norm[self.ar_signal.y_norm < 0.0] = 0.0
else:
raise ValueError("Unknown noise type!")
def eval_DV(self, params=None, plot=False, lc=None, label=None):
"""Evaluate the D, V profiles for the given parameters.
Parameters
----------
params : array of float
The parameters to evaluate at.
plot : bool, optional
If True, a plot of D and V will be produced. Default is False.
"""
# Hack to make it work with older matplotlib on psfcstor1:
if matplotlib.__version__ == '1.3.1' and lc is None:
lc = 'b'
if params is not None:
params = scipy.asarray(params, dtype=float)
if len(params) == self.num_params:
self.params = params
else:
self.free_params = params
eig_D, eig_V, knots_D, knots_V, param_scaling, param_source, eig_ne, eig_Te = self.split_params()
knotgrid_D = self.knotgrid_D
knotgrid_V = self.knotgrid_V
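        # Note: the D coefficient array is padded by repeating its first value
        # at the axis (so the profile starts out flat) and the V array is
        # padded with a zero (pinning V(0) = 0); this appears to be the intent
        # of the scipy.insert calls below.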
if self.method == 'spline':
D = spev(
knotgrid_D,
scipy.insert(eig_D, 0, eig_D[0]),
self.spline_k_D,
self.roa_grid_DV
)
# Hackishly attempt to prevent numerical issues with STRAHL:
D[0] = D[1]
V = spev(
knotgrid_V,
scipy.insert(eig_V, 0, 0.0),
self.spline_k_V,
self.roa_grid_DV
)
elif self.method == 'linterp':
D = scipy.interpolate.InterpolatedUnivariateSpline(
knotgrid_D,
scipy.insert(eig_D, 0, eig_D[0]),
k=1
)(self.roa_grid_DV)
V = scipy.interpolate.InterpolatedUnivariateSpline(
knotgrid_V,
scipy.insert(eig_V, 0, 0.0),
k=1
)(self.roa_grid_DV)
else:
raise ValueError("Unknown method '%s'!" % (self.method,))
if plot:
f = plt.figure()
a_D = f.add_subplot(3, 1, 1)
a_V = f.add_subplot(3, 1, 2, sharex=a_D)
a_VD = f.add_subplot(3, 1, 3, sharex=a_D)
a_D.set_xlabel('$r/a$')
a_V.set_xlabel('$r/a$')
a_VD.set_xlabel('$r/a$')
a_D.set_ylabel('$D$ [m$^2$/s]')
a_V.set_ylabel('$V$ [m/s]')
a_VD.set_ylabel('$V/D$ [1/m]')
a_D.plot(self.roa_grid_DV, D, color=lc, label=label)
a_V.plot(self.roa_grid_DV, V, color=lc, label=label)
a_VD.plot(self.roa_grid_DV, V / D, color=lc, label=label)
return (D, V)
def propagate_u(self, u, cov, nsamp=1000, debug_plots=False):
r"""Propagate the uncertainties `cov` through :py:meth:`eval_DV`.
Parameters
----------
u : array of float, (`num_params`,)
The parameters to evaluate at, mapped to :math:`[0, 1]` using the
CDF.
cov : array of float, (`num_params`, `num_params`)
The covariance matrix (i.e., the inverse Hessian returned by the
optimizer in typical applications) to use.
nsamp : int, optional
The number of Monte Carlo samples to take. Default is 1000.
"""
u_samples = numpy.random.multivariate_normal(u, cov, size=nsamp)
u_samples[u_samples > 1.0] = 1.0
u_samples[u_samples < 0.0] = 0.0
p = self.get_prior()
D = scipy.zeros((nsamp, len(self.roa_grid_DV)))
V = scipy.zeros((nsamp, len(self.roa_grid_DV)))
for i, uv in enumerate(u_samples):
D[i, :], V[i, :] = self.eval_DV(p.sample_u(uv))
mu_D = scipy.mean(D, axis=0)
mu_V = scipy.mean(V, axis=0)
std_D = scipy.std(D, axis=0, ddof=1)
std_V = scipy.std(V, axis=0, ddof=1)
if debug_plots:
f = plt.figure()
a_D = f.add_subplot(2, 1, 1)
a_V = f.add_subplot(2, 1, 2)
D_test, V_test = self.eval_DV(p.sample_u(u))
a_D.plot(self.roa_grid_DV, D_test, 'r')
a_V.plot(self.roa_grid_DV, V_test, 'r')
gptools.univariate_envelope_plot(
self.roa_grid_DV,
mu_D,
std_D,
ax=a_D,
color='b'
)
gptools.univariate_envelope_plot(
self.roa_grid_DV,
mu_V,
std_V,
ax=a_V,
color='b'
)
a_D.set_ylabel("$D$ [m$^2$/s]")
a_V.set_ylabel("$V$ [m/s]")
a_V.set_xlabel("$r/a$")
return mu_D, std_D, mu_V, std_V
def split_params(self, params=None):
"""Split the given param vector into its constituent parts.
Any parameters which are infinite are set to `1e-100 * sys.float_info.max`.
The parts are, in order:
1. Coefficients for the D profile.
2. Coefficients for the V profile.
3. Knot locations for the D profile.
4. Knot locations for the V profile.
5. Scaling parameters for the signals.
6. Time shift parameters for the diagnostics.
7. Eigenvalues for the ne profile.
8. Eigenvalues for the Te profile.
Parameters
----------
params : array of float, (`num_params`,), optional
The parameter vector to split. If not provided,
:py:attr:`self.params` is used. This vector should contain all of
the parameters, not just the free parameters.
Returns
-------
split_params : tuple
Tuple of arrays of params, split as described above.
"""
if params is None:
params = self.params
else:
params = scipy.asarray(params, dtype=float)
# Try to avoid some stupid issues:
params[params == scipy.inf] = 1e-100 * sys.float_info.max
params[params == -scipy.inf] = -1e-100 * sys.float_info.max
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# Number of diagnostics (determines number of time shifts):
nDiag = len(self.signals)
# Split up:
eig_D = params[:nD]
eig_V = params[nD:nD + nV]
knots_D = params[nD + nV:nD + nV + nkD]
knots_V = params[nD + nV + nkD:nD + nV + nkD + nkV]
param_scaling = params[nD + nV + nkD + nkV:nD + nV + nkD + nkV + nS]
param_source = params[nD + nV + nkD + nkV + nS:nD + nV + nkD + nkV + nS + nDiag]
eig_ne = params[
nD + nV + nkD + nkV + nS + nDiag:
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne
]
eig_Te = params[
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne:
nD + nV + nkD + nkV + nS + nDiag + self.num_eig_ne + self.num_eig_Te
]
if self.sort_knots:
knots_D = scipy.sort(knots_D)
knots_V = scipy.sort(knots_V)
# Fudge the source times and scaling factors since this seems to go
# crazy:
param_scaling[param_scaling > 1e3] = 1e3
param_scaling[param_scaling < 1e-3] = 1e-3
param_source[param_source > 1e3] = 1e3
param_source[param_source < -1e3] = -1e3
return eig_D, eig_V, knots_D, knots_V, param_scaling, param_source, eig_ne, eig_Te
def get_prior(self):
"""Return the prior distribution.
This is a :py:class:`gptools.JointPrior` instance -- when called as a
function, it returns the log-probability.
"""
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# Number of diagnostics (determines number of time shifts):
nDiag = len(self.signals)
# Coefficients:
prior = gptools.UniformJointPrior(
[(self.D_lb, self.D_ub)] * nD +
[(self.V_lb, self.V_ub)] * (nV - 1) +
[(self.V_lb_outer, self.V_ub_outer)]
)
# Knots:
if nkD + nkV > 0:
if self.sort_knots:
prior = prior * gptools.UniformJointPrior(
[(self.roa_grid_DV.min(), self.roa_grid_DV.max())] * (nkD + nkV)
)
else:
prior = prior * (
gptools.SortedUniformJointPrior(
nkD,
self.roa_grid_DV.min(),
self.roa_grid_DV.max()
) *
gptools.SortedUniformJointPrior(
nkV,
self.roa_grid_DV.min(),
self.roa_grid_DV.max()
)
)
if nS > 0:
# Scaling:
prior = prior * gptools.GammaJointPriorAlt([1.0,] * nS, [0.1,] * nS)
# Shifts:
prior = prior * self.shift_prior
# ne, Te:
if self.num_eig_ne + self.num_eig_Te > 0:
prior = prior * gptools.NormalJointPrior(
[0.0,] * (self.num_eig_ne + self.num_eig_Te),
[1.0,] * (self.num_eig_ne + self.num_eig_Te)
)
return prior
def DV2cs_den(
self,
params=None,
explicit_D=None,
explicit_D_grid=None,
explicit_V=None,
explicit_V_grid=None,
steady_ar=None,
debug_plots=False,
no_write=False,
no_strahl=False,
compute_view_data=False
):
"""Calls STRAHL with the given parameters and returns the charge state densities.
If evaluation of the profiles from the given parameters fails, returns a
single NaN. This failure can either take the form of :py:meth:`eval_DV`
raising a :py:class:`ValueError`, or there being an Inf or NaN in the
resulting profiles.
Returns a single int of the return code if the call to STRAHL fails.
Returns NaN if STRAHL fails due to max iterations.
If everything works, returns a tuple of the following:
* `cs_den`: Charge state densities. Array of float with shape
(`n_time`, `n_cs`, `n_space`)
* `sqrtpsinorm`: Square root of psinorm grid used for the results. Array
of float with shape (`n_space`,)
* `time`: Time grid used for the results. Array of float with shape
(`n_time`,)
* `ne`: Electron density profile used by STRAHL. Array of float with
shape (`n_time`, `n_space`).
* `Te`: Electron temperature profile used by STRAHL. Array of float with
shape (`n_time`, `n_space`).
Parameters
----------
params : array of float, optional
The parameters to use when evaluating the model. The order is:
* eig_D: The eigenvalues to use when evaluating the D profile.
* eig_V: The eigenvalues to use when evaluating the V profile.
* knots_D: The knots of the D profile.
* knots_V: The knots of the V profile.
* scaling: The scaling factors for each diagnostic.
* param_source: The parameters to use for the model source function.
If absent, :py:attr:`self.params` is used.
explicit_D : array of float, optional
Explicit values of D to use. Overrides the profile which would have
been obtained from the parameters in `params` (but the scalings/etc.
from `params` are still used).
explicit_D_grid : array of float, optional
Grid of sqrtpsinorm which `explicit_D` is given on.
explicit_V : array of float, optional
Explicit values of V to use. Overrides the profile which would have
been obtained from the parameters in `params` (but the scalings/etc.
from `params` are still used).
explicit_V_grid : array of float, optional
Grid of sqrtpsinorm which `explicit_V` is given on.
steady_ar : float, optional
If present, will compute the steady-state (constant-source) Ar
profiles for the given source instead of the time-evolving Ca
profiles. Default is None.
debug_plots : bool, optional
If True, plots of the various steps will be generated. Default is
False (do not produce plots).
no_write : bool, optional
If True, the STRAHL control files are not written. Default is False.
no_strahl : bool, optional
If True, STRAHL is not actually called (and the existing results
file is evaluated). Used for debugging. Default is False.
compute_view_data : bool, optional
Set this to True to only compute the view_data.sav file. (Returns
the sqrtpsinorm grid STRAHL uses.)
"""
global NUM_STRAHL_CALLS
if params is not None:
params = scipy.asarray(params, dtype=float)
if len(params) == self.num_params:
self.params = params
else:
self.free_params = params
else:
params = self.params
eig_D, eig_V, knots_D, knots_V, param_scaling, param_source, eig_ne, eig_Te = self.split_params()
if (explicit_D is None) or (explicit_V is None):
try:
D, V = self.eval_DV(plot=debug_plots)
except ValueError:
print("Failure evaluating profiles!")
print(params)
return scipy.nan
# Get the correct grids, handle explicit D and V:
if explicit_D is not None:
D = explicit_D
D_grid = explicit_D_grid
else:
D_grid = scipy.sqrt(self.psinorm_grid_DV)
if explicit_V is not None:
V = explicit_V
V_grid = explicit_V_grid
else:
V_grid = scipy.sqrt(self.psinorm_grid_DV)
# Check for bad values in D, V profiles:
if scipy.isinf(D).any() or scipy.isnan(D).any():
print("inf in D!")
print(params)
return scipy.nan
if scipy.isinf(V).any() or scipy.isnan(V).any():
print("inf in V!")
print(params)
return scipy.nan
# Evaluate ne, Te:
# TODO: Can probably speed things up by caching this when using fixed
# values!
ne_in = self.run_data.ne_p.gp.draw_sample(
self.run_data.ne_X,
rand_vars=scipy.atleast_2d(eig_ne).T,
method='eig',
num_eig=self.num_eig_ne,
mean=self.run_data.ne_res['mean_val'],
cov=self.run_data.ne_res['cov']
)[:, 0]
Te_in = self.run_data.Te_p.gp.draw_sample(
self.run_data.Te_X,
rand_vars=scipy.atleast_2d(eig_Te).T,
method='eig',
num_eig=self.num_eig_Te,
mean=self.run_data.Te_res['mean_val'],
cov=self.run_data.Te_res['cov']
)[:, 0]
# HACK to get rid of negative values in ne, Te:
ne_in[ne_in < 0.0] = 0.0
Te_in[Te_in < 0.0] = 0.0
# Now write the param and pp files, if required:
if not no_write:
            # Need to override the start/end times if steady_ar is not None:
if steady_ar is None:
time_2_override = None
else:
time_2_override = self.time_1 + 0.2
# TODO: Can probably speed things up by not re-writing the control
# and pp files if they aren't changing!
self.write_control(time_2_override=time_2_override)
self.write_pp(
scipy.sqrt(self.psinorm_grid),
ne_in,
Te_in,
self.time_2 if steady_ar is None else time_2_override
)
self.write_param(
D_grid,
V_grid,
D,
V,
# compute_NC=compute_NC,
const_source=steady_ar,
element='Ca' if steady_ar is None else 'Ar',
time_2_override=time_2_override
)
# Now call STRAHL:
try:
if no_strahl:
out = 'STRAHL not run!'
else:
command = ['./strahl', 'a', 'n']
# The "n" disables STRAHL's calculation of radiation.
NUM_STRAHL_CALLS += 1
start = time_.time()
out = subprocess.check_output(command, stderr=subprocess.STDOUT)
elapsed = time_.time() - start
if debug_plots:
print(elapsed)
except subprocess.CalledProcessError as e:
print("STRAHL exited with error code %d." % (e.returncode))
return e.returncode
# Process the results:
f = scipy.io.netcdf.netcdf_file('result/strahl_result.dat', 'r')
sqrtpsinorm = scipy.asarray(f.variables['rho_poloidal_grid'][:], dtype=float)
if compute_view_data:
return sqrtpsinorm
time = scipy.asarray(f.variables['time'][:], dtype=float)
# Check to make sure it ran through:
if time[-1] <= self.time_2 - 0.1 * (self.time_2 - self.time_1):
print(time[-1])
print(len(time))
print("STRAHL failed (max iterations)!")
print(params)
return scipy.nan
# cs_den has shape (n_time, n_cs, n_space)
cs_den = scipy.asarray(f.variables['impurity_density'][:], dtype=float)
# These are needed for subsequent calculations:
ne = scipy.asarray(f.variables['electron_density'][:], dtype=float)
Te = scipy.asarray(f.variables['electron_temperature'][:], dtype=float)
if debug_plots:
roa = self.efit_tree.psinorm2roa(sqrtpsinorm**2.0, (self.time_1 + self.time_2) / 2.0)
# Plot the ne, Te profiles:
f = plt.figure()
a_ne = f.add_subplot(2, 1, 1)
gptools.univariate_envelope_plot(
self.run_data.ne_X,
self.run_data.ne_res['mean_val'],
self.run_data.ne_res['std_val'],
ax=a_ne
)
a_ne.plot(self.run_data.ne_X, ne_in)
a_ne.plot(roa, ne[0, :] * 1e6 / 1e20)
a_ne.set_xlabel("$r/a$")
a_ne.set_ylabel(r'$n_{\mathrm{e}}$')
a_Te = f.add_subplot(2, 1, 2, sharex=a_ne)
gptools.univariate_envelope_plot(
self.run_data.Te_X,
self.run_data.Te_res['mean_val'],
self.run_data.Te_res['std_val'],
ax=a_Te
)
a_Te.plot(self.run_data.Te_X, Te_in)
a_Te.plot(roa, Te[0, :] / 1e3)
a_Te.set_xlabel('$r/a$')
a_Te.set_ylabel(r"$T_{\mathrm{e}}$")
# Plot the charge state densities:
slider_plot(
sqrtpsinorm,
time,
scipy.rollaxis(cs_den.T, 1),
xlabel=r'$\sqrt{\psi_n}$',
ylabel=r'$t$ [s]',
zlabel=r'$n$ [cm$^{-3}$]',
labels=[str(i) for i in range(0, cs_den.shape[1])],
plot_sum=True
)
# Plot the total impurity content:
volnorm_grid = self.efit_tree.psinorm2volnorm(
sqrtpsinorm**2.0,
(self.time_1 + self.time_2) / 2.0
)
V = self.efit_tree.psinorm2v(1.0, (self.time_1 + self.time_2) / 2.0)
mask = ~scipy.isnan(volnorm_grid)
volnorm_grid = volnorm_grid[mask]
nn = cs_den.sum(axis=1)[:, mask]
# Use the trapezoid rule:
N = V * 0.5 * ((volnorm_grid[1:] - volnorm_grid[:-1]) * (nn[:, 1:] + nn[:, :-1])).sum(axis=1)
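            # i.e., N is approximately V * int_0^1 n dvolnorm: the
            # trapezoid-rule volume integral of the total impurity density on
            # the normalized-volume grid.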
f = plt.figure()
a = f.add_subplot(1, 1, 1)
a.plot(time, N, '.-')
a.set_xlabel('$t$ [s]')
a.set_ylabel('$N$')
a.set_title("Total impurity content")
f = plt.figure()
a = f.add_subplot(1, 1, 1)
# Hack for older matplotlib:
if matplotlib.__version__ == '1.3.1':
cmap = 'gray_r'
else:
cmap = 'plasma'
pcm = a.pcolormesh(sqrtpsinorm, time, cs_den.sum(axis=1), cmap=cmap, vmax=cs_den.sum(axis=1)[:, 0].max())
pcm.cmap.set_over('white')
f.colorbar(pcm, extend='max')
a.set_xlabel(r"$\sqrt{\psi_{\mathrm{n}}}$")
a.set_ylabel(r"$t$ [s]")
a.set_title("Total impurity density")
return cs_den, sqrtpsinorm, time, ne, Te
def cs_den2dlines(self, cs_den, sqrtpsinorm, time, ne, Te, steady_ar=None, debug_plots=False):
"""Predicts the local emissivities that would arise from the given charge state densities.
Parameters
----------
cs_den : array of float, (`n_time`, `n_cs`, `n_space`)
The charge state densities as computed by STRAHL.
sqrtpsinorm : array of float, (`n_space`,)
The square root of psinorm grid which `cs_den` is given on.
time : array of float, (`n_time`,)
The time grid which `cs_den` is given on.
ne : array of float, (`n_time`, `n_space`)
The electron density profile used by STRAHL.
Te : array of float, (`n_time`, `n_space`)
The electron temperature profile used by STRAHL.
steady_ar : float, optional
If None, compute for calcium. If a float, compute for argon.
debug_plots : bool, optional
If True, plots of the various steps will be generated. Default is
False (do not produce plots).
"""
atomdat = self.atomdat if steady_ar is None else self.Ar_atomdat
# Put the SXR signal as the final entry in dlines.
n_lines = len(atomdat[0]) + 1 if steady_ar is None else 1
dlines = scipy.zeros((len(time), n_lines, len(sqrtpsinorm)))
if steady_ar is None:
for i, chg, cw, hw in zip(
range(0, len(atomdat[0])),
atomdat[0],
atomdat[1],
atomdat[2]
):
dlines[:, i, :] = compute_emiss(
self.PEC[int(chg)],
cw,
hw,
ne,
cs_den[:, int(chg), :],
Te
)
# Compute the emissivity seen through the core XTOMO filters:
dlines[:, -1, :] = lines.compute_SXR(
cs_den,
ne,
Te,
self.atdata,
self.sindat,
self.filter_trans,
self.PEC
)
else:
# We need to add up the contributions to the z-line. These are
# stored in the PEC dict in the charge of the state the line is
# populated from.
# Excitation:
dlines[:, 0, :] = compute_emiss(
self.Ar_PEC[16],
4.0,
0.1,
ne,
cs_den[:, 16, :],
Te,
no_ne=True
)
# Ionization:
dlines[:, 0, :] += compute_emiss(
self.Ar_PEC[15],
4.0,
0.1,
ne,
cs_den[:, 15, :],
Te,
no_ne=True
)
# Recombination:
dlines[:, 0, :] += compute_emiss(
self.Ar_PEC[17],
4.0,
0.1,
ne,
cs_den[:, 17, :],
Te,
no_ne=True
)
if debug_plots:
# Plot the emissivity profiles:
slider_plot(
sqrtpsinorm,
time,
scipy.rollaxis(dlines.T, 1),
xlabel=r'$\sqrt{\psi_n}$',
ylabel=r'$t$ [s]',
zlabel=r'$\epsilon$ [W/cm$^3$]',
labels=[str(i) for i in range(0, dlines.shape[1])]
)
return dlines
def dlines2sig(self, dlines, time, params=None, steady_ar=None, debug_plots=False, sigsplines=None):
"""Computes the diagnostic signals corresponding to the given local emissivities.
Takes each signal in :py:attr:`self.signals`, applies the weights (if
present), interpolates onto the correct timebase and (if appropriate)
normalizes the interpolated signal.
Returns an array, `signals`, with one entry for each element in
:py:attr:`self.signals`. Each entry has shape (`n_time`, `n_chan`).
Parameters
----------
dlines : array of float, (`n_time`, `n_lines`, `n_space`)
The spatial profiles of local emissivities.
time : array of float, (`n_time`,)
The time grid which `dlines` is given on.
params : array of float
The parameters to use. If absent, :py:attr:`self.params` is used.
steady_ar : float, optional
If None, compute for calcium. If a float, compute for argon.
debug_plots : bool, optional
If True, plots of the various steps will be generated. Default is
False (do not produce plots).
"""
if params is not None:
params = scipy.asarray(params, dtype=float)
if len(params) == self.num_params:
self.params = params
else:
self.free_params = params
eig_D, eig_V, knots_D, knots_V, param_scaling, param_source, eig_ne, eig_Te = self.split_params()
if steady_ar is None:
if sigsplines is None:
sigsplines = self.dlines2sigsplines(dlines, time)
sig = []
# k is the index of the current block in param_scaling:
k = 0
for j, (s, sspl) in enumerate(zip(self.signals, sigsplines)):
if s is not None and self.signal_mask[j]:
out_arr = scipy.zeros_like(s.y)
# Use postinj to zero out before the injection:
postinj = s.t >= -param_source[j]
for i in range(0, s.y.shape[1]):
out_arr[postinj, i] = sspl[i](s.t[postinj] + param_source[j])
# Do the normalization and scaling for each block:
for b in scipy.unique(s.blocks):
mask = s.blocks == b
# Normalization:
if self.normalize:
out_arr[:, mask] = out_arr[:, mask] / out_arr[:, mask].max()
# Scaling:
out_arr[:, mask] *= param_scaling[k]
k += 1
sig.append(out_arr)
else:
# Just compute the endpoint:
sig = self.ar_signal.weights.dot(dlines[-1, 0, :])
# Normalize the HiReX-SR signal:
if self.normalize:
sig = sig / sig.max()
# Big plots:
if debug_plots:
if steady_ar is None:
for i, s in enumerate(self.signals):
if s is not None and self.signal_mask[i]:
f, a = s.plot_data(norm=self.normalize, fast=(i == 2))
srt = s.t.argsort()
for k, ax in enumerate(a):
ax.plot(s.t[srt], sig[i][srt, k], '.-')
if hasattr(self, 'truth_data') and self.truth_data is not None:
ax.plot(
s.t,
self.truth_data.sig_norm[i][:, k] if self.normalize else self.truth_data.sig_abs[i][:, k],
'.--'
)
else:
# This is just a function of chord number. The time-variation
# won't tell me much, but the profile will.
f = plt.figure()
a = f.add_subplot(1, 1, 1)
x = range(0, len(sig))
y = self.ar_signal.y_norm if self.normalize else self.ar_signal.y
err_y = self.ar_signal.std_y_norm if self.normalize else self.ar_signal.std_y
a.boxplot(y, positions=x)
a.plot(x, sig, 'go')
return sig
def sig2diffs(self, sig, steady_ar=None):
"""Computes the individual diagnostic differences corresponding to the given signals.
Parameters
----------
sig : list of arrays of float
The diagnostic signals. There should be one entry for each element
of :py:attr:`self.signals`. Each entry should be an array
of float with shape (`n_time`, `n_chords`).
steady_ar : float, optional
If None, compute for calcium. If a float, compute for argon.
"""
# Convert to differences:
# Weighting must be accomplished in diffs2ln_prob.
if steady_ar is None:
sig_diff = []
for i, (s, ss) in enumerate(zip(sig, self.signals)):
if ss is not None and self.signal_mask[i]:
sig_diff.append(s - (ss.y_norm if self.normalize else ss.y))
else:
sig_diff = sig - (self.ar_signal.y_norm if self.normalize else self.ar_signal.y)
return sig_diff
def diffs2ln_prob(
self,
sig_diff,
params=None,
steady_ar=None,
d_weights=None,
sign=1.0,
no_prior=False
):
r"""Computes the log-posterior corresponding to the given differences.
If there is a NaN in the differences, returns `-scipy.inf`.
Here, the weighted differences :math:`\chi^2` are given as
.. math::
\chi^2 = \sum_i \left ( w_{i}\frac{b_{STRAHL, i} - b_{data, i}}{\sigma_i} \right )^2
In effect, the weight factors :math:`w_i` (implemented as keywords
`s_weight`, `v_weight` and `xtomo_weight`) let you scale the uncertainty
for a given diagnostic up and down. A higher weight corresponds to a
smaller uncertainty and hence a bigger role in the inference, and a
lower weight corresponds to a larger uncertainty and hence a smaller
role in the inference.
The log-posterior itself is then computed as
.. math::
\ln p \propto -\chi^2 / 2 + \ln p(D, V)
Here, :math:`\ln p(D, V)` is the log-prior.
Parameters
----------
sig_diff : list of arrays of float
The diagnostic signal differences. There should be one entry for
each element of :py:attr:`self.signals`. Each entry should
be an array of float with shape (`n_time`, `n_chords`).
params : array of float
The parameters to use. If absent, :py:attr:`self.params` is used.
steady_ar : float, optional
If None, compute for calcium. If a float, compute for argon.
d_weights : list of float, (`n_sig`,), or list of arrays of float
The weights to use for each signal in :py:attr:`self.signals`.
Each entry in the outer list applies to the corresponding entry in
:py:attr:`self.signals`. Each entry can either be a single,
global value to use for the diagnostic, or a list of values for each
chord.
sign : float, optional
Sign (or other factor) applied to the final result. Set this to -1.0
to use this function with a minimizer, for instance. Default is 1.0
(return actual log-posterior).
"""
if params is not None:
params = scipy.asarray(params, dtype=float)
if len(params) == self.num_params:
self.params = params
else:
self.free_params = params
if d_weights is None:
d_weights = [1.0,] * len(sig_diff)
if steady_ar is None:
signals_masked = [s for s in self.signals if s is not None]
chi2 = 0.0
for w, s, ss in zip(d_weights, sig_diff, signals_masked):
dnorm2 = (w * s / (ss.std_y_norm if self.normalize else ss.std_y))**2.0
chi2 += dnorm2[~scipy.isnan(dnorm2)].sum()
else:
dnorm2 = (sig_diff / (self.ar_signal.std_y_norm if self.normalize else self.ar_signal.std_y))**2.0
chi2 = dnorm2[~scipy.isnan(dnorm2)].sum()
# This indicates that the brightness differences were all NaN:
if chi2 == 0.0:
return -sign * scipy.inf
else:
if no_prior:
lp = sign * (-0.5 * chi2)
else:
lp = sign * (-0.5 * chi2 + self.get_prior()(self.params))
# print(lp)
return lp
def dlines2sigsplines(self, dlines, time):
"""Convert the given diagnostic lines to splines which can be used to interpolate onto the diagnostic timebase.
"""
time = time - self.time_1
return [
[
scipy.interpolate.InterpolatedUnivariateSpline(
time,
dlines[:, s.atomdat_idx[i], :].dot(s.weights[i, :])
)
for i in range(0, s.y.shape[1])
]
if s is not None else None
for j, s in enumerate(self.signals)
]
def dlines2ln_prob_marg(self, dlines, time, params=None, debug_plots=False, no_prior=False, **kwargs):
"""Convert the given diagnostic lines to marginalized log-posterior.
Marginalizes over the time shifts using quasi Monte Carlo importance
sampling.
Parameters
----------
dlines : array of float, (`n_time`, `n_lines`, `n_space`)
The spatial profiles of local emissivities.
time : array of float, (`n_time`,)
The time grid which `dlines` is given on.
params : array of float
The parameters to use. If absent, :py:attr:`self.params` is used.
debug_plots : bool, optional
If True, plots of the various steps will be generated. Default is
False (do not produce plots).
**kwargs : optional keywords
All additional keywords are passed to each call to
:py:meth:`self.dlines2ln_prob`.
"""
if params is not None:
params = scipy.asarray(params, dtype=float)
if len(params) == self.num_params:
self.params = params
else:
self.free_params = params
sigsplines = self.dlines2sigsplines(dlines, time)
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# Number of diagnostics (determines number of time shifts):
nDiag = len(self.signals)
# Mask for where the active time shifts are:
mask = scipy.arange(nD + nV + nkD + nkV + nS, nD + nV + nkD + nkV + nS + nDiag)[self.signal_mask]
if self.method_PMMCMC == 'QMC':
# The QMC points are stored as an (NPTS, NDIM) array:
ln_prob = scipy.zeros(self.dt_quad_arr.shape[0])
if debug_plots:
ln_fz = scipy.zeros(self.dt_quad_arr.shape[0])
for i in range(0, self.dt_quad_arr.shape[0]):
self.params[mask] = self.dt_quad_arr[i]
ln_prob[i] = self.dlines2ln_prob(
dlines,
time,
debug_plots=debug_plots > 1,
no_prior=True,
sigsplines=sigsplines,
**kwargs
)
if debug_plots:
ln_fz[i] = self.shift_prior(self.params[nD + nV + nkD + nkV + nS:nD + nV + nkD + nkV + nS + nDiag])
# Apply the log-sum-exp trick:
A = ln_prob.max()
lp = scipy.log(scipy.exp(ln_prob - A).sum()) + A - scipy.log(len(ln_prob))
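            # This evaluates lp = log(mean_i(exp(ln_prob_i))) without overflow:
            # the QMC estimate of the likelihood marginalized over the sampled
            # time shifts.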
if debug_plots:
if len(mask) == 1:
f = plt.figure()
a = f.add_subplot(1, 1, 1)
a.plot(self.dt_quad_arr[:, 0], ln_prob + ln_fz, 'o')
elif len(mask) == 2:
# Interpolate to regular grid:
dt0 = scipy.linspace(self.dt_quad_arr[:, 0].min(), self.dt_quad_arr[:, 0].max(), 100)
dt1 = scipy.linspace(self.dt_quad_arr[:, 1].min(), self.dt_quad_arr[:, 1].max(), 101)
ln_fz_interp = scipy.interpolate.griddata(self.dt_quad_arr, ln_prob + ln_fz, (dt0[None, :], dt1[:, None]))
f = plt.figure()
a = f.add_subplot(1, 1, 1)
a.contourf(dt0, dt1, ln_fz_interp, 25)
elif self.method_PMMCMC == 'GHQ':
diffs = []
for i in range(0, self.dt_quad_arr.shape[0]):
self.params[mask] = self.dt_quad_arr[i]
diffs.append(
self.dlines2diffs(
dlines,
time,
debug_plots=debug_plots > 1,
sigsplines=sigsplines
)
)
# Each entry in diffs will now be a list of arrays containing the
# differences for each signal. Now we need to form the ln_prob...
ll_wt = scipy.zeros_like(self.dt_quad_arr)
signals_masked = [s for s in self.signals if s is not None]
# TODO: This can probably be vectorized!
for i, d in enumerate(diffs):
for j, (sd, ss) in enumerate(zip(d, signals_masked)):
dnorm2 = (sd / (ss.std_y_norm if self.normalize else ss.std_y))**2.0
ll_wt[i, j] = -0.5 * dnorm2[~scipy.isnan(dnorm2)].sum() + self.ln_dt_quad_wts[i, j]
lp = 0.0
for ll in ll_wt.T:
A = ll.max()
lp += scipy.log(scipy.exp(ll - A).sum()) + A
if debug_plots:
if len(mask) == 1:
# Compute the prior on time scales:
dts = scipy.zeros(len(self.signals))
ln_fz = scipy.zeros(self.dt_quad_arr.shape[0])
for i, dt in enumerate(self.dt_quad_arr[:, 0]):
dts[self.signal_mask] = dt
ln_fz[i] = self.shift_prior(dts)
f = plt.figure()
a = f.add_subplot(1, 1, 1)
a.plot(self.dt_quad_arr[:, 0], ll_wt[:, 0] + ln_fz)
elif len(mask) == 2:
# Compute the prior on time scales:
DT0, DT1 = scipy.meshgrid(self.dt_quad_arr[:, 0], self.dt_quad_arr[:, 1])
dt0 = DT0.ravel()
dt1 = DT1.ravel()
ln_fz = scipy.zeros(len(dt1))
dts = scipy.zeros(len(self.signals))
for i, (dt0v, dt1v) in enumerate(zip(dt0, dt1)):
dts[self.signal_mask] = [dt0v, dt1v]
ln_fz[i] = self.shift_prior(dts)
ln_fz = scipy.reshape(ln_fz, DT0.shape)
f = plt.figure()
a = f.add_subplot(1, 1, 1)
a.contourf(
self.dt_quad_arr[:, 0],
self.dt_quad_arr[:, 1],
ll_wt[:, 0][:, None] + ll_wt[:, 0][None, :] + ln_fz,
25
)
else:
raise ValueError("Unknown method for PMMCMC!")
if not no_prior:
lp += (
self.get_prior()(self.params) -
self.shift_prior(self.params[nD + nV + nkD + nkV + nS:nD + nV + nkD + nkV + nS + nDiag])
)
return lp
# The following are all wrapper functions. I explicitly copied the arguments
    # over for the function signatures, SO THESE MUST BE CAREFULLY UPDATED
# WHEN CHANGING ANY OF THE FUNCTIONS ABOVE!!!
def DV2dlines(
self,
params=None,
explicit_D=None,
explicit_D_grid=None,
explicit_V=None,
explicit_V_grid=None,
steady_ar=None,
debug_plots=False,
no_write=False,
no_strahl=False,
compute_view_data=False,
return_rho_t=False
):
"""Computes the local emissivities corresponding to the given parameters.
This is simply a wrapper around the chain of :py:meth:`DV2cs_den` ->
:py:meth:`cs_den2dlines`. See those functions for argument descriptions.
Parameters
----------
return_rho_t : bool, optional
If True, the return value is a tuple of (dlines, sqrtpsinorm, time).
Default is False (just return dlines).
"""
out = self.DV2cs_den(
params=params,
explicit_D=explicit_D,
explicit_D_grid=explicit_D_grid,
explicit_V=explicit_V,
explicit_V_grid=explicit_V_grid,
steady_ar=steady_ar,
debug_plots=debug_plots,
no_write=no_write,
no_strahl=no_strahl,
compute_view_data=compute_view_data
)
try:
cs_den, sqrtpsinorm, time, ne, Te = out
except (TypeError, ValueError):
raise RuntimeError(
"Something went wrong with STRAHL, return value of DV2cs_den is: '"
+ str(out) + "', params are: " + str(self.params)
)
out = self.cs_den2dlines(
cs_den,
sqrtpsinorm,
time,
ne,
Te,
steady_ar=steady_ar,
debug_plots=debug_plots
)
if return_rho_t:
return out, sqrtpsinorm, time
else:
return out
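# Example (hypothetical usage sketch, assuming `run` is an instance of this
# class with its parameters already set):
#
#     dlines, sqrtpsinorm, t = run.DV2dlines(return_rho_t=True)
#     # dlines has shape (num_time, num_lines, num_space) on the returned grids.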
def cs_den2sig(
self,
cs_den,
sqrtpsinorm,
time,
ne,
Te,
params=None,
steady_ar=None,
debug_plots=False,
):
"""Computes the diagnostic signals corresponding to the given charge state densities.
This is simply a wrapper around the chain of :py:meth:`cs_den2dlines` ->
:py:meth:`dlines2sig`. See those functions for argument descriptions.
"""
dlines = self.cs_den2dlines(
cs_den,
sqrtpsinorm,
time,
ne,
Te,
steady_ar=steady_ar,
debug_plots=debug_plots
)
return self.dlines2sig(
dlines,
time,
params=params,
steady_ar=steady_ar,
debug_plots=debug_plots,
)
def dlines2diffs(
self,
dlines,
time,
params=None,
steady_ar=None,
debug_plots=False,
sigsplines=None
):
"""Computes the diagnostic differences corresponding to the given local emissivities.
This is simply a wrapper around the chain of :py:meth:`dlines2sig` ->
:py:meth:`sig2diffs`. See those functions for argument descriptions.
"""
sig = self.dlines2sig(
dlines,
time,
params=params,
steady_ar=steady_ar,
debug_plots=debug_plots,
sigsplines=sigsplines
)
return self.sig2diffs(sig, steady_ar=steady_ar)
def sig2ln_prob(
self,
sig,
time,
params=None,
steady_ar=None,
d_weights=[1.0, 1.0, 1.0],
sign=1.0
):
"""Computes the log-posterior corresponding to the given diagnostic signals.
This is simply a wrapper around the chain of :py:meth:`sig2diffs` ->
:py:meth:`diffs2ln_prob`. See those functions for argument descriptions.
"""
sig_diff = self.sig2diffs(sig, steady_ar=steady_ar)
return self.diffs2ln_prob(
sig_diff,
params=params,
steady_ar=steady_ar,
d_weights=d_weights,
sign=sign
)
def DV2sig(
self,
params=None,
explicit_D=None,
explicit_D_grid=None,
explicit_V=None,
explicit_V_grid=None,
steady_ar=None,
debug_plots=False,
no_write=False,
no_strahl=False,
compute_view_data=False,
):
"""Predicts the diagnostic signals that would arise from the given parameters.
This is simply a wrapper around the chain of :py:meth:`DV2cs_den` ->
:py:meth:`cs_den2dlines` -> :py:meth:`dlines2sig`. See those functions
for argument descriptions.
"""
out = self.DV2cs_den(
params=params,
explicit_D=explicit_D,
explicit_D_grid=explicit_D_grid,
explicit_V=explicit_V,
explicit_V_grid=explicit_V_grid,
steady_ar=steady_ar,
debug_plots=debug_plots,
no_write=no_write,
no_strahl=no_strahl,
compute_view_data=compute_view_data
)
try:
cs_den, sqrtpsinorm, time, ne, Te = out
except (TypeError, ValueError):
raise RuntimeError(
"Something went wrong with STRAHL, return value of DV2cs_den is: '"
+ str(out) + "', params are: " + str(params)
)
dlines = self.cs_den2dlines(
cs_den,
sqrtpsinorm,
time,
ne,
Te,
steady_ar=steady_ar,
debug_plots=debug_plots
)
return self.dlines2sig(
dlines,
time,
params=params,
steady_ar=steady_ar,
debug_plots=debug_plots,
)
def cs_den2diffs(
self,
cs_den,
sqrtpsinorm,
time,
ne,
Te,
params=None,
steady_ar=None,
debug_plots=False,
):
"""Computes the diagnostic differences corresponding to the given charge state densities.
This is simply a wrapper around the chain of :py:meth:`cs_den2dlines` ->
:py:meth:`dlines2sig` -> :py:meth:`sig2diffs`. See those functions for
argument descriptions.
"""
dlines = self.cs_den2dlines(
cs_den,
sqrtpsinorm,
time,
ne,
Te,
steady_ar=steady_ar,
debug_plots=debug_plots
)
sig = self.dlines2sig(
dlines,
time,
params=params,
steady_ar=steady_ar,
debug_plots=debug_plots,
)
return self.sig2diffs(sig, steady_ar=steady_ar)
def dlines2ln_prob(
self,
dlines,
time,
params=None,
steady_ar=None,
debug_plots=False,
d_weights=[1.0, 1.0, 1.0],
sign=1.0,
no_prior=False,
sigsplines=None
):
"""Computes the log-posterior corresponding to the given local emissivities.
This is simply a wrapper around the chain of :py:meth:`dlines2sig` ->
:py:meth:`sig2diffs` -> :py:meth:`diffs2ln_prob`. See those functions
for argument descriptions.
"""
sig = self.dlines2sig(
dlines,
time,
params=params,
steady_ar=steady_ar,
debug_plots=debug_plots,
sigsplines=sigsplines
)
sig_diff = self.sig2diffs(sig, steady_ar=steady_ar)
return self.diffs2ln_prob(
sig_diff,
params=params,
steady_ar=steady_ar,
d_weights=d_weights,
sign=sign,
no_prior=no_prior
)
def DV2diffs(
self,
params=None,
explicit_D=None,
explicit_D_grid=None,
explicit_V=None,
explicit_V_grid=None,
steady_ar=None,
debug_plots=False,
no_write=False,
no_strahl=False,
compute_view_data=False,
):
"""Computes the diagnostic differences corresponding to the given parameters.
This is simply a wrapper around the chain of :py:meth:`DV2cs_den` ->
:py:meth:`cs_den2dlines` -> :py:meth:`dlines2sig` -> :py:meth:`sig2diffs`.
See those functions for argument descriptions.
"""
out = self.DV2cs_den(
params=params,
explicit_D=explicit_D,
explicit_D_grid=explicit_D_grid,
explicit_V=explicit_V,
explicit_V_grid=explicit_V_grid,
steady_ar=steady_ar,
debug_plots=debug_plots,
no_write=no_write,
no_strahl=no_strahl,
compute_view_data=compute_view_data
)
try:
cs_den, sqrtpsinorm, time, ne, Te = out
except (TypeError, ValueError):
raise RuntimeError(
"Something went wrong with STRAHL, return value of DV2cs_den is: '"
+ str(out) + "', params are: " + str(params)
)
dlines = self.cs_den2dlines(
cs_den,
sqrtpsinorm,
time,
ne,
Te,
steady_ar=steady_ar,
debug_plots=debug_plots
)
sig = self.dlines2sig(
dlines,
time,
params=params,
steady_ar=steady_ar,
debug_plots=debug_plots,
)
return self.sig2diffs(sig, steady_ar=steady_ar)
def cs_den2ln_prob(
self,
cs_den,
sqrtpsinorm,
time,
ne,
Te,
params=None,
steady_ar=None,
debug_plots=False,
d_weights=[1.0, 1.0, 1.0],
sign=1.0
):
"""Computes the log-posterior corresponding to the given charge-state densities.
This is simply a wrapper around the chain of :py:meth:`cs_den2dlines` ->
:py:meth:`dlines2sig` -> :py:meth:`sig2diffs` -> :py:meth:`diffs2ln_prob`.
See those functions for argument descriptions.
"""
dlines = self.cs_den2dlines(
cs_den,
sqrtpsinorm,
time,
ne,
Te,
steady_ar=steady_ar,
debug_plots=debug_plots
)
sig = self.dlines2sig(
dlines,
time,
params=params,
steady_ar=steady_ar,
debug_plots=debug_plots,
)
sig_diff = self.sig2diffs(sig, steady_ar=steady_ar)
return self.diffs2ln_prob(
sig_diff,
params=params,
steady_ar=steady_ar,
d_weights=d_weights,
sign=sign
)
# Special handling for local measurements:
def cs_den2local_sigs(self, cs_den, sqrtpsinorm, time, debug_plots=False):
"""Computes the local charge state densities.
Interpolates the local charge state densities from STRAHL onto the same
grid as :py:attr:`self.local_signals`.
Parameters
----------
cs_den : array of float, (`n_time`, `n_cs`, `n_space`)
The charge state densities as computed by STRAHL.
sqrtpsinorm : array of float, (`n_space`,)
The square root of psinorm grid which `cs_den` is given on.
time : array of float, (`n_time`,)
The time grid which `cs_den` is given on.
Returns
-------
local_sig : list of arrays of float, (`n_time`, `n_space`)
The predicted local charge state densities, as a function of time
and space. There is one entry for each item in
:py:attr:`self.local_signals`.
"""
local_sig = []
for sls in self.local_signals:
if sls.cs_den_idx is None:
spl = scipy.interpolate.RectBivariateSpline(time - self.time_1, sqrtpsinorm, cs_den.sum(axis=1))
else:
spl = scipy.interpolate.RectBivariateSpline(time - self.time_1, sqrtpsinorm, cs_den[:, sls.cs_den_idx, :])
local_sig.append(spl(sls.t, sls.sqrtpsinorm))
if self.normalize:
# Just normalize to the innermost chord
local_sig[-1] /= local_sig[-1][:, 0].max()
if debug_plots:
for s, sls in zip(local_sig, self.local_signals):
f, a = sls.plot_data(norm=self.normalize)
for i, ax in enumerate(a):
ax.plot(sls.t, s[:, i])
return local_sig
def local_sig2local_diffs(self, local_sig):
"""Computes the differences in local charge state densities.
Takes the local charge state densities that have already been
interpolated onto the grid of :py:attr:`self.local_signals` and finds
the difference from the measured signals.
Parameters
----------
local_sig : list of array of float
The local signals predicted by STRAHL, interpolated to the correct
space/time grid.
Returns
-------
local_diffs : list of arrays of float, (`n_time`, `n_space`)
The differences between the predicted and observed local signals, as
a function of time and space.
"""
local_diffs = []
for s, ss in zip(local_sig, self.local_signals):
local_diffs.append(s - (ss.y_norm if self.normalize else ss.y))
return local_diffs
def local_diffs2ln_prob(self, local_diffs, sign=1.0, no_prior=False):
r"""Computes the log-posterior corresponding to the given local signal differences.
Here, the weighted differences :math:`\chi^2` are given as
.. math::
\chi^2 = \sum_i \left ( \frac{b_{STRAHL, i} - b_{data, i}}{\sigma_i} \right )^2
The log-posterior itself is then computed as
.. math::
\ln p \propto -\chi^2 / 2 + \ln p(D, V)
Here, :math:`\ln p(D, V)` is the log-prior.
Parameters
----------
local_diffs : list of arrays of float, (`n_time`, `n_space`)
The difference between the predicted and observed local signals, as
a function of time and space.
sign : float, optional
Sign (or other factor) applied to the final result. Set this to -1.0
to use this function with a minimizer, for instance. Default is 1.0
(return actual log-posterior).
no_prior : bool, optional
If True, the log-likelihood is returned instead of the log-posterior.
Default is False (return log-posterior).
Returns
-------
lp : float
The log-posterior or log-likelihood.
"""
chi2 = 0.0
for s, ss in zip(local_diffs, self.local_signals):
dnorm2 = (s / (ss.std_y_norm if self.normalize else ss.std_y))**2.0
chi2 += dnorm2[~scipy.isnan(dnorm2)].sum()
if chi2 == 0.0:
return -sign * scipy.inf
else:
lp = sign * (-0.5 * chi2 + self.ll_normalization_local)
if not no_prior:
lp += sign * self.get_prior()(self.params)
return lp
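# A minimal sketch of the weighting used above (hypothetical numbers):
#
#     resid = scipy.asarray([0.5, -1.0, scipy.nan])  # predicted - observed
#     sigma = scipy.asarray([0.5, 0.5, 0.5])
#     dnorm2 = (resid / sigma)**2.0
#     chi2 = dnorm2[~scipy.isnan(dnorm2)].sum()       # 1.0 + 4.0 = 5.0
#     # ...and the log-likelihood contribution is -chi2 / 2.0 (plus the
#     # normalization constant).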
# End-to-end routines:
def DV2ln_prob(
self,
params=None,
sign=1.0,
explicit_D=None,
explicit_D_grid=None,
explicit_V=None,
explicit_V_grid=None,
steady_ar=None,
debug_plots=False,
no_write=False,
no_strahl=False,
compute_view_data=False,
d_weights=[1.0, 1.0, 1.0],
use_local=False,
no_prior=False
):
"""Computes the log-posterior corresponding to the given parameters.
This is simply a wrapper around the chain of :py:meth:`DV2cs_den` ->
:py:meth:`cs_den2dlines` -> :py:meth:`dlines2sig` ->
:py:meth:`sig2diffs` -> :py:meth:`diffs2ln_prob`. See those functions
for argument descriptions. This is designed to work as a log-posterior
function for various MCMC samplers, etc.
Parameters
----------
use_local : bool, optional
If True, local measurements are used instead of line-integrated
measurements. Default is False (use line-integrated measurements).
"""
try:
out = self.DV2cs_den(
params=params,
explicit_D=explicit_D,
explicit_D_grid=explicit_D_grid,
explicit_V=explicit_V,
explicit_V_grid=explicit_V_grid,
steady_ar=steady_ar,
debug_plots=debug_plots,
no_write=no_write,
no_strahl=no_strahl,
compute_view_data=compute_view_data
)
try:
cs_den, sqrtpsinorm, time, ne, Te = out
except (TypeError, ValueError):
raise RuntimeError(
"Something went wrong with STRAHL, return value of DV2cs_den is: '"
+ str(out) + "', params are: " + str(params)
)
if use_local:
local_sig = self.cs_den2local_sigs(
cs_den,
sqrtpsinorm,
time,
debug_plots=debug_plots
)
local_diffs = self.local_sig2local_diffs(local_sig)
return self.local_diffs2ln_prob(local_diffs, sign=sign, no_prior=no_prior)
else:
dlines = self.cs_den2dlines(
cs_den,
sqrtpsinorm,
time,
ne,
Te,
steady_ar=steady_ar,
debug_plots=debug_plots
)
sig = self.dlines2sig(
dlines,
time,
params=params,
steady_ar=steady_ar,
debug_plots=debug_plots,
)
sig_diff = self.sig2diffs(sig, steady_ar=steady_ar)
return self.diffs2ln_prob(
sig_diff,
params=params,
steady_ar=steady_ar,
d_weights=d_weights,
sign=sign,
no_prior=no_prior
)
except:
warnings.warn("Failed evaluation! Exception was:\n" + traceback.format_exc())
return -sign * scipy.inf
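# Example (hypothetical sketch of using DV2ln_prob as the log-posterior for
# an ensemble sampler; assumes emcee is installed and `run` is an instance
# of this class):
#
#     import emcee
#     ndim = (~run.fixed_params).sum()
#     sampler = emcee.EnsembleSampler(
#         2 * ndim, ndim, lambda p: run.DV2ln_prob(params=p)
#     )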
def u2ln_prob(
self,
u,
nl_grad=None,
sign=1.0,
return_grad=False,
grad_only=False,
pool=None,
eps=scipy.sqrt(sys.float_info.epsilon),
**kwargs
):
r"""Convert the log-posterior corresponding to a given set of CDF values.
Passes the values `u` (which lie in :math:`[0, 1]`) through the inverse
CDF before passing them to :py:meth:`DV2ln_prob`.
Also catches out-of-bounds and exceptions so as to be useful for
optimization.
Also can compute gradients using finite differences. Is intelligent
about using forward/backwards differences near bounds. It supports use
of a pool for parallelizing the gradient calculation, but this adds so
much overhead it will probably only be worth it for shockingly high-
dimensional problems. It also seems to promptly consume all system
memory, which is frustrating to say the least.
Parameters
----------
u : array of float, (`num_params`,)
The parameters, mapped through the CDF to lie in :math:`[0, 1]`.
nl_grad : None or array, (`num_params`,), optional
Container to put the gradient in when used with NLopt. If present
and `grad.size > 0`, `return_grad` is set to True and the gradient
is put into `grad` in-place.
sign : float, optional
The sign/scaling factor to apply to the result before returning. The
default is 1.0 (for maximization/sampling). Set to -1.0 for
minimization.
return_grad : bool, optional
If True, the gradient is computed using finite differences. First-
order forward differences are preferred, but first-order backward
differences will be used if the parameters are too close to the
bounds. If the bounds are tighter than `eps`, the gradient is set to
zero. (This condition should never be reached if you have reasonable
bounds.) Default is False (do not compute gradient).
grad_only : bool, optional
If `grad_only` and `return_grad` are both True, then only the
gradient is returned. Default is False.
pool : pool, optional
If this is not None, the pool will be used to evaluate the terms in
the gradient. Note that this adds enough overhead that it probably
only helps for very high-dimensional problems. It also seems to run
the system out of memory and crash, so you should probably just
leave it set to None.
eps : float, optional
The step size to use when computing the derivative with finite
differences. The default is the square root of machine epsilon.
**kwargs : extra keywords, optional
All extra keywords are passed to :py:meth:`DV2ln_prob`.
"""
if nl_grad is not None and nl_grad.size > 0:
return_grad = True
nlopt_format = True
else:
nlopt_format = False
if return_grad:
start = time_.time()
print(u)
u = scipy.asarray(u, dtype=float)
if (u < 0.0).any():
print("Lower bound fail!")
u[u < 0.0] = 0.0
if (u > 1.0).any():
print("Upper bound fail!")
u[u > 1.0] = 1.0
u_full = 0.5 * scipy.ones_like(self.params, dtype=float)
u_full[~self.fixed_params] = u
params = self.get_prior().sample_u(u_full)[~self.fixed_params]
try:
fu = self.DV2ln_prob(params=params, sign=sign, **kwargs)
except:
print(u)
fu = sign * -scipy.inf
if return_grad:
grad = scipy.zeros_like(u)
if pool is None:
for k in xrange(0, len(u)):
if u[k] + eps <= 1.0:
u_mod = u.copy()
u_mod[k] = u_mod[k] + eps
grad[k] = (self.u2ln_prob(u_mod, sign=sign, return_grad=False, **kwargs) - fu) / eps
elif u[k] - eps >= 0.0:
u_mod = u.copy()
u_mod[k] = u_mod[k] - eps
grad[k] = (fu - self.u2ln_prob(u_mod, sign=sign, return_grad=False, **kwargs)) / eps
else:
print("finite difference fail!")
else:
u_mod = []
signs = []
for k in xrange(0, len(u)):
if u[k] + eps <= 1.0:
u_mod.append(u.copy())
u_mod[-1][k] = u_mod[-1][k] + eps
signs.append(1)
elif u[k] - eps >= 0.0:
u_mod.append(u.copy())
u_mod[-1][k] = u_mod[-1][k] - eps
signs.append(-1)
else:
u_mod.append(u.copy())
signs.append(0)
print("finite difference fail!")
f_shifts = pool.map(_UGradEval(self, sign, kwargs), u_mod)
for k in xrange(0, len(u)):
if signs[k] == 1:
grad[k] = (f_shifts[k] - fu) / eps
elif signs[k] == -1:
grad[k] = (fu - f_shifts[k]) / eps
if grad_only:
out = grad
elif nlopt_format:
out = fu
nl_grad[:] = grad
else:
out = (fu, grad)
else:
out = fu
if return_grad:
print(time_.time() - start)
return out
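# Example (hypothetical sketch of minimizing in the unit hypercube; assumes
# scipy.optimize is available and `run` is an instance of this class):
#
#     ndim = (~run.fixed_params).sum()
#     res = scipy.optimize.minimize(
#         lambda u: run.u2ln_prob(u, sign=-1.0),
#         0.5 * scipy.ones(ndim),
#         bounds=[(0.0, 1.0)] * ndim,
#         method='L-BFGS-B'
#     )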
def u2ln_prob_local(self, *args, **kwargs):
kwargs['use_local'] = True
return self.u2ln_prob(*args, **kwargs)
def DV2jac(self, params=None, sign=1.0, **kwargs):
return nd.Gradient(self.DV2ln_prob)(params) * sign
# return scipy.asarray([self.DV2d_ln_prob(i, params=params, sign=sign, **kwargs) for i in self.free_param_idxs], dtype=float)
def DV2d_ln_prob(self, grad_idx, stepsize=None, params=None, **kwargs):
"""Compute the derivative of the log-posterior with respect to one parameter.
By default, centered differences are used. If a parameter is too close
to one of the bounds for the given stepsize, forward or backward
differences will be used, as appropriate.
Parameters
----------
grad_idx : int
The index of the parameter to take the derivative with respect to.
This refers to the index in the full parameter set, not just the
free parameters.
stepsize : float, optional
The step size to use with finite differences. Default is the cube
root of machine epsilon, scaled by the parameter magnitude (with a
floor of 1e-4).
params : array of float, (`num_param`,) or (`num_free_params`,), optional
The point to take the derivative at. Default is to use
:py:attr:`self.params`.
**kwargs : optional keywords
All additional keywords are passed to :py:meth:`DV2ln_prob`.
"""
if params is not None:
params = scipy.asarray(params, dtype=float)
if len(params) == self.num_params:
self.params = params
else:
self.free_params = params
# Always work with the full version:
params = self.params.copy()
bounds = scipy.asarray(self.get_prior().bounds[:], dtype=float)
if stepsize is None:
# Round to an exactly representable step:
if scipy.absolute(params[grad_idx]) >= 1e-4:
temp = params[grad_idx] * (1.0 + (sys.float_info.epsilon)**(1.0 / 3.0))
else:
temp = params[grad_idx] + (sys.float_info.epsilon)**(1.0 / 3.0) * 1e-4
stepsize = temp - params[grad_idx]
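# (The cube root of machine epsilon balances truncation error against
# round-off error for centered differences; computing the step as a
# difference of two floats makes it exactly representable.)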
if params[grad_idx] - stepsize >= bounds[grad_idx, 0] and params[grad_idx] + stepsize <= bounds[grad_idx, 1]:
params_plus_1 = params.copy()
params_plus_1[grad_idx] += stepsize
f_plus_1 = self.DV2ln_prob(params=params_plus_1, **kwargs)
params_plus_2 = params.copy()
params_plus_2[grad_idx] += stepsize * 2.0
f_plus_2 = self.DV2ln_prob(params=params_plus_2, **kwargs)
params_plus_3 = params.copy()
params_plus_3[grad_idx] += stepsize * 3.0
f_plus_3 = self.DV2ln_prob(params=params_plus_3, **kwargs)
params_minus_1 = params.copy()
params_minus_1[grad_idx] -= stepsize
f_minus_1 = self.DV2ln_prob(params=params_minus_1, **kwargs)
params_minus_2 = params.copy()
params_minus_2[grad_idx] -= stepsize * 2.0
f_minus_2 = self.DV2ln_prob(params=params_minus_2, **kwargs)
params_minus_3 = params.copy()
params_minus_3[grad_idx] -= stepsize * 3.0
f_minus_3 = self.DV2ln_prob(params=params_minus_3, **kwargs)
# Use centered difference (h^2 accuracy):
# d2 = (f_plus_1 - f_minus_1) / (2.0 * stepsize)
# Use centered difference (h^4 accuracy):
# d4 = (f_minus_2 - 8 * f_minus_1 + 8 * f_plus_1 - f_plus_2) / (12.0 * stepsize)
# Use centered difference (h^6 accuracy):
d6 = (-f_minus_3 + 9 * f_minus_2 - 45 * f_minus_1 + 45 * f_plus_1 - 9 * f_plus_2 + f_plus_3) / (60.0 * stepsize)
return d6 #d2, d4, d6
elif params[grad_idx] + stepsize <= bounds[grad_idx, 1]:
# Use forward difference (h^1 accuracy):
f = self.DV2ln_prob(params=params.copy(), **kwargs)
params_plus = params.copy()
params_plus[grad_idx] += stepsize
f_plus = self.DV2ln_prob(params=params_plus, **kwargs)
return (f_plus - f) / stepsize
elif params[grad_idx] - stepsize >= bounds[grad_idx, 0]:
# Use backward difference (h^1 accuracy):
f = self.DV2ln_prob(params=params.copy(), **kwargs)
params_minus = params.copy()
params_minus[grad_idx] -= stepsize
f_minus = self.DV2ln_prob(params=params_minus, **kwargs)
return (f - f_minus) / stepsize
else:
raise ValueError("Stepsize is too large, could not use any finite difference equation!")
def DV2d2_ln_prob(self, grad_idx_1, grad_idx_2, stepsize=[None, None], params=None, **kwargs):
r"""Compute the second derivative of the log-posterior with respect to two parameters.
By default, centered differences are used. If a parameter is too close
to one of the bounds for the given stepsize, forward or backward
differences will be used, as appropriate.
The derivatives are related to the indices by
.. math::
\frac{d}{d \theta_1}\left ( \frac{d}{d \theta_2}(\ln p) \right )
Parameters
----------
grad_idx_1 : int
The index of the parameter to take the "outer" derivative with
respect to.
grad_idx_2 : int
The index of the parameter to take the "inner" derivative with
respect to.
stepsize : list of float, optional
The step sizes to use with finite differences: the first entry is used
for the outer derivative (`grad_idx_1`) and the second is passed on for
the inner derivative (`grad_idx_2`). Each defaults to the cube root of
machine epsilon, scaled by the parameter magnitude.
params : array of float, (`num_param`,) or (`num_free_params`,), optional
The point to take the derivative at. Default is to use
:py:attr:`self.params`.
**kwargs : optional keywords
All additional keywords are passed to :py:meth:`DV2d_ln_prob`.
"""
if params is not None:
params = scipy.asarray(params, dtype=float)
if len(params) == self.num_params:
self.params = params
else:
self.free_params = params
# Always work with the full version:
params = self.params.copy()
bounds = scipy.asarray(self.get_prior().bounds[:], dtype=float)
if stepsize[0] is None:
# Round to an exactly representable step:
if scipy.absolute(params[grad_idx_1]) >= 1e-4:
temp = params[grad_idx_1] * (1.0 + (sys.float_info.epsilon)**(1.0 / 3.0))
else:
temp = params[grad_idx_1] + (sys.float_info.epsilon)**(1.0 / 3.0) * 1e-4
stepsize[0] = temp - params[grad_idx_1]
if params[grad_idx_1] - stepsize[0] >= bounds[grad_idx_1, 0] and params[grad_idx_1] + stepsize[0] <= bounds[grad_idx_1, 1]:
# Use centered difference:
params_plus = params.copy()
params_plus[grad_idx_1] += stepsize[0]
f_plus = self.DV2d_ln_prob(grad_idx_2, stepsize=stepsize[1], params=params_plus, **kwargs)
params_minus = params.copy()
params_minus[grad_idx_1] -= stepsize[0]
f_minus = self.DV2d_ln_prob(grad_idx_2, stepsize=stepsize[1], params=params_minus, **kwargs)
return (f_plus - f_minus) / (2.0 * stepsize[0])
elif params[grad_idx_1] + stepsize[0] <= bounds[grad_idx_1, 1]:
# Use forward difference:
f = self.DV2d_ln_prob(grad_idx_2, stepsize=stepsize[1], params=params.copy(), **kwargs)
params_plus = params.copy()
params_plus[grad_idx_1] += stepsize[0]
f_plus = self.DV2d_ln_prob(grad_idx_2, stepsize=stepsize[1], params=params_plus, **kwargs)
return (f_plus - f) / stepsize[0]
elif params[grad_idx_1] - stepsize[0] >= bounds[grad_idx_1, 0]:
# Use backward difference:
f = self.DV2d_ln_prob(grad_idx_2, stepsize=stepsize[1], params=params.copy(), **kwargs)
params_minus = params.copy()
params_minus[grad_idx_1] -= stepsize[0]
f_minus = self.DV2d_ln_prob(grad_idx_2, stepsize=stepsize[1], params=params_minus, **kwargs)
return (f - f_minus) / stepsize[0]
else:
raise ValueError("Stepsize is too large, could not use any finite difference equation!")
def DV2hessian(self, **kwargs):
"""Compute the Hessian matrix for the free parameters.
All keywords (including `params`) are passed to :py:meth:`DV2d2_ln_prob`.
"""
H = scipy.zeros((self.num_free_params, self.num_free_params))
for i in range(0, self.num_free_params):
for j in range(i, self.num_free_params):
val = self.DV2d2_ln_prob(
self.free_param_idxs[i],
self.free_param_idxs[j],
**kwargs
)
H[i, j] = val
H[j, i] = val
return H
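# Example (hypothetical sketch of a Laplace approximation to the posterior
# covariance at a MAP estimate `p_hat` of the free parameters):
#
#     H = run.DV2hessian(params=p_hat)
#     cov = scipy.linalg.inv(-H)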
def DV2hessian_2(self, params, stepsize=None, numsteps=2, pool=None):
"""Compute the Hessian and gradient using a polynomial approximation.
Fits a second-order polynomial to a dense grid of points around the
desired location, then computes the Hessian matrix and gradient vector
from the coefficients of that polynomial. The coefficients are fit using
linear least squares.
Parameters
----------
params : array of float
The free parameters to find the Hessian and gradient at.
stepsize : float or array of float, optional
The stepsize(s) to use for each parameter. The default is to pick
something of order `epsilon**(1.0 / 3.0) * params`.
numsteps : int, optional
The number of steps away from the center to take. Default is 2.
pool : :py:class:`Pool` instance, optional
The pool to use when evaluating the log-posterior at each point.
Default is to do this in serial.
"""
params = scipy.asarray(params, dtype=float)
if stepsize is None:
params_t = params.copy()
params_t[scipy.absolute(params_t) <= 1e-4] = 1e-4
temp = params_t * (1.0 + (sys.float_info.epsilon)**(1.0 / 3.0))
stepsize = temp - params_t
stepsize = scipy.asarray(stepsize, dtype=float)
if stepsize.ndim == 0:
stepsize = stepsize * scipy.ones_like(params)
# Form the grids:
grid_base = stepsize[:, None] * scipy.asarray(range(-numsteps, numsteps + 1), dtype=float)[None, :]
grids = params[:, None] + grid_base
dense_grids = scipy.meshgrid(*grids)
X = scipy.vstack([dg.ravel() for dg in dense_grids]).T
# Form the polynomial features up to order 2:
poly = PolynomialFeatures(2)
X_poly = poly.fit_transform(X - params[None, :])
# Evaluate at each value of X:
ev = _ComputeLnProbWrapper(self, make_dir=pool is not None)
if pool is None:
z = map(ev, X)
else:
z = pool.map(ev, X)
z = scipy.asarray(z, dtype=float)
# Mask the bad/out-of-bounds points:
good_pts = (~scipy.isnan(z)) & (~scipy.isinf(z))
# Fit the polynomial:
coeffs, res, rank, s = scipy.linalg.lstsq(X_poly[good_pts, :], z[good_pts])
# Form the Hessian matrix:
H = scipy.zeros((len(params), len(params)))
for i in range(0, len(params)):
# Diagonal:
mask = scipy.zeros(len(params))
mask[i] = 2
H[i, i] = 2.0 * coeffs[(poly.powers_ == mask).all(axis=1)]
# Off-diagonal:
for j in range(i + 1, len(params)):
mask = scipy.zeros(len(params))
mask[i] = 1
mask[j] = 1
H[i, j] = coeffs[(poly.powers_ == mask).all(axis=1)]
H[j, i] = H[i, j]
# Form the gradient vector:
g = scipy.zeros(len(params))
for i in range(0, len(params)):
mask = scipy.zeros(len(params))
mask[i] = 1
g[i] = coeffs[(poly.powers_ == mask).all(axis=1)]
return H, g
# Routines for multinest:
def multinest_prior(self, cube, ndim, nparams):
"""Prior distribution function for :py:mod:`pymultinest`.
Maps the (free) parameters in `cube` from [0, 1] to real space.
Parameters
----------
cube : array of float, (`num_free_params`,)
The variables in the unit hypercube.
ndim : int
The number of dimensions (meaningful length of `cube`).
nparams : int
The number of parameters (length of `cube`).
"""
# Need to use self.params so that we can handle fixed params:
u = self.get_prior().elementwise_cdf(self.params)
u[~self.fixed_params] = cube[:ndim]
p = self.get_prior().sample_u(u)
p_masked = p[~self.fixed_params]
for k in range(0, ndim):
cube[k] = p_masked[k]
def multinest_ll_local(self, cube, ndim, nparams, lnew):
"""Log-likelihood function for py:mod:`pymultinest`. Uses the local data.
Parameters
----------
cube : array of float, (`num_free_params`,)
The free parameters.
ndim : int
The number of dimensions (meaningful length of `cube`).
nparams : int
The number of parameters (length of `cube`).
lnew : float
New log-likelihood. Probably just there for FORTRAN compatibility?
"""
ll = -scipy.inf
acquire_working_dir(lockmode='file')
try:
c = [cube[i] for i in range(0, ndim)]
cs_den, sqrtpsinorm, time, ne, Te = self.DV2cs_den(params=c)
local_sig = self.cs_den2local_sigs(cs_den, sqrtpsinorm, time)
local_diffs = self.local_sig2local_diffs(local_sig)
ll = self.local_diffs2ln_prob(local_diffs, no_prior=True)
except:
warnings.warn("Something went wrong with STRAHL!")
finally:
release_working_dir(lockmode='file')
return ll
def multinest_ll_lineintegral(self, cube, ndim, nparams, lnew):
"""Log-likelihood function for py:mod:`pymultinest`. Uses the local data.
Parameters
----------
cube : array of float, (`num_free_params`,)
The free parameters.
ndim : int
The number of dimensions (meaningful length of `cube`).
nparams : int
The number of parameters (length of `cube`).
lnew : float
New log-likelihood. Probably just there for FORTRAN compatibility?
"""
ll = -scipy.inf
acquire_working_dir(lockmode='file')
try:
c = [cube[i] for i in range(0, ndim)]
ll = self.DV2ln_prob(params=c, no_prior=True)
except:
warnings.warn("Something went wrong with STRAHL!")
finally:
release_working_dir(lockmode='file')
return ll
def run_multinest(self, local=True, basename=None, n_live_points=100, **kwargs):
"""Run the multinest sampler.
"""
if basename is None:
basename = os.path.abspath('../chains_%d_%d/c-' % (self.shot, self.version))
chains_dir = os.path.dirname(basename)
if chains_dir and not os.path.exists(chains_dir):
os.mkdir(chains_dir)
# progress = pymultinest.ProgressPrinter(
# n_params=(~self.fixed_params).sum(),
# outputfiles_basename=basename
# )
# progress.start()
pymultinest.run(
self.multinest_ll_local if local else self.multinest_ll_lineintegral,
self.multinest_prior,
(~self.fixed_params).sum(),
outputfiles_basename=basename,
n_live_points=n_live_points,
**kwargs
)
# progress.stop()
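# Example (hypothetical sketch, assuming pymultinest and MultiNest itself are
# installed and the chains directory is writable):
#
#     run.run_multinest(local=True, n_live_points=400)
#     analyzer = run.process_multinest(cutoff_weight=0.1)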
def process_multinest(self, basename=None, cutoff_weight=0.1, **kwargs):
if basename is None:
basename = os.path.abspath('../chains_%d_%d/c-' % (self.shot, self.version))
a = pymultinest.Analyzer(
n_params=(~self.fixed_params).sum(),
outputfiles_basename=basename
)
data = a.get_data()
gptools.plot_sampler(
data[:, 2:],
weights=data[:, 0],
labels=self.free_param_names,
chain_alpha=1.0,
cutoff_weight=cutoff_weight,
**kwargs
)
return a
def parallel_compute_cs_den(self, D_grid, V_grid, pool):
eval_obj = _ComputeCSDenEval(self)
DD, VV = scipy.meshgrid(D_grid, V_grid)
orig_shape = DD.shape
DD = DD.ravel()
VV = VV.ravel()
res = pool.map(eval_obj, zip(DD, VV))
return res
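# Example (hypothetical sketch, assuming `make_pool` from this module is used
# to set up the worker pool, as in find_MAP_estimate below):
#
#     D_grid = scipy.linspace(0.0, 10.0, 10)
#     V_grid = scipy.linspace(-50.0, 50.0, 10)
#     pool = make_pool(num_proc=4)
#     res = run.parallel_compute_cs_den(D_grid, V_grid, pool)
#     run.process_cs_den(res, D_grid, V_grid)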
def make_cs_den_result_contour_plot(self, q, D_grid, V_grid, title='', callback=None, logscale=False, levels=50, vmin=None, vmax=None, vd_contours=True):
DD, VV = scipy.meshgrid(D_grid, V_grid)
VD = VV / DD
orig_shape = DD.shape
f = plt.figure()
a = f.add_subplot(1, 1, 1)
if logscale:
kwargs = {'norm': LogNorm(vmin=vmin, vmax=vmax)}
else:
kwargs = {'vmin': vmin, 'vmax': vmax}
pcm = a.pcolormesh(
D_grid[1:],
V_grid,
q[:, 1:],
cmap='gray_r',
**kwargs
)
f.colorbar(pcm)
if logscale:
qc = scipy.log10(q[:, 1:])
vminc = qc.min() if vmin is None else scipy.log10(vmin)
vmaxc = qc.max() if vmax is None else scipy.log10(vmax)
else:
qc = q[:, 1:]
vminc = qc.min() if vmin is None else vmin
vmaxc = qc.max() if vmax is None else vmax
try:
iter(levels)
except TypeError:
levels = scipy.linspace(vminc, vmaxc, levels)
a.contour(
D_grid[1:],
V_grid,
qc,
levels,
cmap='gray',
alpha=0.5,
**kwargs
)
if vd_contours:
VD_max = scipy.absolute(VD[:, 1:]).max()
VD_min = max(scipy.absolute(VD[:, 1:]).min(), 1e-3)
a.contour(
D_grid[1:],
V_grid,
VD[:, 1:],
scipy.sinh(
scipy.linspace(
scipy.arcsinh(VD[:, 1:].min()),
scipy.arcsinh(VD[:, 1:].max()),
len(levels)
)
),
colors='b',
alpha=0.5
)
a.set_xlabel("$D$ [m$^2$/s]")
a.set_ylabel("$V$ [m/s]")
a.set_title(title)
if callback is not None:
f.canvas.mpl_connect('button_press_event', callback)
return f, a
def process_cs_den(self, res, D_grid, V_grid):
DD, VV = scipy.meshgrid(D_grid, V_grid)
VD = VV / DD
orig_shape = DD.shape
# Make a single density evolution plot which can be updated:
rr = res[1]
f_n = plt.figure()
gs = mplgs.GridSpec(3, 1, height_ratios=[2, 1, 1])
a_n = f_n.add_subplot(gs[0, 0])
cs_den, sqrtpsinorm, time, ne, Te = self.DV2cs_den(scipy.concatenate((rr.DV, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.0, 0.0, 0.0])))
pcm_n = a_n.pcolormesh(sqrtpsinorm, time, cs_den.sum(axis=1), cmap='gray_r')
cb_n = f_n.colorbar(pcm_n)
l_local_n, = a_n.plot(self.truth_data.sqrtpsinorm, rr.t_n_peak_local, 'b')
l_global_n, = a_n.plot(rr.sqrtpsinorm_n_peak_global, rr.t_n_peak_global, 'ro')
a_n.set_xlabel(r'$\sqrt{\psi_n}$')
a_n.set_ylabel(r'$t$ [s]')
a_n.set_title("total Ca, $D=%.2f$m$^2$/s, $V=%.2f$m/s" % (rr.DV[0], rr.DV[1]))
volnorm_grid = self.efit_tree.psinorm2volnorm(
sqrtpsinorm**2.0,
(self.time_1 + self.time_2) / 2.0
)
V = self.efit_tree.psinorm2v(1.0, (self.time_1 + self.time_2) / 2.0)
mask = ~scipy.isnan(volnorm_grid)
volnorm_grid = volnorm_grid[mask]
nn = cs_den.sum(axis=1)[:, mask]
# Use the trapezoid rule:
N = V * 0.5 * ((volnorm_grid[1:] - volnorm_grid[:-1]) * (nn[:, 1:] + nn[:, :-1])).sum(axis=1)
# Confinement time of total impurity content:
t_mask = (time > rr.t_N_peak + 0.01) & (N > 0.0) & (~scipy.isinf(N)) & (~scipy.isnan(N))
if t_mask.sum() < 2:
theta = scipy.asarray([0.0, 0.0])
else:
X = scipy.hstack((scipy.ones((t_mask.sum(), 1)), scipy.atleast_2d(time[t_mask]).T))
theta, dum1, dum2, dum3 = scipy.linalg.lstsq(X.T.dot(X), X.T.dot(scipy.log(N[t_mask])))
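# (theta[1] is the exponential decay rate of the total impurity content N
# after its peak, so the impurity confinement time is tau = -1 / theta[1].)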
a_N = f_n.add_subplot(gs[1, 0])
l_N, = a_N.plot(time, N)
l_fit, = a_N.plot(time, scipy.exp(theta[0] + theta[1] * time))
p_N = a_N.axvspan(time[t_mask].min(), time[t_mask].max(), alpha=0.05, facecolor='r')
a_lN = f_n.add_subplot(gs[2, 0])
l_lN, = a_lN.plot(time, scipy.log(N))
l_lfit, = a_lN.plot(time, theta[0] + theta[1] * time)
p_lN = a_lN.axvspan(time[t_mask].min(), time[t_mask].max(), alpha=0.05, facecolor='r')
def on_click(event):
if plt.gcf().canvas.manager.toolbar._active is not None:
return
if event.xdata is None or event.ydata is None:
return
print("Updating...")
D_idx = profiletools.get_nearest_idx(event.xdata, D_grid)
V_idx = profiletools.get_nearest_idx(event.ydata, V_grid)
rr = res[V_idx * len(D_grid) + D_idx]
cs_den, sqrtpsinorm, time, ne, Te = self.DV2cs_den(scipy.concatenate((rr.DV, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.0, 0.0, 0.0])))
pcm_n.set_array(cs_den.sum(axis=1)[:-1, :-1].ravel())
pcm_n.set_clim(cs_den.sum(axis=1).min(), cs_den.sum(axis=1).max())
l_local_n.set_ydata(rr.t_n_peak_local)
l_global_n.set_ydata(rr.t_n_peak_global)
l_global_n.set_xdata(rr.sqrtpsinorm_n_peak_global)
a_n.set_title("total Ca, $D=%.2f$m$^2$/s, $V=%.2f$m/s" % (rr.DV[0], rr.DV[1]))
volnorm_grid = self.efit_tree.psinorm2volnorm(
sqrtpsinorm**2.0,
(self.time_1 + self.time_2) / 2.0
)
V = self.efit_tree.psinorm2v(1.0, (self.time_1 + self.time_2) / 2.0)
mask = ~scipy.isnan(volnorm_grid)
volnorm_grid = volnorm_grid[mask]
nn = cs_den.sum(axis=1)[:, mask]
# Use the trapezoid rule:
N = V * 0.5 * ((volnorm_grid[1:] - volnorm_grid[:-1]) * (nn[:, 1:] + nn[:, :-1])).sum(axis=1)
# Confinement time of total impurity content:
t_mask = (time > rr.t_N_peak + 0.01) & (N > 0.0) & (~scipy.isinf(N)) & (~scipy.isnan(N))
if t_mask.sum() < 2:
theta = scipy.asarray([0.0, 0.0])
else:
X = scipy.hstack((scipy.ones((t_mask.sum(), 1)), scipy.atleast_2d(time[t_mask]).T))
theta, dum1, dum2, dum3 = scipy.linalg.lstsq(X.T.dot(X), X.T.dot(scipy.log(N[t_mask])))
l_N.set_ydata(N)
l_fit.set_ydata(scipy.exp(theta[0] + theta[1] * time))
p_N.set_xy(
scipy.asarray(
[[time[t_mask].min(), 0],
[time[t_mask].min(), 1],
[time[t_mask].max(), 1],
[time[t_mask].max(), 0],
[time[t_mask].min(), 0]]
)
)
a_N.relim()
a_N.autoscale()
l_lN.set_ydata(scipy.log(N))
l_lfit.set_ydata(theta[0] + theta[1] * time)
p_lN.set_xy(
scipy.asarray(
[[time[t_mask].min(), 0],
[time[t_mask].min(), 1],
[time[t_mask].max(), 1],
[time[t_mask].max(), 0],
[time[t_mask].min(), 0]]
)
)
a_lN.relim()
a_lN.autoscale()
print(-1e3 / theta[1])
f_n.canvas.draw()
print("done!")
# Confinement times: we should always be able to get these.
# Total impurity confinement time:
tau_N = scipy.reshape(scipy.asarray([rr.tau_N for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
tau_N * 1e3, D_grid, V_grid, title=r'$\tau_{\mathrm{imp}}$ [ms]',
callback=on_click, logscale=True, vmax=1e3, vmin=1e0,
levels=scipy.linspace(scipy.log10(tau_N[:, 1:] * 1e3).min(), scipy.log10(tau_N[:, 1:] * 1e3).max(), 50),
)
# Compare to the result from SeguinPRL1983:
aa = self.efit_tree.getAOutSpline()((self.time_1 + self.time_2) / 2.0)
# Seguin uses positive V for inwards, so I need a sign correction.
SS = -aa * VV / (2.0 * DD)
tau_0_approx = (77.0 + SS**2.0) * (scipy.exp(SS) - SS - 1.0) * aa**2.0 / ((56.0 + SS**2.0) * 4.0 * SS**2.0 * DD)
self.make_cs_den_result_contour_plot(
tau_0_approx * 1e3, D_grid, V_grid, title=r"Seguin's $\tau_{\mathrm{imp}}$ [ms]",
callback=on_click, logscale=True, vmax=1e3, vmin=1e0,
levels=scipy.linspace(scipy.log10(tau_N[:, 1:] * 1e3).min(), scipy.log10(tau_N[:, 1:] * 1e3).max(), 50)
)
# Plot the residual between Seguin's result and ours:
self.make_cs_den_result_contour_plot(
scipy.absolute(tau_N - tau_0_approx) * 1e3, D_grid, V_grid, title=r"Absolute difference between observed and Seguin's $\tau_{\mathrm{imp}}$ [ms]",
callback=on_click, logscale=False, vmin=0, vmax=1e2,
levels=scipy.linspace(0, 1e2, 50)
)
# Peaking times: needing to know these sets our time resolution.
# (core n peaking time) - (edge n peaking time)
dt_peak = scipy.reshape(scipy.asarray([rr.t_n_peak_local[0] - rr.t_n_peak_local[-1] for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
dt_peak * 1e3, D_grid, V_grid, title=r"Core-edge impurity density peaking time difference [ms]",
callback=on_click, logscale=True, vmin=1e0, vmax=1e2
)
# Core density peaking time:
t_peak_core = scipy.reshape(scipy.asarray([rr.t_n_peak_local[0] for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
(t_peak_core - self.time_1) * 1e3, D_grid, V_grid, title=r"Core impurity density peaking time [ms]",
callback=on_click, logscale=False
)
# Compare to result from FussmannNF1986:
tau_2_approx = 0.03 * aa**2.0 / DD
self.make_cs_den_result_contour_plot(
tau_2_approx * 1e3, D_grid, V_grid, title=r"Fussmann's $\tau_2$ [ms]",
callback=on_click, logscale=False, vmax=10, vmin=1.0
)
# Global density peak time:
t_peak_n = scipy.reshape(scipy.asarray([rr.t_n_peak_global for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
(t_peak_n - self.time_1) * 1e3, D_grid, V_grid, title=r"Impurity density peaking time [ms]",
callback=on_click, logscale=False
)
# Total impurity peaking time:
t_peak = scipy.reshape(scipy.asarray([rr.t_N_peak for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
(t_peak - self.time_1) * 1e3, D_grid, V_grid, title=r"Global impurity content peaking time [ms]",
callback=on_click, logscale=False
)
# Numbers we can get with absolute calibration:
# Peak midradius impurity density:
n_mid_peak = scipy.reshape(scipy.asarray([rr.n_peak_local[32] for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
n_mid_peak, D_grid, V_grid, title=r"Peak midradius impurity density",
callback=on_click, logscale=True
)
# Peak core impurity density:
n_core_peak = scipy.reshape(scipy.asarray([rr.n_peak_local[0] for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
n_core_peak, D_grid, V_grid, title=r"Peak core impurity density",
callback=on_click, logscale=True
)
# Peak impurity content:
N_peak = scipy.reshape(scipy.asarray([rr.N_peak for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
N_peak, D_grid, V_grid, title=r"Peak impurity content",
callback=on_click, logscale=False
)
# Global density peak:
peak_n = scipy.reshape(scipy.asarray([rr.n_peak_global for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
peak_n, D_grid, V_grid, title=r"Global peak impurity density",
callback=on_click, logscale=True
)
# Locations: not very informative...
# density peak location:
rho_peak_n = scipy.reshape(scipy.asarray([rr.sqrtpsinorm_n_peak_global for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
rho_peak_n, D_grid, V_grid, title=r"Location of peak impurity density",
callback=on_click, logscale=False
)
# The charge state-specific stuff probably isn't very useful, since it
# has extra confounding variables.
# He-like peaking time:
t_He_peak = scipy.reshape(scipy.asarray([rr.t_cs_den_peak_global[-3] for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
t_He_peak, D_grid, V_grid, title=r"Global He-like density peaking time [s]",
callback=on_click, logscale=False
)
# (core He-like peaking time) - (edge He-like peaking time)
dt_peak_He = scipy.reshape(scipy.asarray([rr.t_cs_den_peak_local[-3, 0] - rr.t_cs_den_peak_local[-3, -1] for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
dt_peak_He * 1e3, D_grid, V_grid, title=r"Core-edge He-like density peaking time difference [ms]",
callback=on_click, logscale=False
)
# He-like peak location:
rho_peak_He = scipy.reshape(scipy.asarray([rr.sqrtpsinorm_cs_den_peak_global[-3] for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
rho_peak_He, D_grid, V_grid, title=r"Location of peak He-like density",
callback=on_click, logscale=False
)
# Peak He-like density:
He_like_peak = scipy.reshape(scipy.asarray([rr.cs_den_peak_global[-3] for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
He_like_peak, D_grid, V_grid, title=r"Peak He-like impurity density",
callback=on_click, logscale=True
)
# Core He-like density:
He_like_core_peak = scipy.reshape(scipy.asarray([rr.cs_den_peak_local[-3, 0] for rr in res]), orig_shape)
self.make_cs_den_result_contour_plot(
He_like_core_peak, D_grid, V_grid, title=r"Peak core He-like impurity density",
callback=on_click, logscale=True
)
def find_MAP_estimate(self, random_starts=None, num_proc=None, pool=None, theta0=None, thresh=None, use_local=False):
"""Find the most likely parameters given the data.
Parameters
----------
random_starts : int, optional
The number of times to start the optimizer at a random point in
order to find the global optimum. If set to None (the default), a
number equal to twice the number of processors will be used.
num_proc : int, optional
The number of cores to use. If set to None (the default), half of
the available cores will be used.
pool : :py:class:`Pool`, optional
The multiprocessing pool to use. You should produce this with
:py:func:`make_pool` so that the correct directories are in place.
theta0 : array of float, optional
The initial guess(es) to use. If not specified, random draws from
the prior will be used. This overrides `random_starts` if present.
This array should have shape (`num_starts`, `ndim`) or (`ndim`,).
thresh : float, optional
The threshold to continue with the optimization.
"""
if num_proc is None:
num_proc = multiprocessing.cpu_count() // 2
if num_proc > 1 and pool is None:
pool = make_pool(num_proc=num_proc)
if random_starts is None:
random_starts = 2 * num_proc
prior = self.get_prior()
if theta0 is None:
param_samples = prior.random_draw(size=random_starts)[~self.fixed_params, :].T
else:
param_samples = scipy.atleast_2d(theta0)
t_start = time_.time()
if pool is not None:
res = pool.map(
_OptimizeEval(self, thresh=thresh, use_local=use_local),
param_samples
)
else:
res = map(
_OptimizeEval(self, thresh=thresh, use_local=use_local),
param_samples
)
t_elapsed = time_.time() - t_start
print("All done, wrapping up!")
res_min = max(res, key=lambda r: r[1])
# res = [r for r in res if r is not None]
# TODO: Implement this for the new fmin_l_bfgs_b-based version...
# if res:
# res_min = min(res, key=lambda r: r.fun)
# else:
# res_min = None
# D, V = self.eval_DV(res_min.x, plot=True)
# impath_DV = '../DV_%d_%d.pdf' % (self.shot, self.version,)
# plt.gcf().savefig(impath_DV)
# if res_min is not None:
# lp = self.DV2ln_prob(self.get_prior().sample_u(res_min.x), debug_plots=True)
# impath_data = '../data_%d_%d.pdf' % (self.shot, self.version,)
# plt.gcf().savefig(impath_data)
send_email("MAP update", "MAP estimate done.", [])#, [impath_DV, impath_data])
print("MAP estimate complete. Elapsed time is %.2fs. Got %d completed." % (t_elapsed, len(res)))
# Compute the Hessian at the best result:
print("Estimating Hessian matrix...")
# H = self.DV2hessian(params=res_min[0])
# nd.Hessian does not respect bounds! So, I will try to use the MaxStepGenerator.
lower_dist = res_min[0] - self.free_param_bounds[:, 0]
upper_dist = self.free_param_bounds[:, 1] - res_min[0]
max_step = min(lower_dist.min(), upper_dist.min())
if max_step < scipy.sqrt(sys.float_info.epsilon):
warnings.warn("A parameter is too close to the bounds, could not compute Hessian!")
cov_min = None
else:
H = nd.Hessian(
lambda p: self.DV2ln_prob(params=p, use_local=use_local),
step=nd.MaxStepGenerator(step_ratio=None, num_extrap=14, step_max=max_step)
)(res_min[0])
cov_min = scipy.linalg.inv(-H)
# Estimate the AIC and BIC at the best result:
# (This is technically a kludge, because they are supposed to be at the
# MLE, but what I find is the MAP.)
print("Estimating AIC, BIC...")
ll_hat = self.DV2ln_prob(params=res_min[0], use_local=use_local, no_prior=True)
num_params = (~self.fixed_params).sum()
num_data = 0
if use_local:
ss = self.local_signals
else:
ss = self.signals
for s in ss:
if s is not None:
num_data += (~scipy.isnan(s.y)).sum()
AIC = 2.0 * (num_params - ll_hat)
BIC = -2.0 * ll_hat + num_params * scipy.log(num_data)
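# (These are the standard definitions AIC = 2 k - 2 ln L_hat and
# BIC = k ln(n) - 2 ln L_hat, with k free parameters and n data points,
# evaluated here at the MAP point rather than the true MLE.)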
out = {
'all_results': res,
'best_result': res_min,
'best_covariance': cov_min,
'best_AIC': AIC,
'best_BIC': BIC
}
return out
# return res
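# Example (hypothetical sketch; `make_pool` is the helper referenced in the
# docstring above):
#
#     pool = make_pool(num_proc=4)
#     out = run.find_MAP_estimate(random_starts=8, pool=pool)
#     best_params, best_lp = out['best_result'][0], out['best_result'][1]
#     cov = out['best_covariance']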
def MAP_from_SQL(self):
"""Get a job to do from the SQL server.
Only runs a single job. This function is designed to allow multiple
computers to run in parallel for controlled periods of time, without
running for "too long."
The computer connects to the database hosted on juggernaut.psfc.mit.edu,
gets a named lock called "bayesimplock", finds the current best unsolved
case (i.e., status is -100, 5 or 6) and marks it as being in progress
before releasing the lock. It then pulls in the parameters and starts
the optimizer. When the optimizer is done, it checks to make sure the
result was actually improved. If it was, it puts the new results in.
Otherwise it just sets it back to the old status in the hopes that a run
with more time will be able to make some progress. If bayesimp crashes,
it will attempt to mark the case with status 10. Sometimes the catch
statement fails to execute, in which case the case will be left marked
with status 0 (in progress).
"""
db = get_connection()
c = db.cursor()
# Acquire a mutex lock on the database:
c.execute("SELECT GET_LOCK('bayesimplock', 600);")
status, = c.fetchone()
if not status:
raise ValueError("Failed to get lock in a reasonable amount of time!")
# Find the most promising case which hasn't been worked on yet:
c.execute(
"""SELECT case_id
FROM results
WHERE status = -100 OR status = 5 OR status = 6
ORDER BY log_posterior DESC
LIMIT 1;"""
)
case_id, = c.fetchone()
# Set the status to 0:
c.execute(
"""UPDATE results
SET status = 0
WHERE case_id = %s
""",
(case_id,)
)
db.commit()
c.execute("SELECT RELEASE_LOCK('bayesimplock');")
try:
c.execute(
"""SELECT ending_params, log_posterior, log_likelihood, status
FROM results
WHERE case_id = %s""",
(case_id,)
)
params, old_lp, old_ll, old_status = c.fetchone()
# Close the connection so it doesn't crash out:
c.close()
db.close()
params = scipy.loads(params)
setup_working_dir()
res, = self.find_MAP_estimate(random_starts=0, num_proc=1, theta0=params)
new_params = self.get_prior().sample_u(res[0])
lp = res[1]
# TODO: This needs to be updated!
ll = res[1] - self.get_prior()(new_params)
new_status = res[2]
# Only put it in the tree if it improved:
if lp < old_lp:
new_params = params
new_status = old_status
ll = old_ll
lp = old_lp
# Re-establish the connection:
db = get_connection()
c = db.cursor()
# c.execute("SELECT GET_LOCK('bayesimplock', 600);")
# status, = c.fetchone()
# if not status:
# raise ValueError("Failed to get lock in a reasonable amount of time!")
# Put the new results in:
c.execute(
"""UPDATE results
SET status = %s, ending_params = %s, log_likelihood = %s, log_posterior = %s
WHERE case_id = %s
""",
(int(new_status), new_params.dumps(), float(ll), float(lp), case_id)
)
db.commit()
# c.execute("SELECT RELEASE_LOCK('bayesimplock');")
except:
print("Abnormal exit!")
traceback.print_exc()
db = get_connection()
c = db.cursor()
# c.execute("SELECT GET_LOCK('bayesimplock', 600);")
# status, = c.fetchone()
# if not status:
# raise ValueError("Failed to get lock in a reasonable amount of time!")
# Set the status to 10 to indicate an error:
c.execute(
"""UPDATE results
SET status = 10
WHERE case_id = %s
""",
(case_id,)
)
db.commit()
# c.execute("SELECT RELEASE_LOCK('bayesimplock');")
finally:
cleanup_working_dir()
c.close()
db.close()
print("All done!")
def process_MAP_from_SQL(self, n=20, plot=False, filter_bad=True, compute_dlines=False):
"""Get the top `n` cases from the SQL database.
Pulls the top `n` cases for which the status is not 10 (bayesimp error).
Computes D, V for each case and makes a plot with them color-coded
according to their log-posterior. Also computes the true D, V.
Parameters
----------
n : int, optional
The number of cases to plot. Default is 20.
plot : bool, optional
If True, plot the solutions. Default is False.
filter_bad : bool, optional
If True, remove probably bad solutions. This includes solutions
which have knots too close to the boundaries. Default is True.
compute_dlines : bool, optional
If True, the params will be passed through :py:meth:`DV2dlines`
to compute the line brightness profiles. The default is False.
"""
db = get_connection()
c = db.cursor()
c.execute(
"""SELECT log_posterior, status, ending_params
FROM results
WHERE status != 10 AND status != -100 AND log_posterior >= -45000000 AND status != 6 AND status != 5 AND status !=0
ORDER BY log_posterior DESC
LIMIT %s
""",
(n,)
)
res = c.fetchall()
lp = scipy.asarray([r[0] for r in res], dtype=float)
status = scipy.asarray([r[1] for r in res], dtype=int)
params = scipy.asarray([scipy.loads(r[2]) for r in res], dtype=float)
# Sort the knots:
for i in range(params.shape[0]):
params[
i,
self.num_eig_D + self.num_eig_V:
self.num_eig_D + self.num_eig_V + self.num_eig_D -
self.spline_k_D
] = scipy.sort(
params[
i,
self.num_eig_D + self.num_eig_V:
self.num_eig_D + self.num_eig_V + self.num_eig_D -
self.spline_k_D
]
)
params[
i,
self.num_eig_D + self.num_eig_V + self.num_eig_D -
self.spline_k_D:
self.num_eig_D + self.num_eig_V + self.num_eig_D -
self.spline_k_D + self.num_eig_V - self.spline_k_V
] = scipy.sort(
params[
i,
self.num_eig_D + self.num_eig_V + self.num_eig_D -
self.spline_k_D:
self.num_eig_D + self.num_eig_V + self.num_eig_D -
self.spline_k_D + self.num_eig_V - self.spline_k_V
]
)
if filter_bad:
is_bad = scipy.zeros_like(lp, dtype=bool)
for i, p in enumerate(params):
eig_D, eig_V, knots_D, knots_V, param_scaling, param_source, eig_ne, eig_Te = self.split_params(p)
is_bad[i] = (
knots_D.min() <= self.roa_grid_DV[3] or
knots_D.max() >= self.roa_grid_DV[-4] or
knots_V.min() <= self.roa_grid_DV[3] or
knots_V.max() >= self.roa_grid_DV[-4] or
eig_V[-1] > 0 or
scipy.diff(knots_D).min() < (self.roa_grid_DV[1] - self.roa_grid_DV[0]) or
scipy.diff(knots_V).min() < (self.roa_grid_DV[1] - self.roa_grid_DV[0])
)
mask = ~is_bad
params = params[mask, :]
lp = lp[mask]
status = status[mask]
D = scipy.zeros((len(params), len(self.roa_grid_DV)), dtype=float)
V = scipy.zeros((len(params), len(self.roa_grid_DV)), dtype=float)
D_true, V_true = self.eval_DV(self.params_true)
if compute_dlines:
dlines_true, sqrtpsinorm, time = self.DV2dlines(self.params_true, return_rho_t=True)
dlines = scipy.zeros(scipy.concatenate(([len(params),], dlines_true.shape)))
sbright_true, vbright_true, xtomobright_true = self.dlines2sig(self.params_true, dlines_true, time)
sbright = scipy.zeros(scipy.concatenate(([len(params),], sbright_true.shape)))
vbright = scipy.zeros(scipy.concatenate(([len(params),], vbright_true.shape)))
xtomobright = {}
for k, v in xtomobright_true.iteritems():
xtomobright[k] = scipy.zeros(scipy.concatenate(([len(params),], v.shape)))
if plot:
f = plt.figure()
a_D = f.add_subplot(2, 1, 1)
a_V = f.add_subplot(2, 1, 2, sharex=a_D)
for i, (p, lpv, s) in enumerate(zip(params, lp, status)):
D[i, :], V[i, :] = self.eval_DV(p)
if compute_dlines:
dlines[i] = self.DV2dlines(p)
sbright[i], vbright[i], x = self.dlines2sig(p, dlines[i], time)
for k, v in x.iteritems():
xtomobright[k][i] = v
if plot:
if s == 3:
lc = 'g'
elif s == 4:
lc = 'b'
elif s == 0:
lc = 'k'
else:
lc = 'r'
a_D.plot(self.roa_grid_DV, D[i, :], color=lc, alpha=(lpv - lp.min()) / (lp.max() - lp.min()))
a_V.plot(self.roa_grid_DV, V[i, :], color=lc, alpha=(lpv - lp.min()) / (lp.max() - lp.min()))
if plot:
# Overplot true solution:
a_D.plot(self.roa_grid_DV, D_true, color='orange', lw=3)
a_V.plot(self.roa_grid_DV, V_true, color='orange', lw=3)
a_D.set_ylabel('$D$ [m$^2$/s]')
a_V.set_xlabel('$r/a$')
a_V.set_ylabel('$V$ [m/s]')
a_D.set_title("Results from local optima search")
c.close()
db.close()
if compute_dlines:
return (
params, lp, status, D, V, D_true, V_true, dlines_true, dlines,
sqrtpsinorm, time, sbright_true, vbright_true, xtomobright_true,
sbright, vbright, xtomobright
)
else:
return params, lp, status, D, V, D_true, V_true
def make_MAP_solution_line_plot(self, params, lp, status):
"""Make a solution line plot from the output of :py:math:`process_MAP_from_SQL`.
The parameter samples are mapped to [0, 1] so they all fit on the same
axis. The lines are color-coded according to their status and the alpha
is selected based on the log-posterior. The true solution is shown in
orange with large squares and boxplots for each parameter are
superimposed to show how good/bad the fit is doing reaching the true
parameters.
Parameters
----------
params : array, (`n`, `num_params`)
The parameters to use.
lp : array, (`n`,)
The log-posterior values.
status : array, (`n`,)
The status of each fit, as stored in the MySQL database.
"""
u = [self.get_prior().elementwise_cdf(p) for p in params]
f = plt.figure()
a = f.add_subplot(1, 1, 1)
x = range(params.shape[1])
for uv, lpv, s in zip(u, lp, status):
if s == 3:
lc = 'g'
elif s == 4:
lc = 'b'
elif s == 0:
lc = 'k'
else:
lc = 'r'
a.plot(x, uv, 'o:', markersize=8, color=lc, alpha=(lpv - lp.min()) / (lp.max() - lp.min()))
u_true = self.get_prior().elementwise_cdf(self.params_true)
a.plot(x, u_true, 's-', markersize=12, color='orange')
a.boxplot(scipy.asarray(u, dtype=float), positions=x)
a.set_xlabel('parameter')
a.set_ylabel('$u=F_P(p)$')
a.set_xticks(x)
a.set_xticklabels(self.get_labels())
def make_MAP_solution_hist_plot(self, params, nbins=None):
"""Make histogram "fingerprint" plot of the parameter samples.
Parameters
----------
params : array, (`n`, `num_params`)
The parameters to plot.
nbins : int or array of int, (`D`,), optional
The number of bins dividing [0, 1] to use for each histogram. If a
single int is given, this is used for all of the hyperparameters. If an
array of ints is given, these are the numbers of bins for each of the
hyperparameters. The default is to determine the number of bins using
the Freedman-Diaconis rule.
"""
gptools.plot_sampler_fingerprint(
params[None, :, :],
self.get_prior(),
labels=self.get_labels(),
points=self.params_true,
point_color='r',
rot_x_labels=True
)
def make_MAP_solution_corr_plot(self, params):
"""Make a plot of the correlation matrix of the parameter samples.
Parameters
----------
params : array, (`n`, `num_params`)
The parameters to plot.
"""
gptools.plot_sampler_cov(
params[None, :, :],
labels=self.get_labels(),
rot_x_labels=True
)
def make_MAP_param_dist_plot(self, params):
"""Make a plot of the univariate and bivariate histograms of the parameter samples.
Parameters
----------
params : array, (`n`, `num_params`)
The parameters to plot.
"""
gptools.plot_sampler(
params[None, :, :],
labels=self.get_labels(),
points=self.params_true
)
def make_MAP_slider_plot(
self, params, lp, status, D, V, D_true, V_true,
dlines_true=None, dlines=None, sqrtpsinorm=None, time=None,
sig_true=None, sig=None
):
"""Make a plot which lets you explore the output of :py:meth:`process_MAP_from_SQL`.
All of the cases are plotted as thin grey lines, and the true curve is
plotted as a thick green line. The slider selects a pair of D, V curves
and highlights them so that pairs of "wrong" fits can be explored.
Parameters
----------
params : array, (`n`, `num_params`)
The parameters to use.
lp : array, (`n`,)
The log-posterior values.
status : array, (`n`,)
The status of each fit, as stored in the MySQL database.
D : array, (`n`, `num_rho`)
The D values for each set of params.
V : array, (`n`, `num_rho`)
The V values for each set of params.
D_true : array, (`num_rho`,)
The true values of D.
V_true : array, (`num_rho`,)
The true values of V.
dlines_true : array, (`num_time`, `num_lines`, `num_space`), optional
The true diagnostic emissivity profiles. If not present, will not be
plotted.
dlines : array, (`num_samp`, `num_time`, `num_lines`, `num_space`), optional
The diagnostic emissivity profiles for each sample. If not present,
will not be plotted.
sqrtpsinorm : array, (`num_space`,), optional
The sqrtpsinorm grid that `dlines` is given on. If not present, will
not be plotted.
time : array, (`num_time`,), optional
The time grid that `dlines` is given on. If not present, will not be
plotted.
sig_true : list of array, optional
The true signals, ordered [HiReX-SR, VUV, XTOMO], with each array of
shape (`num_time`, `num_chords`). If not present, will not be plotted.
sig : list of array, optional
The signals for each sample, in the same order as `sig_true`, with each
array of shape (`num_samp`, `num_time`, `num_chords`). If not present,
will not be plotted.
"""
def arrow_respond(t_slider, samp_slider, event):
if event.key == 'right':
if t_slider is not None:
t_slider.set_val(min(t_slider.val + 1, t_slider.valmax))
else:
samp_slider.set_val(min(samp_slider.val + 1, samp_slider.valmax))
elif event.key == 'left':
if t_slider is not None:
t_slider.set_val(max(t_slider.val - 1, t_slider.valmin))
else:
samp_slider.set_val(max(samp_slider.val - 1, samp_slider.valmin))
elif event.key == 'up':
samp_slider.set_val(min(samp_slider.val + 1, samp_slider.valmax))
elif event.key == 'down':
samp_slider.set_val(max(samp_slider.val - 1, samp_slider.valmin))
f = plt.figure()
if dlines is not None:
outer_grid = mplgs.GridSpec(1, 3, width_ratios=[1, 2, 1])
else:
outer_grid = mplgs.GridSpec(1, 1)
gs_DV = mplgs.GridSpecFromSubplotSpec(3, 1, outer_grid[0, 0], height_ratios=[5, 5, 1])
a_D = f.add_subplot(gs_DV[0, 0])
a_V = f.add_subplot(gs_DV[1, 0], sharex=a_D)
a_s = f.add_subplot(gs_DV[2, 0])
if dlines is not None:
dlines_norm = dlines / dlines.max(axis=(1, 3))[:, None, :, None]
dlines_norm_true = dlines_true / dlines_true.max(axis=(0, 2))[None, :, None]
n_lines = dlines_true.shape[1]
gs_dlines = mplgs.GridSpecFromSubplotSpec(
n_lines + 1,
2,
outer_grid[0, 1],
height_ratios=[5,] * n_lines + [1,]
)
a_lines = []
for i in xrange(n_lines):
a_lines.append(
f.add_subplot(
gs_dlines[i:i + 1, 0],
sharex=a_lines[0] if i > 0 else None
)
)
if i < n_lines - 1:
plt.setp(a_lines[-1].get_xticklabels(), visible=False)
else:
a_lines[-1].set_xlabel(r"$\sqrt{\psi_{\mathrm{n}}}$")
a_lines[-1].set_ylabel(r"$\epsilon$ [AU]")
if i < n_lines - 1:
a_lines[-1].set_title(r"Ca$^{%d+}$, %.2f nm" % (self.atomdat[0][i], self.atomdat[1][i] / 10.0))
else:
a_lines[-1].set_title("SXR")
a_t_s = f.add_subplot(gs_dlines[-1, :])
a_lines_norm = []
for i in xrange(n_lines):
a_lines_norm.append(
f.add_subplot(
gs_dlines[i:i + 1, 1],
sharex=a_lines[0]
)
)
if i < n_lines - 1:
plt.setp(a_lines_norm[-1].get_xticklabels(), visible=False)
else:
a_lines_norm[-1].set_xlabel(r"$\sqrt{\psi_{\mathrm{n}}}$")
a_lines_norm[-1].set_ylabel(r"normalized $\epsilon$ [AU]")
if i < n_lines - 1:
a_lines_norm[-1].set_title(r"Ca$^{%d+}$, %.2f nm" % (self.atomdat[0][i], self.atomdat[1][i] / 10.0))
else:
a_lines_norm[-1].set_title("SXR")
gs_sig = mplgs.GridSpecFromSubplotSpec(3, 1, outer_grid[0, 2])
a_sr = f.add_subplot(gs_sig[0, 0])
a_sr.set_title("HiReX-SR")
a_sr.set_ylabel("normalized signal [AU]")
a_sr.set_ylim(bottom=0)
a_vuv = f.add_subplot(gs_sig[1, 0])
a_vuv.set_title("VUV lines")
a_vuv.set_ylabel("normalized signal [AU]")
a_vuv.set_ylim(bottom=0)
a_vuv.set_xlim(-0.5, 2.5)
a_xtomo = f.add_subplot(gs_sig[2, 0])
a_xtomo.set_title("XTOMO (combined)")
a_xtomo.set_ylabel("normalized signal [AU]")
a_xtomo.set_ylim(bottom=0)
a_xtomo.set_xlabel("chord")
plt.setp(a_D.get_xticklabels(), visible=False)
a_V.set_xlabel(r"$r/a$")
a_D.set_ylabel(r"$D$ [m$^2$/s]")
a_V.set_ylabel(r"$V$ [m/s]")
title = f.suptitle('')
a_D.plot(self.roa_grid_DV, D_true, zorder=len(lp) + 1, color='g', lw=3)
a_V.plot(self.roa_grid_DV, V_true, zorder=len(lp) + 1, color='g', lw=3)
off_alpha = 0.25
lines_D = a_D.plot(self.roa_grid_DV, D.T, color='k', alpha=off_alpha)
lines_V = a_V.plot(self.roa_grid_DV, V.T, color='k', alpha=off_alpha)
if dlines is not None:
# Plot the raw emissivity data:
lines_dlines = []
lines_dlines_true = []
for i, a in enumerate(a_lines):
lines = a.plot(sqrtpsinorm, dlines[:, 0, i, :].T, color='k', alpha=off_alpha)
lines_dlines.append(lines)
line, = a.plot(sqrtpsinorm, dlines_true[0, i, :], color='g', zorder=len(lp) + 1, lw=3)
lines_dlines_true.append(line)
a.set_ylim(bottom=0.0)
# Plot the normalized emissivity data:
lines_dlines_norm = []
lines_dlines_norm_true = []
for i, a in enumerate(a_lines_norm):
lines_norm = a.plot(sqrtpsinorm, dlines_norm[:, 0, i, :].T, color='k', alpha=off_alpha)
lines_dlines_norm.append(lines_norm)
line_norm, = a.plot(sqrtpsinorm, dlines_norm_true[0, i, :], color='g', zorder=len(lp) + 1, lw=3)
lines_dlines_norm_true.append(line_norm)
a.set_ylim(bottom=0.0)
# Plot the line integrals:
lines_sig_sr = a_sr.plot(
range(sig[0].shape[-1]),
sig[0][:, 0, :].T,
color='k',
alpha=off_alpha,
ls='None',
marker='o'
)
line_sig_sr_true, = a_sr.plot(
range(sig[0].shape[-1]),
sig_true[0][0, :],
color='g',
zorder=len(lp) + 1,
ls='None',
marker='s'
)
lines_sig_vuv = a_vuv.plot(
range(sig[1].shape[-1]),
sig[1][:, 0, :].T,
color='k',
alpha=off_alpha,
ls='None',
marker='o'
)
line_sig_vuv_true, = a_vuv.plot(
range(sig[1].shape[-1]),
sig_true[1][0, :],
color='g',
zorder=len(lp) + 1,
ls='None',
marker='s'
)
# Plot the actual data:
t_idx_sr = profiletools.get_nearest_idx(time[0] - self.time_1, self.signals[0].t)
y = self.signals[0].y_norm if self.normalize else self.signals[0].y
err_y = self.signals[0].std_y_norm if self.normalize else self.signals[0].std_y
line_data_sr, (erry_top_data_sr, erry_bot_data_sr), (barsy_data_sr,) = a_sr.errorbar(
range(sig[0].shape[-1]),
y[t_idx_sr, :],
yerr=err_y[t_idx_sr, :],
color='r',
ls='None',
marker='^',
zorder=len(lp) + 2
)
# Assume all VUV diagnostics have the same timebase:
t_idx_vuv = profiletools.get_nearest_idx(time[0] - self.time_1, self.signals[1].t)
y = self.signals[1].y_norm if self.normalize else self.signals[1].y
err_y = self.signals[1].std_y_norm if self.normalize else self.signals[1].std_y
line_data_vuv, (erry_top_data_vuv, erry_bot_data_vuv), (barsy_data_vuv,) = a_vuv.errorbar(
range(sig[1].shape[-1]),
y[t_idx_vuv, :],
yerr=err_y[t_idx_vuv, :],
color='r',
ls='None',
marker='^',
zorder=len(lp) + 2
)
t_idx_xtomo = profiletools.get_nearest_idx(time[0] - self.time_1, self.signals[2].t)
y = self.signals[2].y_norm if self.normalize else self.signals[2].y
err_y = self.signals[2].std_y_norm if self.normalize else self.signals[2].std_y
line_data_xtomo, (erry_top_data_xtomo, erry_bot_data_xtomo), (barsy_data_xtomo,) = a_xtomo.errorbar(
range(sig[2].shape[-1]),
y[t_idx_xtomo, :],
yerr=err_y[t_idx_xtomo, :],
color='r',
ls='None',
marker='^',
zorder=len(lp) + 2
)
lines_xtomo = a_xtomo.plot(
range(sig[2].shape[-1]),
sig[2][:, 0, :].T,
color='k',
alpha=off_alpha,
ls='None',
marker='o'
)
lines_xtomo_true, = a_xtomo.plot(
range(sig[2].shape[-1]),
sig_true[2][0, :],
color='g',
zorder=len(lp) + 1,
ls='None',
marker='s'
)
sl = mplw.Slider(a_s, 'case index', 0, len(status) - 1, valinit=0, valfmt='%d')
if dlines is not None:
t_sl = mplw.Slider(a_t_s, 'time index', 0, len(time) - 1, valinit=0, valfmt='%d')
else:
t_sl = None
def update_samp(idx):
print("updating...")
idx = int(idx)
lines_D[update_samp.old_idx].set_alpha(off_alpha)
lines_V[update_samp.old_idx].set_alpha(off_alpha)
lines_D[update_samp.old_idx].set_color('k')
lines_V[update_samp.old_idx].set_color('k')
lines_D[update_samp.old_idx].set_linewidth(1)
lines_V[update_samp.old_idx].set_linewidth(1)
lines_D[update_samp.old_idx].set_zorder(2)
lines_V[update_samp.old_idx].set_zorder(2)
if dlines is not None:
for i, lines in enumerate(lines_dlines + [lines_sig_sr, lines_sig_vuv, lines_xtomo] + lines_dlines_norm):
lines[update_samp.old_idx].set_alpha(off_alpha)
lines[update_samp.old_idx].set_color('k')
lines[update_samp.old_idx].set_linewidth(1)
lines[update_samp.old_idx].set_zorder(2)
update_samp.old_idx = idx
lines_D[idx].set_alpha(1)
lines_V[idx].set_alpha(1)
lines_D[idx].set_color('b')
lines_V[idx].set_color('b')
lines_D[idx].set_linewidth(5)
lines_V[idx].set_linewidth(5)
lines_D[idx].set_zorder(len(lp) + 3)
lines_V[idx].set_zorder(len(lp) + 3)
if dlines is not None:
for i, lines in enumerate(lines_dlines + [lines_sig_sr, lines_sig_vuv, lines_xtomo] + lines_dlines_norm):
lines[idx].set_alpha(1)
lines[idx].set_color('b')
lines[idx].set_linewidth(5)
lines[idx].set_zorder(len(lp) + 3)
if t_sl is not None:
title.set_text("%s, lp=%.4g, t=%.3gs" % (OPT_STATUS[status[idx]], lp[idx], time[t_sl.val] - self.time_1))
else:
title.set_text("%s, lp=%.4g" % (OPT_STATUS[status[idx]], lp[idx]))
f.canvas.draw()
print('done!')
update_samp.old_idx = 0
def update_time(idx):
print("updating...")
idx = int(idx)
for i, lines in enumerate(lines_dlines):
for j, l in enumerate(lines):
l.set_ydata(dlines[j, idx, i, :])
for i, lines in enumerate(lines_dlines_norm):
for j, l in enumerate(lines):
l.set_ydata(dlines_norm[j, idx, i, :])
for i, l in enumerate(lines_sig_sr):
l.set_ydata(sig[0][i, idx, :])
for i, l in enumerate(lines_sig_vuv):
l.set_ydata(sig[1][i, idx, :])
for i, l in enumerate(lines_xtomo):
l.set_ydata(sig[2][i, idx, :])
# Update the errorbar plots:
t_idx_sr = profiletools.get_nearest_idx(time[idx] - self.time_1, self.signals[0].t)
y = self.signals[0].y_norm[t_idx_sr, :] if self.normalize else self.signals[0].y[t_idx_sr, :]
yerr = self.signals[0].std_y_norm[t_idx_sr, :] if self.normalize else self.signals[0].std_y[t_idx_sr, :]
line_data_sr.set_ydata(y)
erry_top_data_sr.set_ydata(y + yerr)
erry_bot_data_sr.set_ydata(y - yerr)
new_segments_y = [
scipy.array([[x, yt], [x, yb]]) for x, yt, yb in zip(line_data_sr.get_xdata(), y + yerr, y - yerr)
]
barsy_data_sr.set_segments(new_segments_y)
t_idx_vuv = profiletools.get_nearest_idx(time[idx] - self.time_1, self.signals[1].t)
y = self.signals[1].y_norm[t_idx_vuv, :] if self.normalize else self.signals[1].y[t_idx_vuv, :]
yerr = self.signals[1].std_y_norm[t_idx_vuv, :] if self.normalize else self.signals[1].std_y[t_idx_vuv, :]
line_data_vuv.set_ydata(y)
erry_top_data_vuv.set_ydata(y + yerr)
erry_bot_data_vuv.set_ydata(y - yerr)
new_segments_y = [
scipy.array([[x, yt], [x, yb]]) for x, yt, yb in zip(line_data_vuv.get_xdata(), y + yerr, y - yerr)
]
barsy_data_vuv.set_segments(new_segments_y)
t_idx_xtomo = profiletools.get_nearest_idx(time[idx] - self.time_1, self.signals[2].t)
y = self.signals[2].y_norm[t_idx_xtomo, :] if self.normalize else self.signals[2].y[t_idx_xtomo, :]
yerr = self.signals[2].std_y_norm[t_idx_xtomo, :] if self.normalize else self.signals[2].std_y[t_idx_xtomo, :]
line_data_xtomo.set_ydata(y)
erry_top_data_xtomo.set_ydata(y + yerr)
erry_bot_data_xtomo.set_ydata(y - yerr)
new_segments_y = [
scipy.array([[x, yt], [x, yb]]) for x, yt, yb in zip(line_data_xtomo.get_xdata(), y + yerr, y - yerr)
]
barsy_data_xtomo.set_segments(new_segments_y)
for i, (l, a) in enumerate(zip(lines_dlines_true, a_lines)):
l.set_ydata(dlines_true[idx, i, :])
a.relim()
a.autoscale(axis='y')
for i, (l, a) in enumerate(zip(lines_dlines_norm_true, a_lines_norm)):
l.set_ydata(dlines_norm_true[idx, i, :])
a.relim()
a.autoscale(axis='y')
line_sig_sr_true.set_ydata(sig_true[0][idx, :])
a_sr.relim()
a_sr.autoscale(axis='y')
line_sig_vuv_true.set_ydata(sig_true[1][idx, :])
a_vuv.relim()
a_vuv.autoscale(axis='y')
lines_xtomo_true.set_ydata(sig_true[2][idx, :])
a_xtomo.relim()
a_xtomo.autoscale(axis='y')
title.set_text("%s, lp=%.4g, t=%.3gs" % (OPT_STATUS[status[sl.val]], lp[sl.val], time[idx] - self.time_1))
f.canvas.draw()
print('done!')
sl.on_changed(update_samp)
update_samp(0)
if dlines is not None:
t_sl.on_changed(update_time)
update_time(0)
f.canvas.mpl_connect('key_press_event', lambda evt: arrow_respond(t_sl, sl, evt))
def sample_posterior(
self,
nsamp,
burn=None,
num_proc=None,
nwalkers=None,
ntemps=20,
a=2.0,
make_backup=True,
pool=None,
samp_type='Ensemble',
theta0=None,
ball_samples=0,
ball_std=0.01,
adapt=False,
**sampler_kwargs
):
"""Initialize and run the MCMC sampler.
Parameters
----------
nsamp : int
The number of samples to draw from each walker.
burn : int, optional
The number of samples to drop from the start. If absent, `nsamp` // 2
samples are burned from the start of each walker.
num_proc : int, optional
The number of processors to use. If absent, the number of processes in
`pool` is used when `pool` is given, otherwise the number of cores on the
machine (from :py:func:`multiprocessing.cpu_count`) is used.
nwalkers : int, optional
The number of walkers to use. If absent, `num_proc` times the number of
dimensions of the parameter space times two is used.
ntemps : int, optional
The number of temperatures to use with a parallel-tempered sampler.
Tmax : float, optional
The maximum temperature to use with a parallel-tempered sampler (passed
through `**sampler_kwargs`). If using adaptive sampling, `scipy.inf` is a
good choice.
make_backup : bool, optional
If True, the sampler (less its pool) will be written to
../sampler_<SHOT>_<VERSION>.pkl when sampling is complete. Default is True
(backup sampler).
pool : :py:class:`emcee.InterruptiblePool` instance
The pool to use for multiprocessing. If present overrides num_proc.
samp_type : {'Ensemble', 'PT'}, optional
The type of sampler to construct. Options are the affine-invariant
ensemble sampler (default) and the parallel-tempered sampler.
theta0 : array of float, optional
The starting points for each chain. If omitted, a draw from the prior is
used.
ball_samples : int, optional
The number of samples to take in a ball around each entry in
`theta0`. Default is 0 (just use the values in `theta0` directly).
ball_std : float, optional
The standard deviation to use when constructing the ball of samples
to start from. This is given as a fraction of the value. Default is
0.01 (i.e., 1%).
adapt : bool, optional
Whether or not to use an adaptive temperature ladder with the PT
sampler. You must have put the appropriately-modified form of emcee
on your sys.path for this to work, and have selected
`samp_type` = 'PT'.
**sampler_kwargs : optional keyword args
Optional arguments passed to construct the sampler. The most useful
one is `a`, the width of the proposal distribution. You can also use
this to adjust `adaptation_lag` (the timescale for adaptation of the
temperature ladder to slow down on) and `adaptation_time` (the
timescale of the temperature adaptation dynamics themselves).
"""
ndim = (~self.fixed_params).sum()
if burn is None:
burn = nsamp // 2
if num_proc is None:
if pool is not None:
num_proc = pool._processes
else:
num_proc = multiprocessing.cpu_count()
if nwalkers is None:
nwalkers = num_proc * ndim * 2
if num_proc > 1 and pool is None:
pool = make_pool(num_proc=num_proc)
if samp_type == 'Ensemble':
sampler = emcee.EnsembleSampler(
nwalkers,
ndim,
_ComputeLnProbWrapper(self),
pool=pool,
**sampler_kwargs
)
elif samp_type == 'PT':
# TODO: This needs to be cleaned up -- the adaptive sampler version
# has a different fingerprint.
if adapt:
sampler = emcee.PTSampler(
nwalkers,
ndim,
_ComputeLnProbWrapper(self),
self.get_prior(),
ntemps=ntemps,
pool=pool,
loglkwargs={'no_prior': True},
**sampler_kwargs
)
else:
sampler = emcee.PTSampler(
ntemps,
nwalkers,
ndim,
_ComputeLnProbWrapper(self),
self.get_prior(),
pool=pool,
loglkwargs={'no_prior': True},
**sampler_kwargs
)
else:
raise ValueError("Unknown sampler type: %s" % (samp_type,))
return self.add_samples(
sampler,
nsamp,
burn=burn,
make_backup=make_backup,
first_run=True,
theta0=theta0,
adapt=adapt,
ball_samples=ball_samples,
ball_std=ball_std
)
def add_samples(
self,
sampler,
nsamp,
burn=0,
make_backup=True,
first_run=False,
resample_infs=True,
ll_thresh=None,
theta0=None,
ball_samples=None,
ball_std=None,
adapt=False
):
"""Add samples to the given sampler.
Parameters
----------
sampler : :py:class:`emcee.EnsembleSampler` instance
The sampler to add to.
nsamp : int
The number of samples to add.
burn : int, optional
The number of samples to burn when plotting. Default is 0.
make_backup : bool, optional
If True, the sampler will be backed up to
../sampler_<SHOT>_<VERSION>.pkl when done. Default is True.
first_run : bool, optional
If True, the initial state is taken to be a draw from the prior
(i.e., for the initial run of the sampler). Otherwise, the initial
state is taken to be the current state. Default is False (use
current state of sampler).
resample_infs : bool, optional
If True, any chain whose log-probability is presently infinite will
be replaced with a draw from the prior. Only has an effect when
`first_run` is False. Default is True.
ll_thresh : float, optional
The threshold of log-probability, below which the chain will be
re-drawn from the prior. Default is to not redraw any chains with
finite log-probabilities.
theta0 : array of float, optional
The starting points for each chain. If omitted and `first_run` is
True then a draw from the prior will be used. If omitted and
`first_run` is False then the last state of the chain will be used.
ball_samples : int, optional
The number of samples to take in a ball around each entry in
`theta0`. Default is 0 (just use the values in `theta0` directly).
ball_std : float, optional
The standard deviation to use when constructing the ball of samples
to start from. This is given as a fraction of the value. Default is
0.01 (i.e., 1%).
adapt : bool, optional
Whether or not to use an adaptive temperature ladder with the PT
sampler. You must have put the appropriately-modified form of emcee
on your sys.path for this to work, and have passed a
:py:class:`emcee.PTSampler` instance for `sampler`.
"""
if theta0 is None:
if first_run or resample_infs:
prior = self.get_prior()
if isinstance(sampler, emcee.EnsembleSampler):
draw = prior.random_draw(size=sampler.chain.shape[0]).T
elif isinstance(sampler, emcee.PTSampler):
draw = prior.random_draw(size=(sampler.nwalkers, sampler.ntemps)).T
else:
raise ValueError("Unknown sampler class: %s" % (type(sampler),))
if first_run:
theta0 = draw
else:
if isinstance(sampler, emcee.EnsembleSampler):
theta0 = sampler.chain[:, -1, :]
bad = (
scipy.isinf(sampler.lnprobability[:, -1])# |
#(sampler.lnprobability[:, -1] <= -5.0e4)
)
if ll_thresh is not None:
bad = bad | (sampler.lnprobability[:, -1] <= ll_thresh)
elif isinstance(sampler, emcee.PTSampler):
theta0 = sampler.chain[:, :, -1, :]
bad = (
scipy.isinf(sampler.lnprobability[:, :, -1]) |
scipy.isnan(sampler.lnprobability[:, :, -1])
#(sampler.lnprobability[:, :, -1] <= -5.0e4)
)
if ll_thresh is not None:
bad = bad | (sampler.lnprobability[:, :, -1] <= ll_thresh)
else:
raise ValueError("Unknown sampler class: %s" % (type(sampler),))
theta0[bad, :] = draw[bad, :]
else:
if isinstance(sampler, emcee.EnsembleSampler):
theta0 = sampler.chain[:, -1, :]
elif isinstance(sampler, emcee.PTSampler):
theta0 = sampler.chain[:, :, -1, :]
else:
raise ValueError("Unknown sampler class: %s" % (type(sampler),))
else:
if ball_samples > 0:
theta0 = scipy.asarray(theta0, dtype=float)
if theta0.ndim == 1:
theta0 = emcee.utils.sample_ball(
theta0,
ball_std * theta0,
size=ball_samples
)
else:
theta0 = [
emcee.utils.sample_ball(
x,
ball_std * x,
size=ball_samples
)
for x in theta0
]
theta0 = scipy.vstack(theta0)
print(theta0.shape)
# Check against bounds:
bounds = scipy.asarray(self.get_prior().bounds[:])
for i in xrange(0, len(bounds)):
theta0[theta0[:, i] < bounds[i, 0], i] = bounds[i, 0]
theta0[theta0[:, i] > bounds[i, 1], i] = bounds[i, 1]
print("Starting MCMC sampler...this will take a while.")
try:
subprocess.call('fortune -a | cowsay -f vader-koala', shell=True)
except:
pass
t_start = time_.time()
if isinstance(sampler, emcee.EnsembleSampler):
sampler.run_mcmc(theta0, nsamp)
elif isinstance(sampler, emcee.PTSampler):
sampler.run_mcmc(theta0, nsamp, adapt=adapt)
t_elapsed = time_.time() - t_start
print("MCMC sampler done, elapsed time is %.2fs." % (t_elapsed,))
labels = self.get_labels()
# gptools.plot_sampler(
# sampler,
# burn=burn,
# labels=labels
# )
# impath = '../sampler_%d_%d.pdf' % (self.shot, self.version,)
# plt.gcf().savefig(impath)
if make_backup:
try:
# pools aren't pickleable, so we need to ditch it:
pool = sampler.pool
sampler.pool = None
# Put the file one level up so we don't copy it with our directory each time we open a pool!
# Perform an atomic save so we don't nuke it if there is a failure.
with open('../tmp_%d_%d.pkl' % (self.shot, self.version), 'wb') as f:
pkl.dump(sampler, f, protocol=pkl.HIGHEST_PROTOCOL)
os.rename(
'../tmp_%d_%d.pkl' % (self.shot, self.version),
'../sampler_%d_%d.pkl' % (self.shot, self.version,)
)
except SystemError:
# Failback on the basic pickle if it fails:
warnings.warn("cPickle failed, trying pickle!", RuntimeWarning)
import pickle as pkl2
with open('../tmp_%d_%d.pkl' % (self.shot, self.version,), 'wb') as f:
pkl2.dump(sampler, f)
os.rename(
'../tmp_%d_%d.pkl' % (self.shot, self.version),
'../sampler_%d_%d.pkl' % (self.shot, self.version,)
)
finally:
sampler.pool = pool
send_email("MCMC update", "MCMC sampler done.", []) #, [impath])
return sampler
def restore_sampler(self, pool=None, spath=None):
"""Restore the most recent sampler, optionally setting its pool.
"""
# TODO: MAKE THIS PULL IN INFO ON OTHER SETTINGS!
if spath is None:
spath = '../sampler_%d_%d.pkl' % (self.shot, self.version)
with open(spath, 'rb') as f:
s = pkl.load(f)
s.pool = pool
return s
def combine_samplers(self, v_start=0, lp=None, ll=None, chain=None, beta=None, tswap_acceptance_fraction=None, make_plots=True):
"""Stitch together multiple samplers from sequentially-numbered files.
Parameters
----------
v_start : int, optional
The sampler index to start reading at. Use this to avoid re-reading
old samplers. Default is 0 (read all samplers).
lp : array of float, optional
The log-posterior histories which have been previously read. If
present, the new data will be concatenated on.
ll : array of float, optional
The log-likelihood histories which have been previously read. If
present, the new data will be concatenated on.
chain : array of float, optional
The parameter histories which have been previously read. If present,
the new data will be concatenated on.
beta : array of float, optional
The inverse temperature histories which have been previously read.
If present, the new data will be concatenated on.
tswap_acceptance_fraction : array of float, optional
The accepted temperature swap fractions for each temperature which
have been previously read. If present, new data will be combined in.
make_plots : bool, optional
If True, plots of the log-posterior and beta histories will be
produced. Default is True (make plots).
"""
v = glob.glob('../sampler_%d_%d_*.pkl' % (self.shot, self.version))
v = [
int(
re.split(
r'^\.\./sampler_%d_%d_([0-9]+)\.pkl$' % (self.shot, self.version),
s
)[1]
) for s in v
]
v.sort()
v = scipy.asarray(v)
# Remove previously-read samplers:
v = v[v >= v_start]
# Get the shapes by simply handling the first fencepost if previous
# values were not provided:
if (lp is None) or (ll is None) or (chain is None) or (beta is None) or (tswap_acceptance_fraction is None):
vv = v[0]
v = v[1:]
print(vv)
s = self.restore_sampler(
spath='../sampler_%d_%d_%d.pkl' % (self.shot, self.version, vv)
)
lp = s.lnprobability
ll = s.lnlikelihood
chain = s.chain
beta = s.beta_history
tswap_acceptance_fraction = s.tswap_acceptance_fraction
for vv in v:
print(vv)
s = self.restore_sampler(
spath='../sampler_%d_%d_%d.pkl' % (self.shot, self.version, vv)
)
lp = scipy.concatenate((lp, s.lnprobability), axis=2)
ll = scipy.concatenate((ll, s.lnlikelihood), axis=2)
tswap_acceptance_fraction = (
chain.shape[2] * tswap_acceptance_fraction +
s.chain.shape[2] * s.tswap_acceptance_fraction
) / (chain.shape[2] + s.chain.shape[2])
chain = scipy.concatenate((chain, s.chain), axis=2)
beta = scipy.concatenate((beta, s.beta_history), axis=1)
if make_plots:
self.plot_lp_chains(lp)
f = plt.figure()
a = f.add_subplot(1, 1, 1)
a.semilogy(beta.T)
a.set_xlabel('step')
a.set_ylabel(r'$\beta$')
return (lp, ll, chain, beta, tswap_acceptance_fraction)
def plot_prior_samples(self, nsamp):
"""Plot samples from the prior distribution.
Parameters
----------
nsamp : int
The number of samples to plot.
"""
prior = self.get_prior()
draw = prior.random_draw(size=nsamp).T
f = plt.figure()
aD = f.add_subplot(2, 1, 1)
aV = f.add_subplot(2, 1, 2)
for d in draw:
D, V = self.eval_DV(d)
aD.plot(self.roa_grid_DV, D, alpha=0.1)
aV.plot(self.roa_grid_DV, V, alpha=0.1)
aD.set_xlabel('$r/a$')
aV.set_xlabel('$r/a$')
aD.set_ylabel('$D$ [m$^2$/s]')
aV.set_ylabel('$V$ [m/s]')
f.canvas.draw()
def get_labels(self):
"""Get the labels for each of the variables included in the sampler.
"""
# Make typing array lengths more compact:
nD = self.num_eig_D
nV = self.num_eig_V
kD = self.spline_k_D
kV = self.spline_k_V
nkD = self.num_eig_D - self.spline_k_D
nkV = self.num_eig_V - self.spline_k_V
# Number of signals (determines number of scaling parameters):
nS = 0
for s in self.signals:
if s is not None:
nS += len(scipy.unique(s.blocks))
# Number of diagnostics (determines number of time shifts):
nDiag = len(self.signals)
labels = (
['$C_{D,%d}$' % (n + 1,) for n in xrange(0, nD)] +
['$C_{V,%d}$' % (n + 1,) for n in xrange(0, nV)]
)
labels += ['$t_{D,%d}$' % (n + 1,) for n in xrange(0, nkD)]
labels += ['$t_{V,%d}$' % (n + 1,) for n in xrange(0, nkV)]
labels += [r'$s$ %d' % (n,) for n in xrange(0, nS)]
labels += [r'$\Delta t$ %d' % (n,) for n in xrange(0, nDiag)]
labels += [r'$u_{n_{\mathrm{e}},%d}$' % (n + 1,) for n in xrange(0, self.num_eig_ne)]
labels += [r'$u_{T_{\mathrm{e}},%d}$' % (n + 1,) for n in xrange(0, self.num_eig_Te)]
return labels
def process_sampler(self, sampler, burn=0, thin=1):
"""Processes the sampler.
Performs the following tasks:
* Marginalizes the D, V profiles and brightness histories.
* Makes interactive plots to browse the state at each sample on each walker.
Parameters
----------
sampler : :py:class:`emcee.EnsembleSampler` instance
The sampler to process the data from.
burn : int, optional
The number of samples to burn from the front of each walker. Default
is zero.
thin : int, optional
The amount by which to thin the samples. Default is 1.
"""
self.plot_marginalized_brightness(sampler, burn=burn, thin=thin)
self.plot_marginalized_DV(sampler, burn=burn, thin=thin)
self.explore_chains(sampler)
def plot_lp_chains(self, sampler, temp_idx=0):
"""Plot the log-posterior trajectories of the chains in the given sampler.
"""
f = plt.figure()
a = f.add_subplot(1, 1, 1)
if isinstance(sampler, emcee.EnsembleSampler):
a.semilogy(-sampler.lnprobability.T, alpha=0.1)
elif isinstance(sampler, emcee.PTSampler):
a.semilogy(-sampler.lnprobability[temp_idx].T, alpha=0.1)
elif isinstance(sampler, scipy.ndarray):
if sampler.ndim == 3:
a.semilogy(-sampler[temp_idx].T, alpha=0.1)
else:
a.semilogy(-sampler.T, alpha=0.1)
else:
raise ValueError("Unknown sampler class: %s" % (type(sampler),))
a.set_xlabel('step')
a.set_ylabel('-log-posterior')
def compute_marginalized_DV(self, sampler, burn=0, thin=1, chain_mask=None,
temp_idx=0, pool=None, weights=None, cutoff_weight=None,
plot=False, compute_VD=False, compute_M=False):
"""Computes and plots the marginal D, V profiles.
Parameters
----------
sampler : :py:class:`emcee.EnsembleSampler` instance
The sampler to process the data from.
burn : int, optional
The number of samples to burn from the front of each walker. Default
is zero.
thin : int, optional
The amount by which to thin the samples. Default is 1.
chain_mask : mask array
The chains to keep when computing the marginalized D, V profiles.
Default is to use all chains.
temp_idx : int, optional
The temperature index to use when `sampler` is a parallel-tempered
sampler or a 4d array of samples. Default is 0 (the coldest chain).
pool : object with `map` method, optional
Multiprocessing pool to use. If None, `sampler.pool` will be used.
weights : array of float, optional
The weights to use (i.e., when post-processing MultiNest output).
cutoff_weight : float, optional
Throw away any points with weights lower than `cutoff_weight` times
`weights.max()`. Default is to keep all points.
plot : bool, optional
If True, make a plot of D and V.
compute_VD : bool, optional
If True, compute and return V/D in addition to D and V.
compute_M : bool, optional
If True, compute the Mahalanobis distance between the marginalized
D and V and `self.explicit_D`, `self.explicit_V`.
"""
if pool is None:
try:
pool = sampler.pool
except:
pool = InterruptiblePool(multiprocessing.cpu_count())
try:
k = sampler.flatchain.shape[-1]
except AttributeError:
# Assumes array input is only case where there is no "flatchain" attribute.
k = sampler.shape[-1]
if isinstance(sampler, emcee.EnsembleSampler):
if chain_mask is None:
chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
flat_trace = sampler.chain[chain_mask, burn:, :]
flat_trace = flat_trace.reshape((-1, k))
elif isinstance(sampler, emcee.PTSampler):
if chain_mask is None:
chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
flat_trace = flat_trace.reshape((-1, k))
elif isinstance(sampler, scipy.ndarray):
if sampler.ndim == 4:
if chain_mask is None:
chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
flat_trace = sampler[temp_idx, chain_mask, burn:, :]
flat_trace = flat_trace.reshape((-1, k))
if weights is not None:
weights = weights[temp_idx, chain_mask, burn:]
weights = weights.ravel()
elif sampler.ndim == 3:
if chain_mask is None:
chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
flat_trace = sampler[chain_mask, burn:, :]
flat_trace = flat_trace.reshape((-1, k))
if weights is not None:
weights = weights[chain_mask, burn:]
weights = weights.ravel()
elif sampler.ndim == 2:
flat_trace = sampler[burn:, :]
flat_trace = flat_trace.reshape((-1, k))
if weights is not None:
weights = weights[burn:]
weights = weights.ravel()
if cutoff_weight is not None and weights is not None:
mask = weights >= cutoff_weight * weights.max()
flat_trace = flat_trace[mask, :]
weights = weights[mask]
else:
raise ValueError("Unknown sampler class: %s" % (type(sampler),))
DV_samp = scipy.asarray(
pool.map(
_ComputeProfileWrapper(self),
flat_trace
)
)
D_samp = DV_samp[:, 0, :]
bad = scipy.isinf(D_samp).any(axis=1)
print(str(bad.sum()) + " samples had inf in D.")
D_mean = profiletools.meanw(D_samp[~bad], axis=0, weights=weights)
D_std = profiletools.stdw(D_samp[~bad], axis=0, ddof=1, weights=weights)
V_samp = DV_samp[:, 1, :]
V_mean = profiletools.meanw(V_samp[~bad], axis=0, weights=weights)
V_std = profiletools.stdw(V_samp[~bad], axis=0, ddof=1, weights=weights)
if compute_VD:
VD_samp = DV_samp[:, 1, :] / DV_samp[:, 0, :]
VD_mean = profiletools.meanw(VD_samp[~bad], axis=0, weights=weights)
VD_std = profiletools.stdw(VD_samp[~bad], axis=0, ddof=1, weights=weights)
if compute_M:
# First, interpolate the true profiles onto the correct D, V grid:
D_point = scipy.interpolate.InterpolatedUnivariateSpline(
self.explicit_D_grid, self.explicit_D
)(scipy.sqrt(self.psinorm_grid_DV))
V_point = scipy.interpolate.InterpolatedUnivariateSpline(
self.explicit_V_grid, self.explicit_V
)(scipy.sqrt(self.psinorm_grid_DV))
DV_point = scipy.hstack((D_point, V_point))
DV = scipy.hstack((DV_samp[:, 0, :], DV_samp[:, 1, :]))
mu_DV = scipy.hstack((D_mean, V_mean))
cov_DV = scipy.cov(DV, rowvar=False, aweights=weights)
L = scipy.linalg.cholesky(cov_DV + 1000 * sys.float_info.epsilon * scipy.eye(*cov_DV.shape), lower=True)
y = scipy.linalg.solve_triangular(L, DV_point - mu_DV, lower=True)
M = y.T.dot(y)
if plot:
f_DV = plt.figure()
f_DV.suptitle('Marginalized Ca transport coefficient profiles')
a_D = f_DV.add_subplot(2, 1, 1)
a_D.plot(self.roa_grid_DV, D_mean, 'b')
a_D.fill_between(
self.roa_grid_DV,
D_mean - D_std,
D_mean + D_std,
color='b',
alpha=0.5
)
a_D.set_xlabel('$r/a$')
a_D.set_ylabel('$D$ [m$^2$/s]')
a_V = f_DV.add_subplot(2, 1, 2, sharex=a_D)
a_V.plot(self.roa_grid_DV, V_mean, 'b')
a_V.fill_between(
self.roa_grid_DV,
V_mean - V_std,
V_mean + V_std,
color='b',
alpha=0.5
)
a_V.set_xlabel('$r/a$')
a_V.set_ylabel('$V$ [m/s]')
out = [D_mean, D_std, V_mean, V_std]
if compute_VD:
out += [VD_mean, VD_std]
if compute_M:
out += [M,]
return tuple(out)
def plot_marginalized_brightness(self, sampler, burn=0, thin=1, chain_mask=None):
"""Averages the brightness histories over all samples/chains and makes a plot.
Parameters
----------
sampler : :py:class:`emcee.EnsembleSampler` instance
The sampler to process the data from.
burn : int, optional
The number of samples to burn from the front of each walker. Default
is zero.
thin : int, optional
The amount by which to thin the samples. Default is 1.
"""
if not isinstance(sampler, emcee.EnsembleSampler):
raise NotImplementedError(
"plot_marginalized_brightness is only supported for EnsembleSamplers!"
)
if chain_mask is None:
chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
blobs = scipy.asarray(sampler.blobs[burn::thin], dtype=object)[:, chain_mask, :]
chains = scipy.swapaxes(sampler.chain[chain_mask, burn::thin, :], 0, 1)
# Flatten it out to compute the marginal stuff (we need to keep the
# chain info for the slider plots, though):
blobs_flat = scipy.reshape(blobs, (-1, blobs.shape[2]))
chain_flat = scipy.reshape(chains, (-1, chains.shape[2]))
ll_flat = scipy.asarray(blobs_flat[:, 0], dtype=float)
good = ~(scipy.isinf(ll_flat) | scipy.isnan(ll_flat))
ll_flat = ll_flat[good]
sbright = blobs_flat[good, 1]
vbright = blobs_flat[good, 2]
time = blobs_flat[good, 3]
t_s = chain_flat[good, -2]
t_v = chain_flat[good, -1]
# We need to interpolate sbright, vbright onto a uniform timebase:
t = scipy.linspace(
self.signals[0].t.min(),
self.signals[0].t.max(),
100
)
wrapper = _InterpBrightWrapper(t, sbright[0].shape[1], vbright[0].shape[1])
out = sampler.pool.map(wrapper, zip(sbright, vbright, time, t_s, t_v))
sbright_interp = scipy.asarray([o[0] for o in out], dtype=float)
vbright_interp = scipy.asarray([o[1] for o in out], dtype=float)
xbright_interp = scipy.asarray([o[2] for o in out], dtype=float)
# Now we can compute the summary statistics:
mean_sbright = scipy.mean(sbright_interp, axis=0)
std_sbright = scipy.std(sbright_interp, axis=0, ddof=1)
mean_vbright = scipy.mean(vbright_interp, axis=0)
std_vbright = scipy.std(vbright_interp, axis=0, ddof=1)
# And make a big plot:
f_H, a_H = self.signals[0].plot_data(norm=self.normalize)
for i, a in enumerate(a_H):
a.plot(t, mean_sbright[:, i], 'g')
a.fill_between(
t,
mean_sbright[:, i] - std_sbright[:, i],
mean_sbright[:, i] + std_sbright[:, i],
color='g',
alpha=0.5
)
f_V, a_V = self.signals[1].plot_data(norm=self.normalize)
for i, a in enumerate(a_V):
a.plot(t, mean_vbright[:, i], 'g')
a.fill_between(
t,
mean_vbright[:, i] - std_vbright[:, i],
mean_vbright[:, i] + std_vbright[:, i],
color='g',
alpha=0.5
)
f_H.canvas.draw()
f_V.canvas.draw()
def compute_IC(self, sampler, burn, chain_mask=None, debug_plots=False, lp=None, ll=None):
"""Compute the DIC and AIC information criteria.
Parameters
----------
sampler : :py:class:`emcee.EnsembleSampler`
The sampler to compute the criteria for.
burn : int
The number of samples to burn before computing the criteria.
chain_mask : array, optional
The chains to include in the computation.
debug_plots : bool, optional
If True, plots will be made of the conditions at the posterior mean
and a histogram of the log-likelihood will be drawn.
lp : array, optional
The log-posterior. Only to be passed if `sampler` is an array.
ll : array, optional
The log-likelihood. Only to be passed if `sampler` is an array.
"""
# Compute the DIC:
if chain_mask is None:
if isinstance(sampler, emcee.EnsembleSampler):
chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
elif isinstance(sampler, emcee.PTSampler):
chain_mask = scipy.ones(sampler.chain.shape[1], dtype=bool)
elif isinstance(sampler, scipy.ndarray):
if sampler.ndim == 4:
chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
else:
chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
else:
raise ValueError("Unknown sampler class: %s" % (type(sampler),))
if isinstance(sampler, emcee.EnsembleSampler):
flat_trace = sampler.chain[chain_mask, burn:, :]
elif isinstance(sampler, emcee.PTSampler):
flat_trace = sampler.chain[0, chain_mask, burn:, :]
elif isinstance(sampler, scipy.ndarray):
if sampler.ndim == 4:
flat_trace = sampler[0, chain_mask, burn:, :]
else:
flat_trace = sampler[chain_mask, burn:, :]
else:
raise ValueError("Unknown sampler class: %s" % (type(sampler),))
flat_trace = flat_trace.reshape((-1, flat_trace.shape[2]))
theta_hat = flat_trace.mean(axis=0)
lp_theta_hat, blob = self.compute_ln_prob(theta_hat, debug_plots=debug_plots, return_blob=True)
ll_theta_hat = blob[0]
if isinstance(sampler, emcee.EnsembleSampler):
blobs = scipy.asarray(sampler.blobs, dtype=object)
ll = scipy.asarray(blobs[burn:, chain_mask, 0], dtype=float)
elif isinstance(sampler, emcee.PTSampler):
ll = sampler.lnlikelihood[0, chain_mask, burn:]
elif isinstance(sampler, scipy.ndarray):
if sampler.ndim == 4:
ll = ll[0, chain_mask, burn:]
else:
ll = ll[chain_mask, burn:]
E_ll = ll.mean()
if debug_plots:
f = plt.figure()
a = f.add_subplot(1, 1, 1)
a.hist(ll.ravel(), 50)
a.axvline(ll_theta_hat, label=r'$LL(\hat{\theta})$', color='r', lw=3)
a.axvline(E_ll, label=r'$E[LL]$', color='g', lw=3)
a.legend(loc='best')
a.set_xlabel('LL')
pD_1 = 2 * (ll_theta_hat - E_ll)
pD_2 = 2 * ll.var(ddof=1)
DIC_1 = -2 * ll_theta_hat + 2 * pD_1
DIC_2 = -2 * ll_theta_hat + 2 * pD_2
# Compute AIC:
try:
p = sampler.dim
except AttributeError:
p = sampler.shape[-1]
ll_max = ll.max()
AIC = 2 * p - 2 * ll_max
# Compute WAIC:
# TODO!
# Compute log-evidence:
try:
lev, e_lev = sampler.thermodynamic_integration_log_evidence(fburnin=float(burn) / sampler.chain.shape[2])
except:
lev = None
e_lev = None
warnings.warn("Thermodynamic integration failed!", RuntimeWarning)
out = {
'DIC_1': DIC_1,
'DIC_2': DIC_2,
'pD_1': pD_1,
'pD_2': pD_2,
'AIC': AIC,
'p': p,
'theta_hat': theta_hat,
'log_evidence': lev,
'err_log_evidence': e_lev
}
return out
def explore_chains(self, sampler):
"""Interactively explore the chains in `sampler`.
Creates plots of the D, V profiles, the brightness histories (HiReX-SR
and VUV) and the chain histories. Interact with the chain histories using
the arrow keys.
"""
l = []
# TODO: Update this!
f_H, a_H = self.signals[0].plot_data(norm=self.normalize)
f_VUV, a_VUV = self.signals[1].plot_data(norm=self.normalize)
title_f_H = f_H.suptitle('')
f_DV = plt.figure()
title_f_DV = f_DV.suptitle('')
a_D = f_DV.add_subplot(2, 1, 1)
plt.setp(a_D.get_xticklabels(), visible=False)
a_D.set_ylabel('$D$ [m$^2$/s]')
a_V = f_DV.add_subplot(2, 1, 2, sharex=a_D)
a_V.set_xlabel('$r/a$')
a_V.set_ylabel('$V$ [m/s]')
f_chains = plt.figure()
title_f_chains = f_chains.suptitle('')
gs = mplgs.GridSpec(3, sampler.chain.shape[2], height_ratios=[10, 1, 1])
a_chains = []
for k, label in enumerate(self.get_labels()):
a_chains.append(
f_chains.add_subplot(gs[0, k], sharex=a_chains[0] if k > 0 else None)
)
a_chains[-1].set_xlabel('step')
a_chains[-1].set_ylabel(label)
a_chains[-1].plot(sampler.chain[:, :, k].T, color='k', alpha=0.1)
a_chain_slider = f_chains.add_subplot(gs[1, :])
a_step_slider = f_chains.add_subplot(gs[2, :])
def update(dum):
"""Update the chain and/or step index.
"""
print("Updating...")
remove_all(l)
while l:
l.pop()
i_chain = int(chain_slider.val)
i_step = int(step_slider.val)
b = sampler.blobs[i_step][i_chain]
print(b[-1])
title_text = "walker %d, step %d, ll=%g, lp=%g" % (
i_chain,
i_step,
b[0],
sampler.lnprobability[i_chain, i_step]
)
title_f_H.set_text(title_text)
title_f_DV.set_text(title_text)
title_f_chains.set_text(title_text)
# Plot the brightness histories:
if b[1] is not None:
for k, a in enumerate(a_H):
l.append(a.plot(b[3] + sampler.chain[i_chain, i_step, -2], b[1][:, k], 'g'))
for k, a in enumerate(a_VUV):
l.append(a.plot(b[3] + sampler.chain[i_chain, i_step, -1], b[2][:, k], 'g'))
D, V = self.eval_DV(sampler.chain[i_chain, i_step, :])
l.append(a_D.plot(self.roa_grid_DV, D, 'b'))
l.append(a_V.plot(self.roa_grid_DV, V, 'b'))
a_D.relim()
a_D.autoscale_view()
a_V.relim()
a_V.autoscale_view()
for k in xrange(0, sampler.chain.shape[2]):
l.append(
a_chains[k].plot(
sampler.chain[i_chain, :, k],
color='r',
linewidth=3
)
)
l.append(a_chains[k].axvline(i_step, color='r', linewidth=3))
f_H.canvas.draw()
f_VUV.canvas.draw()
f_DV.canvas.draw()
f_chains.canvas.draw()
print("Done.")
def arrow_respond(up_slider, side_slider, event):
"""Event handler for arrow key events in plot windows.
Pass the slider object to update as a masked argument using a lambda function::
lambda evt: arrow_respond(my_up_slider, my_side_slider, evt)
Parameters
----------
up_slider : Slider instance associated with up/down keys for this handler.
side_slider : Slider instance associated with left/right keys for this handler.
event : Event to be handled.
"""
if event.key == 'right':
side_slider.set_val(min(side_slider.val + 1, side_slider.valmax))
elif event.key == 'left':
side_slider.set_val(max(side_slider.val - 1, side_slider.valmin))
elif event.key == 'up':
up_slider.set_val(min(up_slider.val + 1, up_slider.valmax))
elif event.key == 'down':
up_slider.set_val(max(up_slider.val - 1, up_slider.valmin))
chain_slider = mplw.Slider(
a_chain_slider,
'walker index',
0,
sampler.chain.shape[0] - 1,
valinit=0,
valfmt='%d'
)
step_slider = mplw.Slider(
a_step_slider,
'step index',
0,
sampler.chain.shape[1] - 1,
valinit=0,
valfmt='%d'
)
chain_slider.on_changed(update)
step_slider.on_changed(update)
update(0)
f_chains.canvas.mpl_connect(
'key_press_event',
lambda evt: arrow_respond(chain_slider, step_slider, evt)
)
def find_closest_representation(self, D_other, V_other, guess=None):
"""Find the closest representation of the given D, V profiles with the current basis functions.
Parameters
----------
D_other : array of float
The values of D. Must be given on the same internal roa_grid_DV as
the current run instance.
V_other : array of float
The values of V. Must be given on the same internal roa_grid_DV as
the current run instance.
guess : array of float, optional
The initial guess to use for the parameters when running the
optimizer. If not present, a random draw from the prior is used.
"""
# TODO: This needs random starts!
b = self.get_prior().bounds[:-4]
bounds = [list(v) for v in b]
for v in bounds:
if scipy.isinf(v[0]):
v[0] = None
if scipy.isinf(v[1]):
v[1] = None
res = scipy.optimize.minimize(
self.objective_func,
self.get_prior().random_draw(size=1).ravel()[:-4] if guess is None else guess,
args=(D_other, V_other),
method='L-BFGS-B',
# method='SLSQP',
bounds=bounds
)
self.compute_ln_prob(scipy.concatenate((res.x, [1, 1, 0, 0])), debug_plots=True)
D, V = self.eval_DV(scipy.concatenate((res.x, [1, 1, 0, 0])))
f = plt.figure()
aD = f.add_subplot(2, 1, 1)
aV = f.add_subplot(2, 1, 2)
aD.plot(self.roa_grid_DV, D)
aD.plot(self.roa_grid_DV, D_other)
aV.plot(self.roa_grid_DV, V)
aV.plot(self.roa_grid_DV, V_other)
return res
def objective_func(self, params, D_other, V_other):
"""Objective function for the minimizer in :py:meth:`find_closest_representation`.
"""
D, V = self.eval_DV(scipy.concatenate((params, [1, 1, 0, 0])))
return scipy.sqrt((scipy.concatenate((D - D_other, V - V_other))**2).sum())
@property
def working_dir(self):
"""Returns the directory name for the given settings.
"""
return 'strahl_%d_%d' % (self.shot, self.version)
@property
def ll_normalization_local(self):
"""Returns the normalization constant for the log-likelihood using local signals.
"""
if self._ll_normalization_local is None:
good_err = []
for s in self.local_signals:
if self.normalize:
good_err.extend(s.std_y_norm[~scipy.isnan(s.y_norm)].ravel())
else:
good_err.extend(s.std_y[~scipy.isnan(s.y)].ravel())
self._ll_normalization_local = (
-scipy.log(good_err).sum() - 0.5 * len(good_err) * scipy.log(2.0 * scipy.pi)
)
return self._ll_normalization_local
@property
def ll_normalization(self):
"""Returns the normalization constant for the log-likelihood.
"""
if self._ll_normalization is None:
good_err = []
for s in self.signals:
if self.normalize:
good_err.extend(s.std_y_norm[~scipy.isnan(s.y_norm)].ravel())
else:
good_err.extend(s.std_y[~scipy.isnan(s.y)].ravel())
self._ll_normalization = (
-scipy.log(good_err).sum() - 0.5 * len(good_err) * scipy.log(2 * scipy.pi)
)
return self._ll_normalization
@property
def ar_ll_normalization(self):
"""Returns the normalization constant for the log-likelihood of the Ar data.
"""
if self._ar_ll_normalization is None:
if self.normalize:
good_err = self.ar_signal.std_y_norm[~scipy.isnan(self.ar_signal.y_norm)].ravel()
else:
good_err = self.ar_signal.std_y[~scipy.isnan(self.ar_signal.y)].ravel()
self._ar_ll_normalization = (
-scipy.log(good_err).sum() - 0.5 * len(good_err) * scipy.log(2 * scipy.pi)
)
return self._ar_ll_normalization
def setup_files(self):
"""Sets up a copy of the STRAHL directory with the relevant files.
Must be run from the directory containing bayesimp.
"""
print("Setting up bayesimp...")
current_dir = os.getcwd()
# Make a copy of the master STRAHL directory:
print("Cloning master STRAHL directory...")
new_dir = os.path.join(current_dir, self.working_dir)
copy_tree(os.path.abspath('strahl'), new_dir)
print("Created %s." % (new_dir,))
# Switch to that directory to initialize the IDL side of things:
print("Running setup_strahl_run...")
os.chdir(new_dir)
if not os.path.isfile('run_data.sav'):
cmd = "idl70 <<EOF\n.compile setup_strahl_run.pro\nsetup_strahl_run, {shot}, {time_1}, {time_2}".format(
shot=self.shot,
time_1=self.time_1,
time_2=self.time_2
)
try:
cmd += ', tht={tht}'.format(tht=self.tht)
except AttributeError:
pass
try:
cmd += ', line={line}'.format(line=self.line)
except AttributeError:
pass
cmd += '\nexit\nEOF'
subprocess.call(cmd, shell=True)
else:
print("run_data.sav already in place. You may want to make sure it matches!")
print("Setup of files complete.")
def write_atomdat(self, vuv_data, li_like=None, be_like=None):
"""Write the Ca.atomdat file.
Parameters
----------
vuv_data : :py:class:`VUVData`
Class holding the XEUS and LoWEUS line information. If set to None,
explicit values can be set using the lines below.
li_like : array of int, optional
Indices in `CA_17_LINES` to use. Default is None.
be_like : array of int, optional
Indices in `CA_16_LINES` to use. Default is None.
"""
line_spec = LINE_SPEC_TEMPLATE.format(charge=18, wavelength=3.173, halfwidth=0.001)
if li_like is not None:
for idx in li_like:
line_spec += LINE_SPEC_TEMPLATE.format(
charge=17,
wavelength=CA_17_LINES[idx] * 10.0,
halfwidth=0.0001
)
if be_like is not None:
for idx in be_like:
line_spec += LINE_SPEC_TEMPLATE.format(
charge=16,
wavelength=CA_16_LINES[idx] * 10.0,
halfwidth=0.0001
)
if vuv_data is not None:
for spectrometer, s in vuv_data.vuv_lines.iteritems():
for l in s:
if l.diagnostic_lines is not None:
lam = CA_LINES[l.diagnostic_lines]
i_max = lam.argmax()
i_min = lam.argmin()
l_max = lam[i_max]
l_min = lam[i_min]
cwl = (l_max + l_min) / 2.0
halfwidth = (l_max - l_min) / 2.0 + 0.0001
if max(l.diagnostic_lines) < len(CA_17_LINES):
line_spec += LINE_SPEC_TEMPLATE.format(
charge=17,
wavelength=cwl * 10.0,
halfwidth=halfwidth * 10.0
)
else:
line_spec += LINE_SPEC_TEMPLATE.format(
charge=16,
wavelength=cwl * 10.0,
halfwidth=halfwidth * 10.0
)
with open('Ca.atomdat', 'w') as f:
f.write(
CA_ATOMDAT_TEMPLATE.format(
num_lines=len(line_spec.splitlines()),
line_spec=line_spec
)
)
def write_control(self, filepath=None, time_2_override=None):
"""Writes the strahl.control file used to automate STRAHL.
"""
if filepath is None:
filepath = 'strahl.control'
contents = (
"run_{shot:d}.0\n"
" {time_2:.3f}\n"
"E\n".format(
shot=self.shot,
time_2=self.time_2 if time_2_override is None else time_2_override
)
)
with open(filepath, 'w') as f:
f.write(contents)
return contents
def write_pp(self, sqrtpsinorm, ne, Te, t, filepath=None):
"""Write STRAHL plasma background (pp) file for the given ne, Te profiles.
At present, this is a very simplistic script that has the functionality
needed for :py:mod:`bayesimp` to run and not much else.
Does not write the neutral density or ion temperature blocks. Assumes
you have fit the WHOLE profile (i.e., uses the `interpa` option).
Parameters
----------
sqrtpsinorm : array of float, (`M`,)
The square root of normalized poloidal flux grid the profiles are
given on.
ne : array of float, (`M`,) or (`N`, `M`)
The electron density in units of 10^20 m^-3.
Te : array of float, (`M`,) or (`N`, `M`)
The electron temperature in units of keV.
t : float or array of float (`N`,)
The times the profiles are specified at. If using a single value,
this should be equal to the end time of your simulation.
filepath : str, optional
The path to write the file to. By default, nete/pp<SHOT>.0 is used.
"""
if filepath is None:
filepath = 'nete/pp{shot:d}.0'.format(shot=self.shot)
try:
iter(t)
except TypeError:
t = scipy.atleast_1d(t)
ne = scipy.atleast_2d(ne)
Te = scipy.atleast_2d(Te)
else:
t = scipy.asarray(t, dtype=float)
ne = scipy.asarray(ne, dtype=float)
Te = scipy.asarray(Te, dtype=float)
t_str = ' '.join(map(str, t))
rho_str = ' '.join(map(str, sqrtpsinorm))
ne_str = ''
for row in ne:
ne_max = row.max()
ne_str += (
str(ne_max * 1e14) + ' ' +
' '.join(map(str, row / ne_max)) + '\n'
)
Te_str = ''
for row in Te:
Te_max = row.max()
Te_str += (
str(Te_max * 1e3) + ' ' +
' '.join(map(str, row / Te_max)) + '\n'
)
contents = (
"\n"
"cv time-vector\n"
" {num_time:d}\n"
" {time_points:s}\n"
"\n"
"cv Ne-function\n"
" interpa\n"
"\n"
"\n"
"cv x-coordinate\n"
" 'poloidal rho'\n"
"\n"
"\n"
"cv # of interpolation points\n"
" {num_rho:d}\n"
"\n"
"\n"
"cv x-grid for ne-interpolation\n"
" {rho_points:s}\n"
"\n"
"\n"
"cv DATA\n"
" {ne_points:s}"
"\n"
"\n"
"cv time-vector\n"
" {num_time:d}\n"
" {time_points:s}\n"
"\n"
"cv Te-function\n"
" interpa\n"
"\n"
"\n"
"cv x-coordinate\n"
" 'poloidal rho'\n"
"\n"
"\n"
"cv # of interpolation points\n"
" {num_rho:d}\n"
"\n"
"\n"
"cv x-grid for Te-interpolation\n"
" {rho_points:s}\n"
"\n"
"\n"
"cv DATA\n"
" {Te_points:s}"
"\n"
"\n"
"cv time-vector\n"
" 0\n".format(
num_time=len(t),
time_points=t_str,
num_rho=len(sqrtpsinorm),
rho_points=rho_str,
ne_points=ne_str,
Te_points=Te_str
)
)
with open(filepath, 'w') as f:
f.write(contents)
return contents
def write_param(
self,
D_grid,
V_grid,
D,
V,
filepath=None,
compute_NC=False,
const_source=None,
element='Ca',
time_2_override=None
):
"""Write plasma param file for the given D, V profiles.
At present this is a very stripped-down version that only implements the
functionality needed for :py:mod:`bayesimp` to run.
Note that this assumes you have written the source file to the right spot.
Also note that there is a bug in STRAHL that causes it to crash if you
use more than 100 points for the D, V profiles -- so don't do that!
Parameters
----------
D_grid : array of float
The sqrtpsinorm points D is given on.
V_grid : array of float
The sqrtpsinorm points V is given on.
D : array of float
Values of D.
V : array of float
Values of V.
filepath : str, optional
The path to write the file to. If absent, param_files/run_SHOT.0 is
used.
compute_NC : bool, optional
If True, neoclassical (NEOART) transport will be computed. Default
is False.
const_source : float, optional
The constant source rate (particles/second) to use. Default is to
use a time-varying source from a file.
element : str, optional
The element (symbol) to use. Default is 'Ca'.
"""
if filepath is None:
filepath = 'param_files/run_{shot:d}.0'.format(shot=self.shot)
rho_str_D = ' '.join(map(str, D_grid))
rho_str_V = ' '.join(map(str, V_grid))
D_str = ' '.join(map(str, D))
V_str = ' '.join(map(str, V))
contents = (
" E L E M E N T\n"
"cv element atomic weight(amu) energy of neutrals(eV)\n"
" '{elsym}' {mass:.2f} 1.00\n"
"\n"
"cv main ion: atomic weight(amu) charge\n"
" 2.014 1\n"
"\n"
" G R I D - F I L E\n"
"cv shot index\n"
" {shot:d} 0\n"
"\n"
" G R I D P O I N T S A N D I T E R A T I O N\n"
"cv K number of grid points dr_center(cm) dr_edge(cm)\n"
" 6.0 100 0.3 0.1\n"
"\n"
"cv max iterations at fixed time stop iteration if change below (%)\n"
" 2000 0.001\n"
"\n"
" S T A R T C O N D I T I O N S\n"
"cv start new=0/from old calc=1 take distr. from shot at time\n"
" 0 0 0.000\n"
"\n"
"\n"
" O U T P U T\n"
"cv save all cycles = 1, save final and start distribution = 0\n"
" 1\n"
"\n"
" T I M E S T E P S\n"
"cv number of changes(start-time+....+stop-time)\n"
" {n_time_spec:d}\n"
"\n"
"cv time dt at start increase of dt after cycle steps per cycle\n"
"{time_spec:s}"
"\n"
" S O U R C E\n"
"cv position(cm) constant rate (1/s) time dependent rate from file\n"
" 90.5 {source:.5g} {from_file:d}\n"
"\n"
# MLR recommends putting -1 for each for stability:
"cv divertor puff source width in(cm) source width out(cm)\n"
" 0 -1 -1\n"
"\n"
" E D G E , R E C Y C L I N G\n"
"cv decay length of impurity outside last grid point (cm)\n"
" 1.0 \n"
"\n"
# NOTE: NTH uses different values for these, but he also appears to
# have used the exact values from the manual...
"cv Rec.:ON=1/OFF=0 wall-rec Tau-div->SOL(ms) Tau-pump(ms)\n"
" 0 0 1. 1000.\n"
"\n"
"cv SOL=width(cm)\n"
" 1.0\n"
"\n"
" D E N S I T Y, T E M P E R A T U R E, A N D N E U T R A L H Y D R O G R E N F O R C X\n"
"cv take from file with: shot index\n"
" {shot:d} 0\n"
"\n"
" N E O C L A S S I C A L T R A N S P O R T\n"
" method\n"
" 0 = off, >0 = % of Drift, 1 = approx\n"
"cv <0 = figure out, but dont use 2/3 = NEOART neoclassics for rho_pol <\n"
" {NC:d} 2 0.99\n"
"\n"
" A N A M A L O U S T R A N S P O R T\n"
"cv # of changes for transport\n"
" 1\n"
"\n"
"cv time-vector\n"
" 0.00000\n"
"\n"
"cv parallel loss times (ms)\n"
" 2.50000\n"
"cv Diffusion [m^2/s]\n"
" 'interp'\n"
"\n"
"cv # of interpolation points\n"
" {num_rho_D:d}\n"
"\n"
"cv rho_pol grid\n"
" {rho_points_D:s}\n"
"\n"
"cv Diffusion Coefficient Grid\n"
" {D_points:s}\n"
"\n"
"cv Drift function only for drift\n"
" 'interp' 'velocity'\n"
"\n"
"cv # of interpolation points\n"
" {num_rho_V:d}\n"
"\n"
"cv rho_pol grid\n"
" {rho_points_V:s}\n"
"\n"
"cv Velocity Coefficient Grid\n"
"\n"
" {V_points:s}\n"
"\n"
"cv # of sawteeth inversion radius (cm)\n"
" 0 1.00\n"
"\n"
"cv times of sawteeth\n"
" 0.00000\n".format(
elsym=element,
mass=periodictable.__dict__[element].mass,
shot=self.shot,
NC=-1 * int(compute_NC),
num_rho_D=len(D_grid),
num_rho_V=len(V_grid),
rho_points_D=rho_str_D,
rho_points_V=rho_str_V,
D_points=D_str,
V_points=V_str,
source=1e17 if const_source is None else const_source,
from_file=const_source is None,
time_spec=self.time_spec if time_2_override is None else DEFAULT_TIME_SPEC.format(time_1=self.time_1, time_2=time_2_override),
n_time_spec=len(self.time_spec.splitlines()) if time_2_override is None else 2
)
)
with open(filepath, 'w') as f:
f.write(contents)
return contents
def write_source(self, t, s):
r"""Write a STRAHL source file.
Will overwrite nete/Caflx{SHOT}.dat.
Parameters
----------
t : array of float, (`n`,)
The timebase (in seconds).
s : array of float, (`n`,)
The source function (in particles/s).
"""
contents = '%d\n' % (len(t),)
for tv, sv in zip(t, s):
contents += ' %5.5f %5.5e\n' % (tv, sv)
with open('nete/Caflx%d.dat' % (self.shot,), 'w') as f:
f.write(contents)
def compute_view_data(self, debug_plots=False, contour_axis=None, **kwargs):
"""Compute the quadrature weights to line-integrate the emission profiles.
Puts the results in the corresponding entries in signal.
Parameters
----------
debug_plots : bool, optional
If True, plots of the weights and chords will be produced. Default
is False (do not make plots).
contour_axis : axis instance, optional
If provided, plot the chords on this axis. All systems will be put
on the same axis! Default is to produce a new figure for each system.
**kwargs : additional keyword arguments, optional
All additional keyword arguments are passed to :py:meth:`DV2cs_den`
when running STRAHL to get the sqrtpsinorm grid used.
"""
# First, do a dummy run of STRAHL to get the grid. Just use random
# draws for the parameters, we just need it to run through:
sqrtpsinormgrid = self.DV2cs_den(
# self.get_prior().random_draw(),
compute_view_data=True,
**kwargs
)
# Temporary HACK:
sqrtpsinormgrid[sqrtpsinormgrid < 0] = 0.0
tokamak = TRIPPy.plasma.Tokamak(self.efit_tree)
# TODO: This is hard-coded with which diagnostic is which, should
# probably be revised!
# Handle HiReX-SR:
if self.signals[0] is not None:
rays = [TRIPPy.beam.pos2Ray(p, tokamak) for p in self.signals[0].pos]
# Handle XEUS:
if self.signals[1] is not None:
XEUS_beam = TRIPPy.beam.pos2Ray(XEUS_POS, tokamak)
# Handle LoWEUS:
if self.signals[1] is not None:
LoWEUS_beam = TRIPPy.beam.pos2Ray(LOWEUS_POS, tokamak)
# Handle HiReX-SR argon:
ar_rays = [TRIPPy.beam.pos2Ray(p, tokamak) for p in self.ar_signal.pos]
# Get xtomo beams:
if self.signals[2] is not None:
xtomo_1_beams = TRIPPy.XTOMO.XTOMO1beam(tokamak)
xtomo_3_beams = TRIPPy.XTOMO.XTOMO3beam(tokamak)
xtomo_5_beams = TRIPPy.XTOMO.XTOMO5beam(tokamak)
# fluxFourierSens returns shape (n_time, n_chord, n_quad), we just have
# one time element.
# Handle HiReX-SR:
if self.signals[0] is not None:
self.signals[0].weights = TRIPPy.invert.fluxFourierSens(
rays,
self.efit_tree.rz2psinorm,
tokamak.center,
(self.time_1 + self.time_2) / 2.0,
sqrtpsinormgrid**2.0,
ds=1e-5
)[0]
# Handle XEUS and LoWEUS:
if self.signals[1] is not None:
XEUS_weights = TRIPPy.invert.fluxFourierSens(
XEUS_beam,
self.efit_tree.rz2psinorm,
tokamak.center,
(self.time_1 + self.time_2) / 2.0,
sqrtpsinormgrid**2.0,
ds=1e-5
)[0]
LoWEUS_weights = TRIPPy.invert.fluxFourierSens(
LoWEUS_beam,
self.efit_tree.rz2psinorm,
tokamak.center,
(self.time_1 + self.time_2) / 2.0,
sqrtpsinormgrid**2.0,
ds=1e-5
)[0]
self.signals[1].weights = scipy.zeros(
(self.signals[1].y.shape[1], len(sqrtpsinormgrid))
)
for i, n in enumerate(self.signals[1].name):
if n == 'XEUS':
self.signals[1].weights[i, :] = XEUS_weights
else:
self.signals[1].weights[i, :] = LoWEUS_weights
# Handle HiReX-SR argon:
self.ar_signal.weights = TRIPPy.invert.fluxFourierSens(
ar_rays,
self.efit_tree.rz2psinorm,
tokamak.center,
(self.time_1 + self.time_2) / 2.0,
sqrtpsinormgrid**2.0,
ds=1e-5
)[0]
# Handle XTOMO:
if self.signals[2] is not None:
xtomo_weights = {}
xtomo_weights[1] = TRIPPy.invert.fluxFourierSens(
xtomo_1_beams,
self.efit_tree.rz2psinorm,
tokamak.center,
(self.time_1 + self.time_2) / 2.0,
sqrtpsinormgrid**2.0,
ds=1e-5
)[0]
xtomo_weights[3] = TRIPPy.invert.fluxFourierSens(
xtomo_3_beams,
self.efit_tree.rz2psinorm,
tokamak.center,
(self.time_1 + self.time_2) / 2.0,
sqrtpsinormgrid**2.0,
ds=1e-5
)[0]
xtomo_weights[5] = TRIPPy.invert.fluxFourierSens(
xtomo_5_beams,
self.efit_tree.rz2psinorm,
tokamak.center,
(self.time_1 + self.time_2) / 2.0,
sqrtpsinormgrid**2.0,
ds=1e-5
)[0]
self.signals[2].weights = scipy.zeros(
(self.signals[2].y.shape[1], len(sqrtpsinormgrid))
)
for i, b in enumerate(self.signals[2].blocks):
self.signals[2].weights[i, :] = xtomo_weights[b][self.signals[2].weight_idxs[i], :]
if debug_plots:
i_flux = profiletools.get_nearest_idx(
(self.time_1 + self.time_2) / 2.0,
self.efit_tree.getTimeBase()
)
color_vals = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
style_vals = ['-', '--', '-.', ':']
ls_vals = []
for s in style_vals:
for c in color_vals:
ls_vals.append(c + s)
ls_cycle = itertools.cycle(ls_vals)
if self.signals[0] is not None:
f = plt.figure()
a = f.add_subplot(1, 1, 1)
for w in self.signals[0].weights:
a.plot(sqrtpsinormgrid**2, w, ls_cycle.next())
a.set_xlabel(r"$\psi_n$")
a.set_ylabel("quadrature weights")
a.set_title("HiReX-SR Calcium")
if self.signals[1] is not None:
f = plt.figure()
a = f.add_subplot(1, 1, 1)
for w in self.signals[1].weights:
a.plot(sqrtpsinormgrid**2, w, ls_cycle.next())
a.set_xlabel(r'$\psi_n$')
a.set_ylabel('quadrature weights')
a.set_title('VUV')
ls_cycle = itertools.cycle(ls_vals)
vuv_cycle = itertools.cycle(['b', 'g'])
from TRIPPy.plot.pyplot import plotTokamak, plotLine
if contour_axis is None:
f = plt.figure()
a = f.add_subplot(1, 1, 1)
# Only plot the tokamak if an axis was not provided:
plotTokamak(tokamak)
else:
a = contour_axis
plt.sca(a)
# Plot VUV in different color:
if self.signals[0] is not None:
for r in rays:
plotLine(r, pargs='r')#ls_cycle.next())
if self.signals[1] is not None:
plotLine(XEUS_beam, pargs=vuv_cycle.next(), lw=3)
plotLine(LoWEUS_beam, pargs=vuv_cycle.next(), lw=3)
if contour_axis is None:
a.contour(
self.efit_tree.getRGrid(),
self.efit_tree.getZGrid(),
self.efit_tree.getFluxGrid()[i_flux, :, :],
50
)
a.set_title("HiReX-SR, VUV")
# Do it over again for Ar:
ls_cycle = itertools.cycle(ls_vals)
f = plt.figure()
a = f.add_subplot(1, 1, 1)
for w in self.ar_signal.weights:
a.plot(sqrtpsinormgrid**2, w, ls_cycle.next())
a.set_xlabel(r"$\psi_n$")
a.set_ylabel("quadrature weights")
a.set_title("HiReX-SR, argon")
ls_cycle = itertools.cycle(ls_vals)
if contour_axis is None:
f = plt.figure()
a = f.add_subplot(1, 1, 1)
# Only plot the tokamak if an axis was not provided:
plotTokamak(tokamak)
else:
a = contour_axis
plt.sca(a)
for r in ar_rays:
plotLine(r, pargs=ls_cycle.next())
if contour_axis is None:
a.contour(
self.efit_tree.getRGrid(),
self.efit_tree.getZGrid(),
self.efit_tree.getFluxGrid()[i_flux, :, :],
50
)
a.set_title("Argon")
if self.signals[2] is not None:
# And for XTOMO 1:
ls_cycle = itertools.cycle(ls_vals)
f = plt.figure()
a = f.add_subplot(1, 1, 1)
for w in xtomo_weights[1]:
a.plot(sqrtpsinormgrid**2, w, ls_cycle.next())
a.set_xlabel(r"$\psi_n$")
a.set_ylabel("quadrature weights")
a.set_title("XTOMO 1")
ls_cycle = itertools.cycle(ls_vals)
if contour_axis is None:
f = plt.figure()
a = f.add_subplot(1, 1, 1)
# Only plot the tokamak if an axis was not provided:
plotTokamak(tokamak)
else:
a = contour_axis
plt.sca(a)
for r in xtomo_1_beams:
plotLine(r, pargs='r')#ls_cycle.next())
if contour_axis is None:
a.contour(
self.efit_tree.getRGrid(),
self.efit_tree.getZGrid(),
self.efit_tree.getFluxGrid()[i_flux, :, :],
50
)
a.set_title("XTOMO 1")
# And for XTOMO 3:
ls_cycle = itertools.cycle(ls_vals)
f = plt.figure()
a = f.add_subplot(1, 1, 1)
for w in xtomo_weights[3]:
a.plot(sqrtpsinormgrid**2, w, ls_cycle.next())
a.set_xlabel(r"$\psi_n$")
a.set_ylabel("quadrature weights")
a.set_title("XTOMO 3")
ls_cycle = itertools.cycle(ls_vals)
if contour_axis is None:
f = plt.figure()
a = f.add_subplot(1, 1, 1)
# Only plot the tokamak if an axis was not provided:
plotTokamak(tokamak)
else:
a = contour_axis
plt.sca(a)
for r in xtomo_3_beams:
plotLine(r, pargs='r')#ls_cycle.next())
if contour_axis is None:
a.contour(
self.efit_tree.getRGrid(),
self.efit_tree.getZGrid(),
self.efit_tree.getFluxGrid()[i_flux, :, :],
50
)
a.set_title("XTOMO 3")
# And for XTOMO 5:
ls_cycle = itertools.cycle(ls_vals)
f = plt.figure()
a = f.add_subplot(1, 1, 1)
for w in xtomo_weights[5]:
a.plot(sqrtpsinormgrid**2, w, ls_cycle.next())
a.set_xlabel(r"$\psi_n$")
a.set_ylabel("quadrature weights")
a.set_title("XTOMO 5")
ls_cycle = itertools.cycle(ls_vals)
if contour_axis is None:
f = plt.figure()
a = f.add_subplot(1, 1, 1)
# Only plot the tokamak if an axis was not provided:
plotTokamak(tokamak)
else:
a = contour_axis
plt.sca(a)
for r in xtomo_5_beams:
plotLine(r, pargs='r')#ls_cycle.next())
if contour_axis is None:
a.contour(
self.efit_tree.getRGrid(),
self.efit_tree.getZGrid(),
self.efit_tree.getFluxGrid()[i_flux, :, :],
50
)
a.set_title("XTOMO 5")
print("Done finding view data!")
def load_PEC(self):
"""Load the photon emissivity coefficients from the ADF15 files.
"""
self._PEC = {}
atom_dir = 'atomdat/adf15/ca'
for p in os.listdir(atom_dir):
if p[0] == '.':
continue
res = re.split(".*ca([0-9]+)\.dat", os.path.basename(p))
self._PEC[int(res[1])] = read_ADF15(os.path.join('atomdat/adf15/ca', p))
#, debug_plots=[3.173, 19.775, 19.79]
@property
def PEC(self):
"""Reload the photon emissivity coefficients from the ADF15 files only as needed.
"""
if self._PEC is None:
self.load_PEC()
return self._PEC
def load_Ar_PEC(self, use_ADAS=False, debug_plots=False):
"""Load the photon emissivity coefficients from the ADF15 files.
"""
self._Ar_PEC = {}
if use_ADAS:
self._Ar_PEC[16] = read_ADF15(
'atomdat/adf15/ar/fpk#ar16.dat',
debug_plots=[4.0,] if debug_plots else []
)
else:
f = scipy.io.readsav('../ar_rates.sav')
Te = scipy.asarray(f.Te, dtype=float) * 1e3
exc = scipy.asarray(f.exc, dtype=float)
rec = scipy.asarray(f.rec, dtype=float)
ion = scipy.asarray(f.ion, dtype=float)
# Excitation:
self._Ar_PEC[16] = {
4.0: [scipy.interpolate.InterpolatedUnivariateSpline(scipy.log10(Te), exc)]
}
# Recombination:
self._Ar_PEC[17] = {
4.0: [scipy.interpolate.InterpolatedUnivariateSpline(scipy.log10(Te), rec)]
}
# Ionization:
self._Ar_PEC[15] = {
4.0: [scipy.interpolate.InterpolatedUnivariateSpline(scipy.log10(Te), ion)]
}
if debug_plots:
f = plt.figure()
a = f.add_subplot(1, 1, 1)
a.plot(Te, exc, '.', label='exc')
a.plot(Te, rec, '.', label='rec')
a.plot(Te, ion, '.', label='ion')
a.set_xlabel('$T_e$ [eV]')
a.set_ylabel('PEC')
a.legend(loc='best')
@property
def Ar_PEC(self):
"""Reload the photon emissivity coefficients from the ADF15 files only as needed.
"""
if self._Ar_PEC is None:
self.load_Ar_PEC()
return self._Ar_PEC
def __getstate__(self):
"""Pitch the PEC's while loading because scipy.interpolate is stupid and not pickleable.
"""
self._PEC = None
self._Ar_PEC = None
return self.__dict__
def assemble_surrogate(self, stub, thresh=None):
"""Assemble a GP surrogate model from files with names stub_*.pkl.
Returns a :py:class:`GaussianProcess` instance, trained with the data.
"""
bounds = [(0, 1e7),] + [(0, r[1] - r[0]) for r in self.get_prior().bounds]
k = gptools.SquaredExponentialKernel(
num_dim=len(bounds) - 1,
param_bounds=bounds,
initial_params=[(b[0] + b[1]) / 4.0 for b in bounds]
)
gp = gptools.GaussianProcess(k)
files = glob.glob(stub + '*.pkl')
for fn in files:
with open(fn, 'rb') as f:
d = pkl.load(f)
params = scipy.asarray(d['params'])
lp = scipy.asarray(d['lp'])
mask = (~scipy.isinf(lp)) & (~scipy.isnan(lp))
if thresh is not None:
mask = mask & (lp >= thresh)
params = params[mask]
lp = lp[mask]
gp.add_data(params, lp)
return gp
def sample_surrogate(
self,
gp,
nsamp,
burn=None,
num_proc=None,
nwalkers=None,
pool=None,
**sampler_kwargs
):
"""Run MCMC on the GP surrogate.
"""
# Make sure this has been run *before* sending out to nodes:
# gp.compute_K_L_alpha_ll()
ndim = gp.num_dim
if burn is None:
burn = nsamp // 2
if num_proc is None:
if pool is not None:
num_proc = pool._processes
else:
num_proc = multiprocessing.cpu_count()
if nwalkers is None:
nwalkers = num_proc * ndim * 2
if num_proc > 1 and pool is None:
pool = InterruptiblePool(processes=num_proc)
sampler = emcee.EnsembleSampler(
nwalkers,
ndim,
_CallGPWrapper(gp),
pool=pool,
kwargs={'return_std': False},
**sampler_kwargs
)
# Construct the initial points for the sampler:
y_sort = gp.y.argsort()[::-1]
X_sorted = gp.X[y_sort]
theta0 = X_sorted[0:nwalkers]
print("Starting MCMC sampler...this will take a while.")
try:
subprocess.call('fortune -a | cowsay -f vader-koala', shell=True)
except:
pass
t_start = time_.time()
sampler.run_mcmc(theta0, nsamp)
t_elapsed = time_.time() - t_start
print("MCMC sampler done, elapsed time is %.2fs." % (t_elapsed,))
return sampler
def analyze_envelopes(self, gp, max_include):
"""Make plots of the envelopes of parameters
"""
sort_arg = gp.y.argsort()[::-1]
y = gp.y[sort_arg[:max_include]]
X = gp.X[sort_arg[:max_include]]
X_mins = scipy.zeros_like(X)
X_maxs = scipy.zeros_like(X)
idxs = xrange(1, max_include)
for k in idxs:
X_mins[k] = X[:k].min(axis=0)
X_maxs[k] = X[:k].max(axis=0)
X_mins = X_mins[1:]
X_maxs = X_maxs[1:]
for k, l in zip(xrange(0, X.shape[1]), self.get_labels()):
f = plt.figure()
a = f.add_subplot(1, 1, 1)
a.plot(idxs, X_mins[:, k], 'b')
a.plot(idxs, X_maxs[:, k], 'b')
a.set_title(l)
def read_surrogate(self, stub, num_proc=None):
"""Attempt to construct an importance sampling estimate from the surrogate samples.
DOESN'T WORK!
"""
lp = None
ll = None
params = None
files = glob.glob(stub + '*.pkl')
for fn in files:
with open(fn, 'rb') as f:
d = pkl.load(f)
if params is None:
params = scipy.asarray(d['params'])
lp = scipy.asarray(d['lp'])
ll = scipy.asarray(d['ll'])
else:
params = scipy.vstack((params, d['params']))
lp = scipy.concatenate((lp, d['lp']))
ll = scipy.concatenate((ll, d['ll']))
mask = (~scipy.isinf(lp)) & (~scipy.isnan(lp))
params = params[mask]
lp = lp[mask]
ll = ll[mask]
if num_proc is None:
num_proc = multiprocessing.cpu_count()
pool = InterruptiblePool(processes=num_proc)
try:
DV = pool.map(_ComputeProfileWrapper(self), params)
finally:
pool.close()
DV = scipy.asarray(DV)
D = DV[:, 0, :]
V = DV[:, 1, :]
lprior = lp - ll
# Compute self-normalized importance sampling weights:
lw = lp - lprior
lw = lw - scipy.misc.logsumexp(lw)
# Compute mean profile:
# THIS DOESN'T WORK -- THE WEIGHTS ARE ALL CONCENTRATED ON ONE PROFILE.
return lw
def plot_surrogate_samples(self, gp):
"""Plots the samples from the surrogate.
The alpha is chosen based on the log-posterior.
"""
f = plt.figure()
a_D = f.add_subplot(2, 1, 1)
a_V = f.add_subplot(2, 1, 2, sharex=a_D)
a_V.set_xlabel('$r/a$')
a_D.set_ylabel('$D$ [m$^2$/s]')
a_V.set_ylabel('$V$ [m/s]')
max_lp = gp.y.max()
min_lp = gp.y.min()
for lp, p in zip(gp.y, gp.X):
D, V = self.eval_DV(p)
a_D.plot(self.roa_grid_DV, D, 'k', alpha=0.1 * (lp - min_lp) / (max_lp - min_lp))
a_V.plot(self.roa_grid_DV, V, 'k', alpha=0.1 * (lp - min_lp) / (max_lp - min_lp))
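# Illustrative sketch (not from the original source): how the surrogate helpers
# above fit together. It assumes `r` is an already-configured Run instance and
# that pickled sample files matching 'samples_*.pkl' exist; both names are
# placeholders.
#
#     gp = r.assemble_surrogate('samples_', thresh=-1e4)      # GP trained on (params, lp) samples
#     sampler = r.sample_surrogate(gp, nsamp=1000, burn=500)  # MCMC on the cheap surrogate
#     r.plot_surrogate_samples(gp)                            # inspect the implied D, V profiles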
class RunData(object):
"""Class to store the run data (both raw and edited).
Assumes the current directory contains the IDL save file "run_data.sav".
Performs the following operations:
* Launches gpfit for the user to fit the Te profile.
* Launches gpfit for the user to fit the ne profile.
Parameters
----------
shot : int
The shot number to analyze.
time_1 : float
The start of the analysis time window, in seconds.
time_2 : float
The end of the analysis time window, in seconds.
roa_grid : array of float
The r/a grid to evaluate the fitted ne and Te profiles on.
Te_args : list of str
Additional command-line flags to pass to gpfit when fitting the Te profile.
ne_args : list of str
Additional command-line flags to pass to gpfit when fitting the ne profile.
"""
def __init__(self, shot, time_1, time_2, roa_grid, Te_args, ne_args):
self.shot = shot
self.roa_grid = roa_grid
self.time_1 = time_1
self.time_2 = time_2
self.Te_args = Te_args
self.ne_args = ne_args
# Load the ne, Te data:
print("Loading Te data...")
self.load_Te()
print("Loading ne data...")
self.load_ne()
print("Loading and processing of run data complete.")
def load_Te(self):
"""Load and fit the Te data using gpfit.
"""
self.Te_X, self.Te_res, self.Te_p = self.load_prof('Te', self.Te_args)
def load_ne(self):
"""Load and fit the ne data using gpfit.
"""
self.ne_X, self.ne_res, self.ne_p = self.load_prof('ne', self.ne_args)
def load_prof(self, prof, flags):
"""Load the specified profile using gpfit.
Parameters
----------
prof : {'ne', 'Te'}
The profile to fit.
flags : list of str
The command line flags to pass to gpfit. Must not contain --signal,
--shot, --t-min, --t-max or --coordinate.
"""
print(
"Use gpfit to fit the %s profile. When the profile has been fit to "
"your liking, press the 'exit' button." % (prof,)
)
argv = [
'--signal', prof,
'--shot', str(self.shot),
'--t-min', str(self.time_1),
'--t-max', str(self.time_2),
'--coordinate', 'r/a',
'--no-a-over-L',
'--x-pts'
]
argv += [str(x) for x in self.roa_grid]
argv += flags
return profiletools.gui.run_gui(argv=argv)
def plot_eigenvalue_spectrum(self, thresh=0.1):
"""Plot the eigenvalue spectra of the ne, Te fits.
This is used to select a cutoff eigenvalue.
"""
eig_ne, Q_ne = scipy.linalg.eigh(self.ne_res['cov'])
eig_Te, Q_Te = scipy.linalg.eigh(self.Te_res['cov'])
num_eig_ne = (scipy.sqrt(eig_ne[::-1] / eig_ne.max()) >= thresh).sum()
num_eig_Te = (scipy.sqrt(eig_Te[::-1] / eig_Te.max()) >= thresh).sum()
f = plt.figure()
a_ne = f.add_subplot(2, 1, 1)
a_ne.plot(scipy.sqrt(eig_ne[::-1] / eig_ne.max()), 'o-')
a_ne.axhline(thresh)
a_ne.set_xlabel('number')
a_ne.set_ylabel(r'$\sqrt{\lambda / \lambda_0}$')
a_ne.set_title(r"$n_{\mathrm{e}}$")
a_Te = f.add_subplot(2, 1, 2)
a_Te.plot(scipy.sqrt(eig_Te[::-1] / eig_Te.max()), 'o-')
a_Te.axhline(thresh)
a_Te.set_xlabel('number')
a_Te.set_ylabel(r'$\sqrt{\lambda / \lambda_0}$')
a_Te.set_title(r"$T_{\mathrm{e}}$")
return num_eig_ne, num_eig_Te
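# Illustrative sketch (assumption): typical construction of RunData. Building
# it is interactive (gpfit GUIs are launched for the Te and ne fits); the shot
# number and time window below are placeholders.
#
#     import scipy
#     roa_grid = scipy.linspace(0.0, 1.2, 100)
#     rd = RunData(shot, 1.0, 1.4, roa_grid, Te_args=[], ne_args=[])
#     num_eig_ne, num_eig_Te = rd.plot_eigenvalue_spectrum(thresh=0.1)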
class TruthData(object):
"""Class to hold the truth values for synthetic data.
"""
def __init__(
self,
params_true,
cs_den,
time,
sqrtpsinorm,
explicit_D=None,
explicit_D_grid=None,
explicit_V=None,
explicit_V_grid=None,
dlines=None,
sig_abs=None,
sig_norm=None,
cs_den_ar=None,
dlines_ar=None,
sig_abs_ar=None,
sig_norm_ar=None,
time_ar=None
):
self.params_true = params_true
self.cs_den = cs_den
self.dlines = dlines
self.sig_abs = sig_abs
self.sig_norm = sig_norm
self.time = time
self.sqrtpsinorm = sqrtpsinorm
self.cs_den_ar = cs_den_ar
self.dlines_ar = dlines_ar
self.sig_abs_ar = sig_abs_ar
self.sig_norm_ar = sig_norm_ar
self.time_ar = time_ar
self.explicit_D = explicit_D
self.explicit_D_grid = explicit_D_grid
self.explicit_V = explicit_V
self.explicit_V_grid = explicit_V_grid
class Injection(object):
"""Class to store information on a given injection.
"""
def __init__(self, t_inj, t_start, t_stop):
self.t_inj = t_inj
self.t_start = t_start
self.t_stop = t_stop
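# Illustrative sketch: injections are defined by hand for a given shot and
# passed to the data-loading helpers below, e.g.
#
#     injections = [
#         Injection(t_inj=1.00, t_start=0.99, t_stop=1.08),
#         Injection(t_inj=1.25, t_start=1.24, t_stop=1.33),
#     ]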
class Signal(object):
def __init__(self, y, std_y, y_norm, std_y_norm, t, name, atomdat_idx, pos=None, sqrtpsinorm=None, weights=None, blocks=0):
"""Class to store the data from a given diagnostic.
In the parameter descriptions, `n` is the number of signals (both
spatial and temporal) contained in the instance.
Parameters
----------
y : array, (`n_time`, `n`)
The unnormalized, baseline-subtracted data as a function of time and
space. If `pos` is not None, "space" refers to the chords. Wherever
there is a bad point, it should be set to NaN.
std_y : array, (`n_time`, `n`)
The uncertainty in the unnormalized, baseline-subtracted data as a
function of time and space.
y_norm : array, (`n_time`, `n`)
The normalized, baseline-subtracted data.
std_y_norm : array, (`n_time`, `n`)
The uncertainty in the normalized, baseline-subtracted data.
t : array, (`n_time`,)
The time vector of the data.
name : str
The name of the signal.
atomdat_idx : int or array of int, (`n`,)
The index or indices of the signals in the atomdat file. If a single
value is given, it is used for all of the signals. If a 1d array is
provided, these are the indices for each of the signals in `y`. If
`atomdat_idx` (or one of its entries) is -1, it will be treated as
an SXR measurement.
pos : array, (4,) or (`n`, 4), optional
The POS vector(s) for line-integrated data. If not present, the data
are assumed to be local measurements at the locations in
`sqrtpsinorm`. If a 1d array is provided, it is used for all of the
chords in `y`. Otherwise, there must be one pos vector for each of
the chords in `y`.
sqrtpsinorm : array, (`n`,), optional
The square root of poloidal flux grid the (local) measurements are
given on. If line-integrated measurements with the standard STRAHL
grid for their quadrature points are to be used this should be left
as None.
weights : array, (`n`, `n_quadrature`), optional
The quadrature weights to use. This can be left as None for a local
measurement or can be set later.
blocks : int or array of int, (`n`), optional
A set of flags indicating which channels in the :py:class:`Signal`
should be treated together as a block when normalizing. If a single
int is given, all of the channels will be taken together. Otherwise,
any channels sharing the same block number will be taken together.
"""
self.y = scipy.asarray(y, dtype=float)
if self.y.ndim != 2:
raise ValueError("y must have two dimensions!")
self.std_y = scipy.asarray(std_y, dtype=float)
if self.y.shape != self.std_y.shape:
raise ValueError("The shapes of y and std_y must match!")
self.y_norm = scipy.asarray(y_norm, dtype=float)
if self.y.shape != self.y_norm.shape:
raise ValueError("The shapes of y and y_norm must match!")
self.std_y_norm = scipy.asarray(std_y_norm, dtype=float)
if self.std_y_norm.shape != self.y.shape:
raise ValueError("The shapes of y and std_y_norm must match!")
self.t = scipy.asarray(t, dtype=float)
if self.t.ndim != 1:
raise ValueError("t must have one dimension!")
if len(self.t) != self.y.shape[0]:
raise ValueError("The length of t must equal the length of the leading dimension of y!")
if isinstance(name, str):
name = [name,] * self.y.shape[1]
self.name = name
try:
iter(atomdat_idx)
except TypeError:
self.atomdat_idx = atomdat_idx * scipy.ones(self.y.shape[1], dtype=int)
else:
self.atomdat_idx = scipy.asarray(atomdat_idx, dtype=int)
if self.atomdat_idx.ndim != 1:
raise ValueError("atomdat_idx must have at most one dimension!")
if len(self.atomdat_idx) != self.y.shape[1]:
raise ValueError("1d atomdat_idx must have the same number of elements as the second dimension of y!")
if pos is not None:
pos = scipy.asarray(pos, dtype=float)
if pos.ndim not in (1, 2):
raise ValueError("pos must have one or two dimensions!")
if pos.ndim == 1 and len(pos) != 4:
raise ValueError("pos must have 4 elements!")
if pos.ndim == 2 and (pos.shape[0] != self.y.shape[1] or pos.shape[1] != 4):
raise ValueError("pos must have shape (n, 4)!")
self.pos = pos
self.sqrtpsinorm = sqrtpsinorm
self.weights = weights
try:
iter(blocks)
except TypeError:
self.blocks = blocks * scipy.ones(self.y.shape[1], dtype=int)
else:
self.blocks = scipy.asarray(blocks, dtype=int)
if self.blocks.ndim != 1:
raise ValueError("blocks must have at most one dimension!")
if len(self.blocks) != self.y.shape[1]:
raise ValueError("1d blocks must have the same number of elements as the second dimension of y!")
def sort_t(self):
"""Sort the time axis.
"""
srt = self.t.argsort()
self.t = self.t[srt]
self.y = self.y[srt, :]
self.std_y = self.std_y[srt, :]
self.y_norm = self.y_norm[srt, :]
self.std_y_norm = self.std_y_norm[srt, :]
def plot_data(self, norm=False, f=None, share_y=False, y_label='$b$ [AU]',
max_ticks=None, rot_label=False, fast=False, ncol=6):
"""Make a big plot with all of the data.
Parameters
----------
norm : bool, optional
If True, plot the normalized data. Default is False (plot
unnormalized data).
f : :py:class:`Figure`, optional
The figure instance to make the subplots in. If not provided, a
figure will be created.
share_y : bool, optional
If True, the y axes of all of the subplots will have the same scale.
Default is False (each y axis is automatically scaled individually).
y_label : str, optional
The label to use for the y axes. Default is '$b$ [AU]'.
max_ticks : int, optional
The maximum number of ticks on the x and y axes. Default is no limit.
rot_label : bool, optional
If True, the x axis labels will be rotated 90 degrees. Default is
False (do not rotate).
fast : bool, optional
If True, errorbars will not be drawn in order to make the plotting
faster. Default is False (draw error bars).
ncol : int, optional
The number of columns to use. Default is 6.
"""
if norm:
y = self.y_norm
std_y = self.std_y_norm
else:
y = self.y
std_y = self.std_y
if f is None:
f = plt.figure()
ncol = int(min(ncol, self.y.shape[1]))
nrow = int(scipy.ceil(1.0 * self.y.shape[1] / ncol))
gs = mplgs.GridSpec(nrow, ncol)
a = []
i_col = 0
i_row = 0
for k in xrange(0, self.y.shape[1]):
a.append(
f.add_subplot(
gs[i_row, i_col],
sharex=a[0] if len(a) >= 1 else None,
sharey=a[0] if len(a) >= 1 and share_y else None
)
)
if i_col > 0 and share_y:
plt.setp(a[-1].get_yticklabels(), visible=False)
else:
a[-1].set_ylabel(y_label)
if i_row < nrow - 2 or (i_row == nrow - 2 and i_col < self.y.shape[1] % (nrow - 1)):
plt.setp(a[-1].get_xticklabels(), visible=False)
else:
a[-1].set_xlabel('$t$ [s]')
if rot_label:
plt.setp(a[-1].xaxis.get_majorticklabels(), rotation=90)
i_col += 1
if i_col >= ncol:
i_col = 0
i_row += 1
a[-1].set_title('%s, %d' % (self.name[k], k))
good = ~scipy.isnan(self.y[:, k])
if fast:
a[-1].plot(self.t[good], y[good, k], '.')
else:
a[-1].errorbar(self.t[good], y[good, k], yerr=std_y[good, k], fmt='.')
if max_ticks is not None:
a[-1].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_ticks - 1))
a[-1].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_ticks - 1))
if share_y:
a[0].set_ylim(bottom=0.0)
a[0].set_xlim(self.t.min(), self.t.max())
f.canvas.draw()
return (f, a)
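# Illustrative sketch (synthetic numbers): building a minimal two-chord Signal
# and plotting it. Shapes follow the docstring: y is (n_time, n).
#
#     import scipy
#     t = scipy.linspace(0.0, 0.1, 50)
#     y = scipy.exp(-t / 0.02)[:, None] * scipy.asarray([1.0, 0.5])
#     std_y = 0.05 * scipy.ones_like(y)
#     sig = Signal(y, std_y, y / y.max(), std_y / y.max(), t, 'demo', 0,
#                  sqrtpsinorm=scipy.asarray([0.2, 0.4]))
#     f, a = sig.plot_data()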
class LocalSignal(Signal):
"""Class to store local charge state density measurements.
Parameters
----------
y : array of float, (`n_time`, `n_space`)
"""
def __init__(self, y, std_y, y_norm, std_y_norm, t, sqrtpsinorm, cs_den_idx):
self.y = scipy.asarray(y, dtype=float)
if y.ndim != 2:
raise ValueError("y must have exactly 2 dimensions!")
self.std_y = scipy.asarray(std_y, dtype=float)
if self.std_y.shape != self.y.shape:
raise ValueError("std_y must have the same shape as y!")
self.y_norm = scipy.asarray(y_norm, dtype=float)
if self.y_norm.shape != self.y.shape:
raise ValueError("y_norm must have the same shape as y!")
self.std_y_norm = scipy.asarray(std_y_norm, dtype=float)
if self.std_y_norm.shape != self.y.shape:
raise ValueError("std_y_norm must have the same shape as y!")
self.sqrtpsinorm = scipy.asarray(sqrtpsinorm, dtype=float)
if self.sqrtpsinorm.ndim != 1:
raise ValueError("sqrtpsinorm must have exactly 1 dimension!")
if self.y.shape[1] != len(self.sqrtpsinorm):
raise ValueError("Length of sqrtpsinorm must equal self.y.shape[1]!")
self.t = scipy.asarray(t, dtype=float)
if self.t.ndim != 1:
raise ValueError("t must have exactly 1 dimension!")
if self.y.shape[0] != len(self.t):
raise ValueError("Length of t must equal self.y.shape[0]!")
self.cs_den_idx = cs_den_idx
@property
def name(self):
if self.cs_den_idx is None:
return [r'local $n_Z$, $\sqrt{\psi_{\mathrm{n}}}=%.1f$' % (rho,) for rho in self.sqrtpsinorm]
else:
return [r'local $n_{Z,%d}$, $\sqrt{\psi_{\mathrm{n}}}=%.1f$' % (self.cs_den_idx, rho) for rho in self.sqrtpsinorm]
class HirexData(object):
"""Helper object to load and process the HiReX-SR data.
Can load either the Ca or the Ar data.
The sequence of operations is as follows:
* Load the relevant data from 'run_data.sav'.
* Launch a GUI to flag/unflag possible outliers.
* If requested, plot the raw data.
* Parcel the data into injections, normalize and combine.
* Generate a :py:class:`Signal` containing the data. This is stored in the
attribute :py:attr:`self.signal` for later use.
Parameters
----------
injections : list of :py:class:`Injection`
The injections the data are to be grouped into.
ar : bool, optional
If True, the argon data are loaded. Otherwise the Ca data are loaded.
Default is False (load Ca data).
debug_plots : bool, optional
If True, plots are made. Default is False.
"""
def __init__(self, injections, ar=False, debug_plots=False):
data = scipy.io.readsav('run_data.sav')
if ar:
self.hirex_signal = scipy.asarray(data.ar_data.srsignal[0], dtype=float)
self.hirex_uncertainty = scipy.asarray(data.ar_data.srerr[0], dtype=float)
self.hirex_pos = scipy.asarray(data.ar_data.pos[0], dtype=float)
self.hirex_time = scipy.asarray(data.ar_data.t[0], dtype=float)
self.hirex_tht = data.ar_data.tht[0]
self.hirex_line = data.ar_data.line[0]
else:
self.hirex_signal = scipy.asarray(data.hirex_data.srsignal[0], dtype=float)
self.hirex_uncertainty = scipy.asarray(data.hirex_data.srerr[0], dtype=float)
self.hirex_pos = scipy.asarray(data.hirex_data.pos[0], dtype=float)
self.hirex_time = scipy.asarray(data.hirex_data.t[0], dtype=float)
self.hirex_tht = data.hirex_data.tht[0]
self.hirex_line = data.hirex_data.line[0]
self.shot = data.shot
self.time_1 = data.time_1
self.time_2 = data.time_2
self.hirex_flagged = (
(self.hirex_uncertainty > HIREX_THRESH) |
(self.hirex_uncertainty == 0.0)
)
# Flag bad points:
root = HirexWindow(self, ar=ar)
root.mainloop()
if debug_plots:
f = self.plot_data()
# Process the injections:
if not ar:
t = []
y = []
std_y = []
y_norm = []
std_y_norm = []
for k, i in enumerate(injections):
t_hirex_start, t_hirex_stop = profiletools.get_nearest_idx(
[i.t_start, i.t_stop],
self.hirex_time
)
hirex_signal = self.hirex_signal[t_hirex_start:t_hirex_stop + 1, :]
hirex_flagged = self.hirex_flagged[t_hirex_start:t_hirex_stop + 1, :]
hirex_uncertainty = self.hirex_uncertainty[t_hirex_start:t_hirex_stop + 1, :]
hirex_time = self.hirex_time[t_hirex_start:t_hirex_stop + 1] - i.t_inj
# Normalize to the brightest interpolated max on the brightest
# chord:
maxs = scipy.zeros(hirex_signal.shape[1])
s_maxs = scipy.zeros_like(maxs)
for j in xrange(0, hirex_signal.shape[1]):
good = ~hirex_flagged[:, j]
maxs[j], s_maxs[j] = interp_max(
hirex_time[good],
hirex_signal[good, j],
err_y=hirex_uncertainty[good, j],
debug_plots=debug_plots,
method='GP'
)
i_max = maxs.argmax()
m = maxs[i_max]
s = s_maxs[i_max]
hirex_signal[hirex_flagged] = scipy.nan
t.append(hirex_time)
y.append(hirex_signal)
std_y.append(hirex_uncertainty)
y_norm.append(hirex_signal / m)
std_y_norm.append(scipy.sqrt((hirex_uncertainty / m)**2.0 + (s / m)**2.0))
self.signal = Signal(
scipy.vstack(y),
scipy.vstack(std_y),
scipy.vstack(y_norm),
scipy.vstack(std_y_norm),
scipy.hstack(t),
'HiReX-SR',
0,
pos=self.hirex_pos
)
else:
t_mask = (self.hirex_time >= self.time_1) & (self.hirex_time <= self.time_2)
self.hirex_signal = self.hirex_signal[t_mask, :]
self.hirex_uncertainty = self.hirex_uncertainty[t_mask, :]
self.hirex_time = self.hirex_time[t_mask]
self.hirex_flagged = self.hirex_flagged[t_mask, :]
self.hirex_signal[self.hirex_flagged] = scipy.nan
normalization = scipy.nanmax(self.hirex_signal)
self.signal = Signal(
self.hirex_signal,
self.hirex_uncertainty,
self.hirex_signal / normalization,
self.hirex_uncertainty / normalization,
self.hirex_time,
'HiReX-SR (Ar)',
0,
pos=self.hirex_pos
)
def plot_data(self, z_max=None):
"""Make a 3d scatterplot of the data.
Parameters
----------
z_max : float, optional
The maximum value for the z axis. Default is None (no limit).
"""
f = plt.figure()
a = f.add_subplot(1, 1, 1, projection='3d')
t = self.hirex_time
keep = ~(self.hirex_flagged.ravel())
signal = self.hirex_signal
uncertainty = self.hirex_uncertainty
CHAN, T = scipy.meshgrid(range(0, signal.shape[1]), t)
profiletools.errorbar3d(
a,
T.ravel()[keep],
CHAN.ravel()[keep],
signal.ravel()[keep],
zerr=uncertainty.ravel()[keep]
)
a.set_zlim(0, z_max)
a.set_xlabel('$t$ [s]')
a.set_ylabel('channel')
a.set_zlabel('HiReX-SR signal [AU]')
return f
class VUVData(object):
"""Helper object to load and process the VUV data.
Execution proceeds as follows:
* Loads the XEUS data.
* Allows user to select lines, background subtraction intervals.
* Loads the LoWEUS data.
* Allows the user to select lines, background subtraction intervals.
* Computes the normalization factors.
* Loads the data into a :py:class:`Signal` instance. This is stored in the
attribute :py:attr:`signal` for later use.
"""
def __init__(self, shot, injections, debug_plots=False):
self.shot = shot
self.vuv_lines = collections.OrderedDict()
self.vuv_signal = {}
self.vuv_time = {}
self.vuv_lam = {}
self.vuv_uncertainty = {}
self.load_vuv('XEUS')
try:
self.load_vuv('LoWEUS')
except:
print("Could not load LoWEUS data.")
t = []
y = []
std_y = []
y_norm = []
std_y_norm = []
for k, i in enumerate(injections):
vuv_signals = []
vuv_uncertainties = []
vuv_times = []
for s in self.vuv_lines.keys():
i_start, i_stop = profiletools.get_nearest_idx(
[i.t_start, i.t_stop],
self.vuv_time[s]
)
for l in self.vuv_lines[s]:
if l.diagnostic_lines is not None:
vuv_signals.append(
l.signal[i_start:i_stop + 1]
)
vuv_uncertainties.append(
l.uncertainty[i_start:i_stop + 1]
)
vuv_times.append(
self.vuv_time[s][i_start:i_stop + 1] - i.t_inj
)
vuv_signals = scipy.asarray(vuv_signals)
vuv_uncertainties = scipy.asarray(vuv_uncertainties)
vuv_times = scipy.asarray(vuv_times)
# We don't have a brightness cal for XEUS or LoWEUS, so normalize to
# the peak:
vuv_signals_norm = scipy.nan * scipy.zeros_like(vuv_signals)
vuv_uncertainties_norm = scipy.nan * scipy.zeros_like(vuv_uncertainties)
for j in xrange(0, vuv_signals.shape[0]):
m, s = interp_max(
vuv_times[j, :],
vuv_signals[j, :],
err_y=vuv_uncertainties[j, :],
debug_plots=debug_plots,
s_max=100.0
)
vuv_signals_norm[j, :] = vuv_signals[j, :] / m
vuv_uncertainties_norm[j, :] = (
scipy.sqrt(
(vuv_uncertainties[j, :] / m)**2.0 + (s / m)**2.0
)
)
# Assume all are on the same timebase:
t.append(vuv_times[0])
y.append(vuv_signals)
std_y.append(vuv_uncertainties)
y_norm.append(vuv_signals_norm)
std_y_norm.append(vuv_uncertainties_norm)
blocks = []
names = []
pos = []
i = 0
for s in self.vuv_lines.keys():
for l in self.vuv_lines[s]:
if l.diagnostic_lines is not None:
blocks.append(i)
i += 1
names.append(s)
pos.append(XEUS_POS if s == 'XEUS' else LOWEUS_POS)
self.signal = Signal(
scipy.hstack(y).T,
scipy.hstack(std_y).T,
scipy.hstack(y_norm).T,
scipy.hstack(std_y_norm).T,
scipy.hstack(t),
names,
scipy.asarray(blocks, dtype=int) + 1,
pos=pos,
blocks=blocks
)
def load_vuv(self, system):
"""Load the data from a VUV instrument.
Parameters
----------
system : {'XEUS', 'LoWEUS'}
The VUV instrument to load the data from.
"""
print("Loading {system} data...".format(system=system))
t = MDSplus.Tree('spectroscopy', self.shot)
N = t.getNode(system + '.spec')
self.vuv_lines[system] = []
self.vuv_signal[system] = scipy.asarray(N.data(), dtype=float)
self.vuv_time[system] = scipy.asarray(N.dim_of(idx=1).data(), dtype=float)
self.vuv_lam[system] = scipy.asarray(N.dim_of(idx=0).data(), dtype=float) / 10.0
# Get the raw count data to compute the uncertainty:
self.vuv_uncertainty[system] = (
self.vuv_signal[system] /
scipy.sqrt(t.getNode(system + '.raw:data').data())
)
print("Processing {system} data...".format(system=system))
self.select_vuv(system)
def select_vuv(self, system):
"""Select the lines to use from the given VUV spectrometer.
"""
root = VuvWindow(self, system)
root.mainloop()
class XTOMOData(object):
"""Helper object to load and process the XTOMO data.
Execution proceeds as follows:
* Loads the XTOMO data from all (core) systems which have data available.
* Allows the user to flag bad XTOMO chords and select baseline subtraction
windows.
* Baseline subtracts, normalizes and combines the data.
* Generates a :py:class:`Signal` instance for later use. This is stored in
the attribute :py:attr:`signal`.
"""
def __init__(self, shot, injections):
self.injections = injections
# Fetch the data:
tree = MDSplus.Tree('xtomo', shot)
self.xtomo_sig = {}
self.xtomo_t = {}
self.xtomo_channel_mask = {}
# Just put something dumb as a placeholder for the baseline subtraction
# ranges. This will need to be set by hand in the GUI. This is a list
# of lists of tuples. The outer list has one entry per injection. Each
# injection then has one or more 2-tuples with the (start, stop) values
# of the range(s) to use for baseline subtraction.
self.xtomo_baseline_ranges = [[(0, 0.1),],] * len(injections)
for s in (1, 3, 5):
self.xtomo_sig[s], self.xtomo_t[s] = self.load_xtomo_array(s, tree)
if self.xtomo_sig[s] is not None:
self.xtomo_channel_mask[s] = scipy.ones(
self.xtomo_sig[s].shape[0],
dtype=bool
)
# Flag the data:
root = XtomoWindow(self)
root.mainloop()
# Process the injections:
t = {1: [], 3: [], 5: []}
y = {1: [], 3: [], 5: []}
std_y = {1: [], 3: [], 5: []}
y_norm = {1: [], 3: [], 5: []}
std_y_norm = {1: [], 3: [], 5: []}
for k, i in enumerate(self.injections):
for s in (1, 3, 5):
if self.xtomo_sig[s] is not None:
i_start, i_stop = profiletools.get_nearest_idx(
[i.t_start, i.t_stop],
self.xtomo_t[s]
)
# Apply the baseline subtraction:
bsub_idxs = []
for r in self.xtomo_baseline_ranges[k]:
lb_idx, ub_idx = profiletools.get_nearest_idx(
r,
self.xtomo_t[s]
)
bsub_idxs.extend(range(lb_idx, ub_idx + 1))
# Reduce to just the unique values:
bsub_idxs = list(set(bsub_idxs))
bsub = scipy.mean(self.xtomo_sig[s][:, bsub_idxs], axis=1)
y[s].append(
self.xtomo_sig[s][self.xtomo_channel_mask[s], i_start:i_stop + 1] -
bsub[self.xtomo_channel_mask[s], None]
)
std_y[s].append(0.1 * y[s][-1])
t[s].append(self.xtomo_t[s][i_start:i_stop + 1] - i.t_inj)
for s in (1, 3, 5):
if len(y[s]) > 0:
m = y[s][-1].max()
y_norm[s].append(y[s][-1] / m)
std_y_norm[s].append(std_y[s][-1] / m)
# Assume all of the time vectors are the same:
t_sig = scipy.hstack(t[1])
y_sig = []
std_y_sig = []
y_norm_sig = []
std_y_norm_sig = []
names = []
blocks = []
weight_idxs_sig = []
for k in (1, 3, 5):
if len(t[k]) > 0:
y_sig.append(scipy.hstack(y[k]))
std_y_sig.append(scipy.hstack(std_y[k]))
y_norm_sig.append(scipy.hstack(y_norm[k]))
std_y_norm_sig.append(scipy.hstack(std_y_norm[k]))
blocks.extend([k,] * y[k][-1].shape[0])
names += ['XTOMO %d' % (k,),] * y[k][-1].shape[0]
weight_idxs_sig.append(scipy.arange(0, len(self.xtomo_channel_mask[k]))[self.xtomo_channel_mask[k]])
self.signal = Signal(
scipy.vstack(y_sig).T,
scipy.vstack(std_y_sig).T,
scipy.vstack(y_norm_sig).T,
scipy.vstack(std_y_norm_sig).T,
scipy.hstack(t_sig),
names,
-1,
blocks=blocks
)
self.signal.weight_idxs = scipy.hstack(weight_idxs_sig)
def load_xtomo_array(self, array_num, tree, n_chords=38):
"""Load the data from a given XTOMO array.
Returns a tuple of `sig`, `t`, where `sig` is an array of float with
shape (`n_chords`, `len(t)`) holding the signals from each chord and `t`
is the timebase (assumed to be the same for all chords).
Parameters
----------
array_num : int
The XTOMO array number. Nominally one of {1, 3, 5}.
tree : :py:class:`MDSplus.Tree` instance
The XTOMO tree for the desired shot.
n_chords : int, optional
The number of chords in the array. The default is 38.
"""
N = tree.getNode('brightnesses.array_{a_n:d}.chord_01'.format(a_n=array_num))
try:
t = scipy.asarray(N.dim_of().data(), dtype=float)
except:
warnings.warn(
"No data for XTOMO {a_n:d}!".format(a_n=array_num),
RuntimeWarning
)
return None, None
sig = scipy.zeros((n_chords, len(t)))
for n in xrange(0, n_chords):
N = tree.getNode(
'brightnesses.array_{a_n:d}.chord_{n:02d}'.format(
a_n=array_num,
n=n + 1
)
)
# Some of the XTOMO 3 arrays on old shots have channels which are 5
# points short. Doing it this way prevents that from being an issue.
d = scipy.asarray(N.data(), dtype=float)
sig[n, :len(d)] = d
return sig, t
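# Illustrative sketch (assumption): how the three loaders above are typically
# combined into the `signals` list used by the forward model (index 0:
# HiReX-SR, 1: VUV, 2: XTOMO), with `shot` and `injections` defined beforehand.
#
#     hirex = HirexData(injections, ar=False, debug_plots=False)
#     vuv = VUVData(shot, injections, debug_plots=False)
#     xtomo = XTOMOData(shot, injections)
#     signals = [hirex.signal, vuv.signal, xtomo.signal]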
def slider_plot(x, y, z, xlabel='', ylabel='', zlabel='', labels=None, plot_sum=False, **kwargs):
"""Make a plot to explore multidimensional data.
x : array of float, (`M`,)
The abscissa.
y : array of float, (`N`,)
The variable to slide over.
z : array of float, (`P`, `M`, `N`)
The variables to plot.
xlabel : str, optional
The label for the abscissa.
ylabel : str, optional
The label for the slider.
zlabel : str, optional
The label for the ordinate.
labels : list of str with length `P`
The labels for each curve in `z`.
plot_sum : bool, optional
If True, will also plot the sum over all `P` cases. Default is False.
"""
if labels is None:
labels = ['' for v in z]
f = plt.figure()
gs = mplgs.GridSpec(2, 1, height_ratios=[10, 1])
a_plot = f.add_subplot(gs[0, :])
a_slider = f.add_subplot(gs[1, :])
a_plot.set_xlabel(xlabel)
a_plot.set_ylabel(zlabel)
color_vals = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
style_vals = ['-', '--', '-.', ':']
ls_vals = []
for s in style_vals:
for c in color_vals:
ls_vals.append(c + s)
ls_cycle = itertools.cycle(ls_vals)
l = []
for v, l_ in zip(z, labels):
tmp, = a_plot.plot(x, v[:, 0], ls_cycle.next(), label=l_, **kwargs)
l.append(tmp)
if plot_sum:
l_sum, = a_plot.plot(x, z[:, :, 0].sum(axis=0), ls_cycle.next(), label='total', **kwargs)
a_plot.legend(loc='best')
title = f.suptitle('')
def update(dum):
# ls_cycle = itertools.cycle(ls_vals)
# remove_all(l)
# while l:
# l.pop()
i = int(slider.val)
for v, l_ in zip(z, l):
l_.set_ydata(v[:, i])
# l.append(a_plot.plot(x, v[:, i], ls_cycle.next(), label=l_, **kwargs))
if plot_sum:
l_sum.set_ydata(z[:, :, i].sum(axis=0))
# l.append(a_plot.plot(x, z[:, :, i].sum(axis=0), ls_cycle.next(), label='total', **kwargs))
a_plot.relim()
a_plot.autoscale()
title.set_text('%s = %.5f' % (ylabel, y[i]) if ylabel else '%.5f' % (y[i],))
f.canvas.draw()
def arrow_respond(slider, event):
if event.key == 'right':
slider.set_val(min(slider.val + 1, slider.valmax))
elif event.key == 'left':
slider.set_val(max(slider.val - 1, slider.valmin))
slider = mplw.Slider(
a_slider,
ylabel,
0,
len(y) - 1,
valinit=0,
valfmt='%d'
)
slider.on_changed(update)
update(0)
f.canvas.mpl_connect(
'key_press_event',
lambda evt: arrow_respond(slider, evt)
)
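# Illustrative sketch (synthetic data): exploring a (P, M, N) array with the
# slider, e.g. P curves versus the abscissa, stepped through N slider values.
#
#     import scipy
#     x = scipy.linspace(0.0, 1.0, 50)                    # abscissa, length M
#     y = scipy.linspace(0.0, 0.1, 20)                    # slider variable, length N
#     z = scipy.exp(-x)[None, :, None] * scipy.ones((3, 50, 20))
#     slider_plot(x, y, z, xlabel='$r/a$', ylabel='$t$ [s]', zlabel='$n$ [AU]',
#                 labels=['a', 'b', 'c'], plot_sum=True)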
class _ComputeLnProbWrapper(object):
"""Wrapper to support parallel execution of STRAHL runs.
This is needed since instance methods are not pickleable.
Parameters
----------
run : :py:class:`Run` instance
The :py:class:`Run` to wrap.
make_dir : bool, optional
If True, a new STRAHL directory is acquired and released for each call.
Default is False (run in current directory).
for_min : bool, optional
If True, the function is wrapped in the way it needs to be for a
minimization: only -1 times the log-posterior is returned, independent
of the value of `return_blob`.
denormalize : bool, optional
If True, a normalization from [lb, ub] to [0, 1] is removed. Default is
False (don't adjust parameters).
"""
def __init__(self, run, make_dir=False, for_min=False, denormalize=False):
self.run = run
self.make_dir = make_dir
self.for_min = for_min
self.denormalize = denormalize
def __call__(self, params, **kwargs):
if self.denormalize:
bounds = scipy.asarray(self.run.get_prior().bounds[:], dtype=float)
lb = bounds[:, 0]
ub = bounds[:, 1]
params = [x * (u - l) + l for x, u, l in zip(params, ub, lb)]
try:
if self.make_dir:
acquire_working_dir()
out = self.run.DV2ln_prob(
params=params,
sign=(-1.0 if self.for_min else 1.0),
**kwargs
)
except:
warnings.warn(
"Unhandled exception. Error is: %s: %s. "
"Params are: %s" % (
sys.exc_info()[0],
sys.exc_info()[1],
params
)
)
if self.for_min:
out = scipy.inf
else:
# if kwargs.get('return_blob', False):
# if kwargs.get('light_blob', False):
# out = (-scipy.inf)
# else:
# out = (-scipy.inf, (-scipy.inf, None, None, None, ''))
# else:
out = -scipy.inf
finally:
if self.make_dir:
release_working_dir()
return out
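# Illustrative sketch (assumption): the wrapper above is what gets handed to a
# multiprocessing pool, since the bound method itself is not pickleable. `r`
# and `pool` are assumed to exist already.
#
#     wrapper = _ComputeLnProbWrapper(r, make_dir=True)
#     lp_vals = pool.map(wrapper, list_of_param_vectors)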
class _UGradEval(object):
"""Wrapper object for evaluating :py:meth:`Run.u2ln_prob` in parallel.
"""
def __init__(self, run, sign, kwargs):
self.run = run
self.sign = sign
self.kwargs = kwargs
def __call__(self, p):
return self.run.u2ln_prob(p, sign=self.sign, **self.kwargs)
class _CSDenResult(object):
"""Helper object to hold the results of :py:class:`_ComputeCSDenEval`.
"""
def __init__(self, DV):
self.DV = DV
class _ComputeCSDenEval(object):
"""Wrapper class to allow parallel evaluation of charge state density profiles.
Also computes the following:
* Time at which each charge state peaks at each location. This will be
of shape (`n_cs`, `n_space`).
* The peak value of each charge state at each location. This will be of
shape (`n_cs`, `n_space`).
* Time at which each charge state reaches its highest local value
(across all spatial points). This will be of shape (`n_cs`,).
* The peak value of each charge state across all spatial points. This
will be of shape (`n_cs`,).
* The spatial point at which each charge state across all spatial points
reaches its peak value. This will be of shape (`n_cs`,).
* Time at which the total impurity density peaks at each location. This
will be of shape (`n_space`,).
* The peak value of the total impurity density at each location. This
will be of shape (`n_space`,).
* The time at which the total impurity density peaks (across all spatial
points). This will be a single float.
* The peak value of the total impurity density across all spatial
points. This will be a single float.
* The spatial point at which the total impurity density across all
spatial points reaches its peak value. This will be a single float.
* The time at which the total impurity content peaks. This will be a
single float.
* The peak number of impurity atoms in the plasma. This will be a single
float.
* The confinement time for each charge state and each location. This
will be of shape (`n_cs`, `n_space`).
* The confinement time for the total impurity density at each location.
This will be of shape (`n_space`,).
* The confinement time for the total impurity content. This will be a
single float.
"""
def __init__(self, run):
self.run = run
def __call__(self, DV):
try:
cs_den, sqrtpsinorm, time, ne, Te = self.run.DV2cs_den(DV)
except:
print("Failure!")
return None
res = _CSDenResult(DV)
# For each charge state and location:
i_peak_local = cs_den.argmax(axis=0)
res.t_cs_den_peak_local = time[i_peak_local]
res.cs_den_peak_local = cs_den.max(axis=0)
# For each charge state across all locations:
i_peak_global = res.cs_den_peak_local.argmax(axis=1)
res.t_cs_den_peak_global = res.t_cs_den_peak_local[range(cs_den.shape[1]), i_peak_global]
res.cs_den_peak_global = res.cs_den_peak_local.max(axis=1)
res.sqrtpsinorm_cs_den_peak_global = sqrtpsinorm[i_peak_global]
# For total impurity density at each location:
n = cs_den.sum(axis=1) # shape is (`n_time`, `n_space`)
i_n_peak_local = n.argmax(axis=0)
res.t_n_peak_local = time[i_n_peak_local]
res.n_peak_local = n.max(axis=0)
# For total impurity density across all locations:
i_n_peak_global = res.n_peak_local.argmax()
res.t_n_peak_global = res.t_n_peak_local[i_n_peak_global]
res.n_peak_global = res.n_peak_local[i_n_peak_global]
res.sqrtpsinorm_n_peak_global = sqrtpsinorm[i_n_peak_global]
# For total impurity content inside the LCFS:
volnorm_grid = self.run.efit_tree.psinorm2volnorm(
sqrtpsinorm**2.0,
(self.run.time_1 + self.run.time_2) / 2.0
)
V = self.run.efit_tree.psinorm2v(1.0, (self.run.time_1 + self.run.time_2) / 2.0)
mask = ~scipy.isnan(volnorm_grid)
volnorm_grid = volnorm_grid[mask]
nn = n[:, mask]
# Use the trapezoid rule:
N = V * 0.5 * ((volnorm_grid[1:] - volnorm_grid[:-1]) * (nn[:, 1:] + nn[:, :-1])).sum(axis=1)
i_N_peak = N.argmax()
res.t_N_peak = time[i_N_peak]
res.N_peak = N[i_N_peak]
# # Confinement time for each charge state and each location:
# res.tau_cs_den_local = scipy.zeros(cs_den.shape[1:3])
# for s_idx in range(0, cs_den.shape[2]):
# for cs_idx in range(0, cs_den.shape[1]):
# t_mask = (self.run.truth_data.time > res.t_cs_den_peak_local[cs_idx, s_idx] + 0.01) & (cs_den[:, cs_idx, s_idx] > 0.0) & (~scipy.isinf(cs_den[:, cs_idx, s_idx])) & (~scipy.isnan(cs_den[:, cs_idx, s_idx]))
# if t_mask.sum() < 2:
# res.tau_cs_den_local[cs_idx, s_idx] = 0.0
# else:
# X = scipy.hstack((scipy.ones((t_mask.sum(), 1)), scipy.atleast_2d(self.run.truth_data.time[t_mask]).T))
# theta, dum1, dum2, dum3 = scipy.linalg.lstsq(X.T.dot(X), X.T.dot(scipy.log(cs_den[t_mask, cs_idx, s_idx])))
# res.tau_cs_den_local[cs_idx, s_idx] = -1.0 / theta[1]
#
# # Confinement time for total impurity density at each location:
# res.tau_n_local = scipy.zeros(cs_den.shape[2])
# for s_idx in range(0, n.shape[-1]):
# t_mask = (self.run.truth_data.time > res.t_n_peak_local[s_idx] + 0.01) & (n[:, s_idx] > 0.0) & (~scipy.isinf(n[:, s_idx])) & (~scipy.isnan(n[:, s_idx]))
# if t_mask.sum() < 2:
# res.tau_n_local[s_idx] = 0.0
# else:
# X = scipy.hstack((scipy.ones((t_mask.sum(), 1)), scipy.atleast_2d(self.run.truth_data.time[t_mask]).T))
# theta, dum1, dum2, dum3 = scipy.linalg.lstsq(X.T.dot(X), X.T.dot(scipy.log(n[t_mask, s_idx])))
# res.tau_n_local[s_idx] = -1.0 / theta[1]
# Confinement time of total impurity content and shape factor:
t_mask = (time > res.t_N_peak + 0.01) & (N > 0.0) & (~scipy.isinf(N)) & (~scipy.isnan(N))
if t_mask.sum() < 2:
res.tau_N = 0.0
res.n075n0 = scipy.median(n[:, 62] / n[:, 0])
res.prof = scipy.nanmedian(n / n[:, 0][:, None], axis=0)
else:
X = scipy.hstack((scipy.ones((t_mask.sum(), 1)), scipy.atleast_2d(time[t_mask]).T))
theta, dum1, dum2, dum3 = scipy.linalg.lstsq(X.T.dot(X), X.T.dot(scipy.log(N[t_mask])))
res.tau_N = -1.0 / theta[1]
first_t_idx = scipy.where(t_mask)[0][0]
# Take the median just in case I didn't wait until far enough after
# the peak:
res.n075n0 = scipy.median(n[first_t_idx:, 62] / n[first_t_idx:, 0])
res.prof = scipy.nanmedian(n[first_t_idx:, :] / n[first_t_idx:, 0][:, None], axis=0)
return res
class _OptimizeEval(object):
"""Wrapper class to allow parallel execution of random starts when optimizing the parameters.
Parameters
----------
run : :py:class:`Run`
The :py:class:`Run` instance to wrap.
thresh : float, optional
If provided, the negative log-posterior is first evaluated at each starting
point, and starts for which it is non-finite or greater than `thresh` are
skipped.
use_local : bool, optional
If True, the local version of the log-posterior is optimized. Default is
False.
"""
def __init__(self, run, thresh=None, use_local=False):
self.run = run
# Get the bounds into the correct format for scipy.optimize.minimize:
# b = self.run.get_prior().bounds[:]
# self.bounds = [list(v) for v in b]
# for v in self.bounds:
# if scipy.isinf(v[0]):
# v[0] = None
# if scipy.isinf(v[1]):
# v[1] = None
self.thresh = thresh
self.use_local = use_local
def __call__(self, params):
"""Run the optimizer starting at the given params.
All exceptions are caught and reported.
Returns a tuple of (`u_opt`, `f_opt`, `return_code`, `num_strahl_calls`).
If it fails, returns a tuple of (None, None, `sys.exc_info()`, `num_strahl_calls`).
"""
global NUM_STRAHL_CALLS
try:
if self.thresh is not None:
l = self.run.DV2ln_prob(params, sign=-1, use_local=self.use_local)
if scipy.isinf(l) or scipy.isnan(l) or l > self.thresh:
warnings.warn("Bad start, skipping! lp=%.3g" % (l,))
return None
else:
print("Good start: lp=%.3g" % (l,))
# out = scipy.optimize.minimize(
# self.run.u2ln_prob,
# self.run.get_prior().elementwise_cdf(params),
# args=(None, -1, True),
# jac=True,
# method='TNC',
# bounds=[(0, 1),] * len(params),
# options={
# 'disp': True,
# 'maxfun': 50000,
# 'maxiter': 50000,
# # 'maxls': 50, # Doesn't seem to be supported. WTF?
# 'maxcor': 50
# }
# )
NUM_STRAHL_CALLS = 0
# out = scipy.optimize.fmin_l_bfgs_b(
# self.run.u2ln_prob,
# self.run.get_prior().elementwise_cdf(params),
# args=(None, -1, True),
# bounds=[(0, 1),] * len(params),
# iprint=50,
# maxfun=50000
# )
# First run a global optimizer:
# opt = nlopt.opt(nlopt.GN_DIRECT_L, len(params)) # LN_SBPLX
# opt.set_max_objective(self.run.u2ln_prob_local if self.use_local else self.run.u2ln_prob)
# opt.set_lower_bounds([0.0,] * opt.get_dimension())
# opt.set_upper_bounds([1.0,] * opt.get_dimension())
# # opt.set_ftol_abs(1.0)
# opt.set_ftol_rel(1e-6)
# # opt.set_maxeval(40000)#(100000)
# opt.set_maxtime(3600 * 12)
p0 = self.run.params.copy()
p0[~self.run.fixed_params] = params
# uopt = opt.optimize(self.run.get_prior().elementwise_cdf(p0)[~self.run.fixed_params])
# Then polish the minimum:
print("Polishing with SUBPLEX...")
opt = nlopt.opt(nlopt.LN_SBPLX, len(params))
opt.set_max_objective(self.run.u2ln_prob_local if self.use_local else self.run.u2ln_prob)
opt.set_lower_bounds([0.0,] * opt.get_dimension())
opt.set_upper_bounds([1.0,] * opt.get_dimension())
opt.set_ftol_rel(1e-8)
opt.set_maxtime(3600 * 12)
# uopt = opt.optimize(uopt)
uopt = opt.optimize(self.run.get_prior().elementwise_cdf(p0)[~self.run.fixed_params])
# Convert uopt back to params:
u_full = 0.5 * scipy.ones_like(self.run.params, dtype=float)
u_full[~self.run.fixed_params] = uopt
p_opt = self.run.get_prior().sample_u(u_full)[~self.run.fixed_params]
# Polish optimum again with L-BFGS-B, hopefully eventually get H:
# print("Polishing with L-BFGS-B...")
# p_opt, f_opt, d_res = scipy.optimize.fmin_l_bfgs_b(
# self.run.DV2ln_prob,
# p_opt,
# fprime=self.run.DV2jac,
# args=(-1.0,),
# bounds=scipy.asarray(self.run.get_prior().bounds[:], dtype=float)[~self.run.fixed_params],
# m=100,
# factr=1e7,
# # maxls=100
# )
out = (p_opt, opt.last_optimum_value(), opt.last_optimize_result(), NUM_STRAHL_CALLS)
# out = (p_opt, f_opt, (opt.last_optimize_result(), d_res), NUM_STRAHL_CALLS)
print("Done. Made %d calls to STRAHL." % (NUM_STRAHL_CALLS,))
return out
except:
warnings.warn(
"Minimizer failed, skipping sample. Error is: %s: %s."
% (
sys.exc_info()[0],
sys.exc_info()[1]
)
)
return (None, None, sys.exc_info(), NUM_STRAHL_CALLS)
class _ComputeProfileWrapper(object):
"""Wrapper to enable evaluation of D, V profiles in parallel.
"""
def __init__(self, run):
self.run = run
def __call__(self, params):
return self.run.eval_DV(params)
class _CallGPWrapper(object):
"""Wrapper to enable use of GaussianProcess instances with emcee Samplers.
Enforces the bounds when called to prevent runaway extrapolations.
"""
def __init__(self, gp):
self.gp = gp
# Capture the X limits:
self.X_min = gp.X.min(axis=0)
self.X_max = gp.X.max(axis=0)
def __call__(self, X, **kwargs):
X = scipy.asarray(X)
if (X < self.X_min).any() or (X > self.X_max).any():
return -scipy.inf
else:
return self.gp.predict(X, **kwargs)
def eval_profile(x, k, eig, n, params=None, mu=None):
"""Evaluate the profile.
Note that you must externally exponentiate the D profile to ensure
positivity.
Parameters
----------
x : array of float
The points to evaluate the profile at.
k : :py:class:`gptools.Kernel` instance
The covariance kernel to use.
eig : array of float
The eigenvalues to use when drawing the sample.
n : int
The derivative order to set to 0 at the origin. If `n`=0 then the
value is manually forced to zero to avoid numerical issues. The sign
of the eigenvectors is also chosen based on `n`: `n`=0 uses the left
slope constraint, `n`=1 uses the left concavity constraint.
params : array of float, optional
The values for the (free) hyperparameters of `k`. If provided, the
hyperparameters of `k` are first updated. Otherwise, `k` is used as-
is (i.e., it assumes the hyperparameters were set elsewhere).
"""
if params is not None:
if mu is None:
k.set_hyperparams(params)
else:
k.set_hyperparams(params[:k.num_free_params])
mu.set_hyperparams(params[k.num_free_params:])
if eig.ndim == 1:
eig = scipy.atleast_2d(eig).T
gp = gptools.GaussianProcess(k, mu=mu)
gp.add_data(0, 0, n=n)
y = gp.draw_sample(
x,
method='eig',
num_eig=len(eig),
rand_vars=eig,
modify_sign='left concavity' if n == 1 else 'left slope'
).ravel()
if n == 0:
y[0] = 0
return y
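# Illustrative sketch (placeholder hyperparameters): drawing one profile sample
# that is pinned to zero value at the origin (n=0).
#
#     import scipy
#     import gptools
#     x = scipy.linspace(0.0, 1.05, 100)
#     k = gptools.SquaredExponentialKernel(initial_params=[1.0, 0.3])
#     eig = scipy.asarray([0.8, -0.3, 0.5, -0.1, 0.2])  # standard-normal draws
#     V = eval_profile(x, k, eig, 0)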
def source_function(t, t_start, t_rise, n_rise, t_fall, n_fall, t_cluster=0.0, h_cluster=0.0):
"""Defines a model form to approximate the shape of the source function.
Consists of an exponential rise, followed by an exponential decay and,
optionally, a constant tail to approximate clusters.
The cluster period is optional, so you can either treat this as a
5-parameter function or a 7-parameter function.
The function is set to have a peak value of 1.0.
Parameters
----------
t : array of float
The time values to evaluate the source at.
t_start : float
The time the source starts at.
t_rise : float
The length of the rise portion.
n_rise : float
The number of e-folding times to put in the rise portion.
t_fall : float
The length of the fall portion.
n_fall : float
The number of e-folding times to put in the fall portion.
t_cluster : float, optional
The length of the constant period. Default is 0.0.
h_cluster : float, optional
The height of the constant period. Default is 0.0.
"""
s = scipy.atleast_1d(scipy.zeros_like(t))
tau_rise = t_rise / n_rise
tau_fall = t_fall / n_fall
rise_idx = (t >= t_start) & (t < t_start + t_rise)
s[rise_idx] = 1.0 - scipy.exp(-(t[rise_idx] - t_start) / tau_rise)
fall_idx = (t >= t_start + t_rise) & (t < t_start + t_rise + t_fall)
s[fall_idx] = scipy.exp(-(t[fall_idx] - t_start - t_rise) / tau_fall)
s[(t >= t_start + t_rise + t_fall) & (t < t_start + t_rise + t_fall + t_cluster)] = h_cluster
return s
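# Illustrative sketch: evaluating the model source shape for a 1 ms rise and a
# 5 ms fall starting at t = 1.0 s (all numbers are placeholders).
#
#     import scipy
#     t = scipy.linspace(0.99, 1.05, 500)
#     s = source_function(t, t_start=1.0, t_rise=1e-3, n_rise=3.0,
#                         t_fall=5e-3, n_fall=3.0)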
def interp_max(x, y, err_y=None, s_guess=0.2, s_max=10.0, l_guess=0.005, fixed_l=False, debug_plots=False, method='GP'):
"""Compute the maximum value of the smoothed data.
Estimates the uncertainty using Gaussian process regression and returns the
mean and uncertainty.
Parameters
----------
x : array of float
Abscissa of data to be interpolated.
y : array of float
Data to be interpolated.
err_y : array of float, optional
Uncertainty in `y`. If absent, the data are interpolated.
s_guess : float, optional
Initial guess for the signal variance. Default is 0.2.
s_max : float, optional
Maximum value for the signal variance. Default is 10.0.
l_guess : float, optional
Initial guess for the covariance length scale. Default is 0.005.
fixed_l : bool, optional
Set to True to hold the covariance length scale fixed during the MAP
estimate. This helps mitigate the effect of bad points. Default is False.
debug_plots : bool, optional
Set to True to plot the data, the smoothed curve (with uncertainty) and
the location of the peak value.
method : {'GP', 'spline'}, optional
Method to use when interpolating. Default is 'GP' (Gaussian process
regression). Can also use a cubic spline.
"""
grid = scipy.linspace(max(0, x.min()), min(0.08, x.max()), 1000)
if method == 'GP':
hp = (
gptools.UniformJointPrior([(0, s_max),]) *
gptools.GammaJointPriorAlt([l_guess,], [0.1,])
)
k = gptools.SquaredExponentialKernel(
# param_bounds=[(0, s_max), (0, 2.0)],
hyperprior=hp,
initial_params=[s_guess, l_guess],
fixed_params=[False, fixed_l]
)
gp = gptools.GaussianProcess(k, X=x, y=y, err_y=err_y)
gp.optimize_hyperparameters(verbose=True, random_starts=100)
m_gp, s_gp = gp.predict(grid)
i = m_gp.argmax()
elif method == 'spline':
m_gp = scipy.interpolate.UnivariateSpline(
x, y, w=1.0 / err_y, s=2*len(x)
)(grid)
if scipy.isnan(m_gp).any():
print(x)
print(y)
print(err_y)
i = m_gp.argmax()
else:
raise ValueError("Undefined method %s" % (method,))
if debug_plots:
f = plt.figure()
a = f.add_subplot(1, 1, 1)
a.errorbar(x, y, yerr=err_y, fmt='.', color='b')
a.plot(grid, m_gp, color='g')
if method == 'GP':
a.fill_between(grid, m_gp - s_gp, m_gp + s_gp, color='g', alpha=0.5)
a.axvline(grid[i])
if method == 'GP':
return (m_gp[i], s_gp[i])
else:
return m_gp[i]
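# Illustrative sketch (synthetic data): estimating the peak of a noisy signal.
# With method='GP' the peak value and its uncertainty are returned.
#
#     import scipy
#     x = scipy.linspace(0.0, 0.08, 40)
#     y = scipy.exp(-(x - 0.02)**2 / 1e-4) + 0.05 * scipy.sin(1e3 * x)
#     err_y = 0.05 * scipy.ones_like(x)
#     m, s = interp_max(x, y, err_y=err_y, method='GP')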
# def remove_all(l):
# """Remove all of the various hierarchical objects matplotlib spits out.
# """
# # TODO: This is a hack -- store the objects better!
# for v in l:
# try:
# for vv in v:
# try:
# for vvv in vv:
# vvv.remove()
# except TypeError:
# vv.remove()
# except TypeError:
# v.remove()
# def remove_all(v):
# """Remove all of the various hierarchical objects matplotlib spits out.
# """
# # TODO: This is a hack -- store the objects better!
# # A list needs an argument for remove to work, so the correct exception is
# # TypeError.
# try:
# print(type(v))
# v.remove()
# except TypeError:
# for vv in v:
# remove_all(vv)
# except Exception as e:
# import pdb
# pdb.set_trace()
def remove_all(v):
"""Yet another recursive remover, because matplotlib is stupid.
"""
try:
for vv in v:
remove_all(vv)
except TypeError:
v.remove()
def write_Ca_16_ADF15(
path='atomdat/adf15/ca/pue#ca16.dat',
Te=[5e1, 1e2, 2e2, 5e2, 7.5e2, 1e3, 1.5e3, 2e3, 4e3, 7e3, 1e4, 2e4],
ne=[1e12, 1e13, 2e13, 5e13, 1e14, 2e14, 5e14, 1e15, 2e15]
):
"""Write an ADF15-formatted file for the 19.3nm Ca 16+ line.
Computes the photon emissivity coefficients as a function of temperature and
density using the expression John Rice found for me.
TODO: GET CITATION!
TODO: Verify against Ar rates!
Parameters
----------
path : str, optional
The path to write the file to. Default is
'atomdat/adf15/ca/pue#ca16.dat'.
Te : array of float, optional
Temperatures to evaluate the model at in eV. Defaults to the values used
in pue#ca17.dat.
ne : array of float, optional
Densities to evaluate the model at in cm^-3. Defaults to the values used
in pue#ca17.dat.
"""
# Only one transition available:
NSEL = 1
TEXT = 'CA+16 EMISSIVITY COEFFTS.'
# Convert to angstroms:
WLNG = CA_16_LINES[0] * 10
NDENS = len(ne)
NTE = len(Te)
FILMEM = 'none'
TYPE = 'EXCIT'
INDM = 'T'
ISEL = 1
s = (
"{NSEL: >5d} /{TEXT:s}/\n"
"{WLNG: >8.3f} A{NDENS: >4d}{NTE: >4d} /FILMEM = {FILMEM: <8s}/TYPE = {TYPE: <8s} /INDM = {INDM:s}/ISEL ={ISEL: >5d}\n".format(
NSEL=NSEL,
TEXT=TEXT,
WLNG=WLNG,
NDENS=NDENS,
NTE=NTE,
FILMEM=FILMEM,
TYPE=TYPE,
INDM=INDM,
ISEL=ISEL
)
)
ne_str = ['{: >9.2e}'.format(i) for i in ne]
ct = 0
while ne_str:
s += ne_str.pop(0)
ct += 1
if ct == 8:
s += '\n'
ct = 0
if ct != 0:
s += '\n'
Te_str = ['{: >9.2e}'.format(i) for i in Te]
ct = 0
while Te_str:
s += Te_str.pop(0)
ct += 1
if ct == 8:
s += '\n'
ct = 0
if ct != 0:
s += '\n'
# Information from John Rice:
ne = scipy.asarray(ne, dtype=float)
Te = scipy.asarray(Te, dtype=float)
fij = 0.17
Eij = 64.3
y = Eij / Te
A = 0.6
D = 0.28
gbar = A + D * (scipy.log((y + 1) / y) - 0.4 / (y + 1)**2.0)
PEC = 1.57e-5 / (scipy.sqrt(Te) * Eij) * fij * gbar * scipy.exp(-y)
PEC_str = ['{: >9.2e}'.format(i) for i in PEC]
PEC_fmt = ''
ct = 0
while PEC_str:
PEC_fmt += PEC_str.pop(0)
ct += 1
if ct == 8:
PEC_fmt += '\n'
ct = 0
if ct != 0:
PEC_fmt += '\n'
s += PEC_fmt * NDENS
with open(path, 'w') as f:
f.write(s)
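# Hedged usage sketch for write_Ca_16_ADF15: the output path below is an
# assumption, not a file shipped with the code. The default Te/ne grids mirror
# pue#ca17.dat, so usually only the destination needs to change.
#
#     write_Ca_16_ADF15(path='/tmp/pue#ca16.dat')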
class HirexPlotFrame(tk.Frame):
"""Frame to hold the plot with the HiReX-SR time-series data.
"""
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.f = Figure()
self.suptitle = self.f.suptitle('')
self.a = self.f.add_subplot(1, 1, 1)
self.canvas = FigureCanvasTkAgg(self.f, master=self)
self.canvas.show()
self.canvas.get_tk_widget().grid(row=0, column=0, sticky='NESW')
# Need to put the toolbar in its own frame, since it automatically calls
# pack on itself, but I am using grid.
self.toolbar_frame = tk.Frame(self)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.toolbar_frame)
self.toolbar.update()
self.toolbar_frame.grid(row=1, column=0, sticky='EW')
self.canvas.mpl_connect(
'button_press_event',
lambda event: self.canvas._tkcanvas.focus_set()
)
self.canvas.mpl_connect('key_press_event', self.on_key_event)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self.a.set_xlabel('$t$ [s]')
self.a.set_ylabel('HiReX-SR signal [AU]')
# TODO: Get a more clever way to handle ylim!
self.a.set_ylim(0, 1)
self.l = []
self.l_flagged = []
def on_key_event(self, evt):
"""Tie keys to the toolbar.
"""
key_press_handler(evt, self.canvas, self.toolbar)
class HirexWindow(tk.Tk):
"""GUI to flag bad HiReX-SR points.
Parameters
----------
data : :py:class:`RunData` instance
The :py:class:`RunData` object holding the information to be processed.
"""
def __init__(self, data, ar=False):
print(
"Type the indices of the bad points into the text box and press "
"ENTER to flag them. Use the arrow keys to move between channels. "
"When done, close the window to continue with the analysis."
)
tk.Tk.__init__(self)
self.data = data
self.ar = ar
self.wm_title("HiReX-SR inspector")
self.plot_frame = HirexPlotFrame(self)
self.plot_frame.grid(row=0, column=0, sticky='NESW')
# if self.ar:
# self.signal = data.ar_signal
# self.time = data.ar_time
# self.uncertainty = data.ar_uncertainty
# self.flagged = data.ar_flagged
# else:
self.signal = data.hirex_signal
self.time = data.hirex_time
self.uncertainty = data.hirex_uncertainty
self.flagged = data.hirex_flagged
self.idx_slider = tk.Scale(
master=self,
from_=0,
to=self.signal.shape[1] - 1,
command=self.update_slider,
orient=tk.HORIZONTAL
)
self.idx_slider.grid(row=1, column=0, sticky='NESW')
self.flagged_box = tk.Entry(self)
self.flagged_box.grid(row=2, column=0, sticky='NESW')
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self.bind("<Left>", self.on_arrow)
self.bind("<Right>", self.on_arrow)
self.bind("<Return>", self.process_flagged)
# self.bind("<Enter>", self.process_flagged)
self.bind("<KP_Enter>", self.process_flagged)
def destroy(self):
self.process_flagged()
tk.Tk.destroy(self)
def on_arrow(self, evt):
"""Handle arrow keys to move slider.
"""
if evt.keysym == 'Right':
self.process_flagged()
self.idx_slider.set(
min(self.idx_slider.get() + 1, self.signal.shape[1] - 1)
)
elif evt.keysym == 'Left':
self.process_flagged()
self.idx_slider.set(
max(self.idx_slider.get() - 1, 0)
)
def process_flagged(self, evt=None):
"""Process the flagged points which have been entered into the text box.
"""
flagged = re.findall(
LIST_REGEX,
self.flagged_box.get()
)
flagged = scipy.asarray([int(i) for i in flagged], dtype=int)
idx = self.idx_slider.get()
self.flagged[:, idx] = False
self.flagged[flagged, idx] = True
remove_all(self.plot_frame.l_flagged)
self.plot_frame.l_flagged = []
self.plot_frame.l_flagged.append(
self.plot_frame.a.plot(
self.time[flagged],
self.signal[flagged, idx],
'rx',
markersize=12
)
)
self.plot_frame.canvas.draw()
def update_slider(self, new_idx):
"""Update the slider to the new index.
"""
# Remove the old lines:
remove_all(self.plot_frame.l)
self.plot_frame.l = []
self.plot_frame.l.append(
self.plot_frame.a.errorbar(
self.time,
self.signal[:, new_idx],
yerr=self.uncertainty[:, new_idx],
fmt='.',
color='b'
)
)
for i, x, y in zip(
xrange(0, self.signal.shape[0]),
self.time,
self.signal[:, new_idx]
):
self.plot_frame.l.append(
self.plot_frame.a.text(x, y, str(i))
)
# Insert the flagged points into the textbox:
self.flagged_box.delete(0, tk.END)
self.flagged_box.insert(
0,
', '.join(map(str, scipy.where(self.flagged[:, new_idx])[0]))
)
self.process_flagged()
# Called by process_flagged:
# self.plot_frame.canvas.draw()
class VuvPlotFrame(tk.Frame):
"""Frame to hold the plots with the XEUS data.
"""
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
# Store the lines that change when updating the time:
self.l_time = []
# Store the lines that change when updating the wavelength:
self.l_lam = []
# Store the lines that change when updating the XEUS line:
self.l_final = []
self.f = Figure()
self.suptitle = self.f.suptitle('')
gs = mplgs.GridSpec(2, 2)
self.a_im = self.f.add_subplot(gs[0, 0])
self.a_spec = self.f.add_subplot(gs[1, 0])
self.a_time = self.f.add_subplot(gs[0, 1])
self.a_final = self.f.add_subplot(gs[1, 1])
self.canvas = FigureCanvasTkAgg(self.f, master=self)
self.canvas.show()
self.canvas.get_tk_widget().grid(row=0, column=0, sticky='NESW')
# Need to put the toolbar in its own frame, since it automatically calls
# pack on itself, but I am using grid.
self.toolbar_frame = tk.Frame(self)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.toolbar_frame)
self.toolbar.update()
self.toolbar_frame.grid(row=1, column=0, sticky='EW')
self.canvas.mpl_connect(
'button_press_event',
lambda event: self.canvas._tkcanvas.focus_set()
)
self.canvas.mpl_connect('key_press_event', self.on_key_event)
self.canvas.mpl_connect('button_press_event', self.on_click)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
# Just plot the image now since it doesn't change:
LAM, T = scipy.meshgrid(
self.master.data.vuv_lam[self.master.system],
self.master.data.vuv_time[self.master.system]
)
self.im = self.a_im.pcolormesh(
LAM,
T,
self.master.data.vuv_signal[self.master.system],
cmap='gray'
)
xlim = self.a_im.get_xlim()
for x, i, c in zip(
CA_17_LINES + CA_16_LINES,
range(0, len(CA_17_LINES) + len(CA_16_LINES)),
['r'] * len(CA_17_LINES) + ['c'] * len(CA_16_LINES)
):
self.a_im.axvline(x, linestyle='--', color=c)
self.a_spec.axvline(x, linestyle='--', color=c)
self.a_im.text(
x,
self.master.data.vuv_time[self.master.system].min(),
str(i)
)
self.a_spec.text(x, 0, str(i))
self.a_im.set_xlim(xlim)
self.a_spec.set_xlim(xlim)
self.a_im.set_xlabel(r'$\lambda$ [nm]')
self.a_im.set_ylabel('$t$ [s]')
self.a_spec.set_xlabel(r'$\lambda$ [nm]')
self.a_spec.set_ylabel('raw signal [AU]')
self.a_time.set_xlabel('$t$ [s]')
self.a_time.set_ylabel('raw signal [AU]')
self.a_final.set_xlabel('$t$ [s]')
self.a_final.set_ylabel('processed signal [AU]')
def on_key_event(self, evt):
"""Tie keys to the toolbar.
"""
key_press_handler(evt, self.canvas, self.toolbar)
def on_click(self, evt):
"""Move the cursors with a click in any given axis.
Only does so if the widgetlock is not locked.
"""
if not self.canvas.widgetlock.locked():
if evt.inaxes == self.a_im:
# Update both lam and t:
lam_idx = profiletools.get_nearest_idx(
evt.xdata,
self.master.data.vuv_lam[self.master.system]
)
self.master.slider_frame.lam_slider.set(lam_idx)
t_idx = profiletools.get_nearest_idx(
evt.ydata,
self.master.data.vuv_time[self.master.system]
)
self.master.slider_frame.t_slider.set(t_idx)
elif evt.inaxes == self.a_spec:
# Only update lam:
lam_idx = profiletools.get_nearest_idx(
evt.xdata,
self.master.data.vuv_lam[self.master.system]
)
self.master.slider_frame.lam_slider.set(lam_idx)
elif evt.inaxes == self.a_time:
# Only update t:
t_idx = profiletools.get_nearest_idx(
evt.xdata,
self.master.data.vuv_time[self.master.system]
)
self.master.slider_frame.t_slider.set(t_idx)
class VuvSliderFrame(tk.Frame):
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.t_idx = None
self.lam_idx = None
self.max_val = None
self.t_slider = tk.Scale(
master=self,
from_=0,
to=len(self.master.data.vuv_time[self.master.system]) - 1,
command=self.master.update_t,
orient=tk.HORIZONTAL,
label='t'
)
self.t_slider.grid(row=0, column=0)
self.lam_slider = tk.Scale(
master=self,
from_=0,
to=len(self.master.data.vuv_lam[self.master.system]) - 1,
command=self.master.update_lam,
orient=tk.HORIZONTAL,
label=u'\u03bb'
)
self.lam_slider.grid(row=0, column=1)
self.max_val_slider = tk.Scale(
master=self,
from_=0,
to=self.master.data.vuv_signal[self.master.system].max(),
command=self.master.update_max_val,
orient=tk.HORIZONTAL,
label='max =',
resolution=0.01
)
self.max_val_slider.set(self.master.data.vuv_signal[self.master.system].max())
self.max_val_slider.grid(row=0, column=2)
class VuvWindow(tk.Tk):
def __init__(self, data, system):
tk.Tk.__init__(self)
self.data = data
self.system = system
self.wm_title(system + " inspector")
self.plot_frame = VuvPlotFrame(self)
self.plot_frame.grid(row=0, column=0, sticky='NESW')
self.slider_frame = VuvSliderFrame(self)
self.slider_frame.grid(row=1, column=0, sticky='NESW')
self.line_frame = VuvLineFrame(self)
self.line_frame.grid(row=0, column=1, rowspan=2, sticky='NESW')
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
self.bind("<Left>", self.on_arrow)
self.bind("<Right>", self.on_arrow)
self.bind("<Up>", self.on_arrow)
self.bind("<Down>", self.on_arrow)
def on_arrow(self, evt):
"""Handle arrow keys to move slider.
"""
if evt.keysym == 'Right':
self.slider_frame.lam_slider.set(
min(
self.slider_frame.lam_slider.get() + 1,
len(self.data.vuv_lam[self.system]) - 1
)
)
elif evt.keysym == 'Left':
self.slider_frame.lam_slider.set(
max(self.slider_frame.lam_slider.get() - 1, 0)
)
elif evt.keysym == 'Up':
self.slider_frame.t_slider.set(
min(
self.slider_frame.t_slider.get() + 1,
len(self.data.vuv_time[self.system]) - 1
)
)
elif evt.keysym == 'Down':
self.slider_frame.t_slider.set(
max(self.slider_frame.t_slider.get() - 1, 0)
)
def update_t(self, t_idx):
"""Update the time slice plotted.
"""
# Cast to int, because Tkinter is inexplicably giving me str (!?)
t_idx = int(t_idx)
# Need to check this, since apparently getting cute with setting the
# label creates an infinite recursion...
if t_idx != self.slider_frame.t_idx:
self.slider_frame.t_idx = t_idx
self.slider_frame.t_slider.config(
label="t = %.3fs" % (self.data.vuv_time[self.system][t_idx],)
)
remove_all(self.plot_frame.l_time)
self.plot_frame.l_time = []
self.plot_frame.l_time.append(
self.plot_frame.a_spec.plot(
self.data.vuv_lam[self.system],
self.data.vuv_signal[self.system][t_idx, :],
'k'
)
)
self.plot_frame.l_time.append(
self.plot_frame.a_time.axvline(
self.data.vuv_time[self.system][t_idx],
color='b'
)
)
self.plot_frame.l_time.append(
self.plot_frame.a_im.axhline(
self.data.vuv_time[self.system][t_idx],
color='b'
)
)
# self.plot_frame.a_spec.relim()
# self.plot_frame.a_spec.autoscale_view(scalex=False)
self.plot_frame.canvas.draw()
def update_lam(self, lam_idx):
"""Update the wavelength slice plotted.
"""
lam_idx = int(lam_idx)
if lam_idx != self.slider_frame.lam_idx:
self.slider_frame.lam_idx = lam_idx
self.slider_frame.lam_slider.config(
label=u"\u03bb = %.3fnm" % (self.data.vuv_lam[self.system][lam_idx],)
)
remove_all(self.plot_frame.l_lam)
self.plot_frame.l_lam = []
self.plot_frame.l_lam.append(
self.plot_frame.a_time.plot(
self.data.vuv_time[self.system],
self.data.vuv_signal[self.system][:, lam_idx],
'k'
)
)
self.plot_frame.l_lam.append(
self.plot_frame.a_spec.axvline(
self.data.vuv_lam[self.system][lam_idx],
color='g'
)
)
self.plot_frame.l_lam.append(
self.plot_frame.a_im.axvline(
self.data.vuv_lam[self.system][lam_idx],
color='g'
)
)
# self.plot_frame.a_time.relim()
# self.plot_frame.a_time.autoscale_view(scalex=False)
self.plot_frame.canvas.draw()
def update_max_val(self, max_val):
"""Update the maximum value on the image plot.
"""
max_val = float(max_val)
if max_val != self.slider_frame.max_val:
self.slider_frame.max_val = max_val
self.plot_frame.im.set_clim(vmax=max_val)
self.plot_frame.canvas.draw()
class VuvLineFrame(tk.Frame):
"""Frame that holds the controls to setup VUV line information.
"""
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
# Keep track of the selected idx separately, since tkinter is stupid
# about it (loses state when using tab to move between text boxes):
self.idx = None
self.listbox_label = tk.Label(self, text="defined lines:", anchor=tk.SW)
self.listbox_label.grid(row=0, column=0, columnspan=2, sticky='NESW')
self.listbox = tk.Listbox(self)
self.listbox.grid(row=1, column=0, columnspan=2, sticky='NESW')
self.listbox.bind('<<ListboxSelect>>', self.on_select)
self.add_button = tk.Button(self, text="+", command=self.add_line)
self.add_button.grid(row=2, column=0, sticky='NESW')
self.remove_button = tk.Button(self, text="-", command=self.remove_line)
self.remove_button.grid(row=2, column=1, sticky='NESW')
self.included_lines_label = tk.Label(self, text="included lines:", anchor=tk.SW)
self.included_lines_label.grid(row=3, column=0, columnspan=2, sticky='NESW')
self.included_lines_box = tk.Entry(self)
self.included_lines_box.grid(row=4, column=0, columnspan=2, sticky='NESW')
self.lam_lb_label = tk.Label(self, text=u"\u03bb min (nm):", anchor=tk.SW)
self.lam_lb_label.grid(row=5, column=0, sticky='NESW')
self.lam_lb_box = tk.Entry(self)
self.lam_lb_box.grid(row=6, column=0, sticky='NESW')
self.lam_ub_label = tk.Label(self, text=u"\u03bb max (nm):", anchor=tk.SW)
self.lam_ub_label.grid(row=5, column=1, sticky='NESW')
self.lam_ub_box = tk.Entry(self)
self.lam_ub_box.grid(row=6, column=1, sticky='NESW')
self.t_lb_label = tk.Label(self, text="baseline start (s):", anchor=tk.SW)
self.t_lb_label.grid(row=7, column=0, sticky='NESW')
self.t_lb_box = tk.Entry(self)
self.t_lb_box.grid(row=8, column=0, sticky='NESW')
self.t_ub_label = tk.Label(self, text="baseline end (s):", anchor=tk.SW)
self.t_ub_label.grid(row=7, column=1, sticky='NESW')
self.t_ub_box = tk.Entry(self)
self.t_ub_box.grid(row=8, column=1, sticky='NESW')
self.apply_button = tk.Button(self, text="apply", command=self.on_apply)
self.apply_button.grid(row=9, column=0, columnspan=2, sticky='NESW')
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=1)
# Add the existing VuvLine instances to the GUI:
if self.master.data.vuv_lines[self.master.system]:
for l in self.master.data.vuv_lines[self.master.system]:
self.listbox.insert(tk.END, ', '.join(map(str, l.diagnostic_lines)))
else:
self.add_line()
def on_select(self, event):
"""Handle selection of a new line.
"""
# TODO: This should save the current state into the selected line
try:
self.idx = int(self.listbox.curselection()[0])
except IndexError:
self.idx = None
if self.idx is not None:
self.included_lines_box.delete(0, tk.END)
if self.master.data.vuv_lines[self.master.system][self.idx].diagnostic_lines is not None:
self.included_lines_box.insert(
0,
', '.join(
map(
str,
self.master.data.vuv_lines[self.master.system][self.idx].diagnostic_lines
)
)
)
self.lam_lb_box.delete(0, tk.END)
if self.master.data.vuv_lines[self.master.system][self.idx].lam_lb is not None:
self.lam_lb_box.insert(
0,
self.master.data.vuv_lines[self.master.system][self.idx].lam_lb
)
self.lam_ub_box.delete(0, tk.END)
if self.master.data.vuv_lines[self.master.system][self.idx].lam_ub is not None:
self.lam_ub_box.insert(
0,
self.master.data.vuv_lines[self.master.system][self.idx].lam_ub
)
if self.master.data.vuv_lines[self.master.system][self.idx].t_lb is not None:
self.t_lb_box.delete(0, tk.END)
self.t_lb_box.insert(
0,
self.master.data.vuv_lines[self.master.system][self.idx].t_lb
)
if self.master.data.vuv_lines[self.master.system][self.idx].t_ub is not None:
self.t_ub_box.delete(0, tk.END)
self.t_ub_box.insert(
0,
self.master.data.vuv_lines[self.master.system][self.idx].t_ub
)
remove_all(self.master.plot_frame.l_final)
self.master.plot_frame.l_final = []
if self.master.data.vuv_lines[self.master.system][self.idx].signal is not None:
self.master.plot_frame.l_final.append(
self.master.plot_frame.a_final.plot(
self.master.data.vuv_time[self.master.system],
self.master.data.vuv_lines[self.master.system][self.idx].signal,
'k'
)
)
self.master.plot_frame.canvas.draw()
def add_line(self):
"""Add a new (empty) line to the listbox.
"""
self.master.data.vuv_lines[self.master.system].append(VuvLine(self.master.system))
self.listbox.insert(tk.END, "unassigned")
self.listbox.selection_clear(0, tk.END)
self.listbox.selection_set(tk.END)
self.on_select(None)
def remove_line(self):
"""Remove the currently-selected line from the listbox.
"""
if self.idx is not None:
self.master.data.vuv_lines[self.master.system].pop(self.idx)
self.listbox.delete(self.idx)
self.included_lines_box.delete(0, tk.END)
self.lam_lb_box.delete(0, tk.END)
self.lam_ub_box.delete(0, tk.END)
# Don't clear the time boxes, since we will usually want the same
# time window for baseline subtraction.
# self.t_lb_box.delete(0, tk.END)
# self.t_ub_box.delete(0, tk.END)
self.idx = None
def on_apply(self):
"""Apply the current settings and update the plot.
"""
if self.idx is None:
print("Please select a line to apply!")
self.bell()
return
included_lines = re.findall(LIST_REGEX, self.included_lines_box.get())
if len(included_lines) == 0:
print("No lines to include!")
self.bell()
return
try:
included_lines = [int(l) for l in included_lines]
except ValueError:
print("Invalid entry in included lines!")
self.bell()
return
try:
lam_lb = float(self.lam_lb_box.get())
except ValueError:
print("Invalid lower bound for wavelength!")
self.bell()
return
try:
lam_ub = float(self.lam_ub_box.get())
except ValueError:
print("Invalid upper bound for wavelength!")
self.bell()
return
try:
t_lb = float(self.t_lb_box.get())
except ValueError:
print("Invalid baseline start!")
self.bell()
return
try:
t_ub = float(self.t_ub_box.get())
except ValueError:
print("Invalid baseline end!")
self.bell()
return
xl = self.master.data.vuv_lines[self.master.system][self.idx]
xl.diagnostic_lines = included_lines
xl.lam_lb = lam_lb
xl.lam_ub = lam_ub
xl.t_lb = t_lb
xl.t_ub = t_ub
xl.process_bounds(self.master.data)
self.listbox.delete(self.idx)
self.listbox.insert(self.idx, ', '.join(map(str, included_lines)))
remove_all(self.master.plot_frame.l_final)
self.master.plot_frame.l_final = []
self.master.plot_frame.l_final.append(
self.master.plot_frame.a_final.plot(
self.master.data.vuv_time[self.master.system],
xl.signal,
'k'
)
)
self.master.plot_frame.canvas.draw()
class VuvLine(object):
"""Class to store information on a single VUV diagnostic line.
The line may encapsulate more than one "diagnostic line" from the STRAHL
output in case these lines overlap too much.
Assumes you set the relevant attributes externally, then call
:py:meth:`process_bounds`.
Attributes
----------
diagnostic_lines : list of int
List of the indices of the lines included in the spectral region of the
line.
lam_lb : float
Lower bound of wavelength to include (nm).
lam_ub : float
Upper bound of wavelength to include (nm).
t_lb : float
Lower bound of time to use for baseline subtraction.
t_ub : float
Upper bound of time to use for baseline subtraction.
signal : array, (`N`,)
The `N` timepoints of the combined, baseline-subtracted signal.
"""
def __init__(self, system, diagnostic_lines=None, lam_lb=None, lam_ub=None, t_lb=None, t_ub=None):
self.system = system
self.diagnostic_lines = diagnostic_lines
self.lam_lb = lam_lb
self.lam_ub = lam_ub
self.t_lb = t_lb
self.t_ub = t_ub
self.signal = None
self.uncertainty = None
def process_bounds(self, data):
# Find the indices in the data:
lam_lb_idx, lam_ub_idx = profiletools.get_nearest_idx(
[self.lam_lb, self.lam_ub],
data.vuv_lam[self.system]
)
t_lb_idx, t_ub_idx = profiletools.get_nearest_idx(
[self.t_lb, self.t_ub],
data.vuv_time[self.system]
)
# Form combined spectrum:
# The indices are reversed for lambda vs. index:
self.signal = data.vuv_signal[self.system][:, lam_ub_idx:lam_lb_idx + 1].sum(axis=1)
# Perform the baseline subtraction:
self.signal -= self.signal[t_lb_idx:t_ub_idx + 1].mean()
        # Compute the propagated uncertainty: the variance of the summed signal
        # plus the variance of the subtracted baseline mean (sum of the
        # variances over the baseline window divided by the number of points
        # squared).
        self.uncertainty = (data.vuv_uncertainty[self.system][:, lam_ub_idx:lam_lb_idx + 1]**2).sum(axis=1)
        self.uncertainty += self.uncertainty[t_lb_idx:t_ub_idx + 1].sum() / (t_ub_idx - t_lb_idx + 1)**2
        self.uncertainty = scipy.sqrt(self.uncertainty)
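# Hedged usage sketch for VuvLine (the line index, wavelength window and
# baseline window below are placeholders -- real values come from the GUI,
# and `data` is assumed to provide the vuv_* arrays):
#
#     xl = VuvLine('XEUS', diagnostic_lines=[0], lam_lb=2.02, lam_ub=2.04,
#                  t_lb=0.5, t_ub=0.6)
#     xl.process_bounds(data)
#     # xl.signal and xl.uncertainty now hold the baseline-subtracted trace.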
class XtomoPlotFrame(tk.Frame):
"""Frame to hold the plot with the XTOMO time-series data.
"""
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.f = Figure()
self.suptitle = self.f.suptitle('')
self.a = self.f.add_subplot(1, 1, 1)
self.canvas = FigureCanvasTkAgg(self.f, master=self)
self.canvas.show()
self.canvas.get_tk_widget().grid(row=0, column=0, sticky='NESW')
# Need to put the toolbar in its own frame, since it automatically calls
# pack on itself, but I am using grid.
self.toolbar_frame = tk.Frame(self)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.toolbar_frame)
self.toolbar.update()
self.toolbar_frame.grid(row=1, column=0, sticky='EW')
self.canvas.mpl_connect(
'button_press_event',
lambda event: self.canvas._tkcanvas.focus_set()
)
self.canvas.mpl_connect('key_press_event', self.on_key_event)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self.a.set_xlabel('$t$ [s]')
self.a.set_ylabel('XTOMO signal [AU]')
# Produce the plots here so all of the variables are in place:
# self.l_raw, = self.a.plot(
# self.master.data.xtomo_t[self.master.system],
# self.master.data.xtomo_sig[self.master.system][0],
# 'b',
# label='raw'
# )
# self.l_smoothed, = self.a.plot(
# self.master.data.xtomo_t[self.master.system],
# scipy.convolve(
# self.master.data.xtomo_sig[self.master.system][0],
# scipy.ones(10) / 10.0,
# mode='same'
# ),
# 'g',
# label='smoothed'
# )
# Just put dummy values here, and call "apply" first thing:
# self.l_bsub, = self.a.plot(
# self.master.data.xtomo_t[self.master.system],
# self.master.data.xtomo_sig[self.master.system][0],
# 'm',
# label='baseline-subtracted'
# )
self.l_bsub_smoothed, = self.a.plot(
self.master.data.xtomo_t[int(self.master.sys_state.get())][::100],
scipy.convolve(
self.master.data.xtomo_sig[int(self.master.sys_state.get())][0],
scipy.ones(10) / 10.0,
mode='same'
)[::100],
'k',
label='baseline-subtracted, smoothed'
)
self.l_inj_time = self.a.axvline(
self.master.data.injections[0].t_inj,
color='r',
label='injection time'
)
self.span_inj_window = self.a.axvspan(
self.master.data.injections[0].t_start,
self.master.data.injections[0].t_stop,
color='r',
alpha=0.2
)
self.a.legend(loc='best')
def on_key_event(self, evt):
"""Tie keys to the toolbar.
"""
key_press_handler(evt, self.canvas, self.toolbar)
class XtomoWindow(tk.Tk):
"""GUI to set bad channels and baseline subtraction ranges for XTOMO data.
"""
def __init__(self, data):
print(
"Enter baseline subtraction ranges as '(lb1, ub1), (lb2, ub2)'. "
"Press enter to apply the baseline subtraction and boxcar smoothing. "
"Use the right/left arrows to change channels and the up/down arrows "
"to change systems. The baseline subtraction is the same for all "
"channels/systems across a given injection. "
"Close the window when done to continue the analysis."
)
tk.Tk.__init__(self)
self.data = data
# self.system = system
self.current_inj = 0
self.wm_title("XTOMO inspector")
# Set these up first, since self.plot_frame needs them:
self.sys_s = [
str(k) for k in self.data.xtomo_sig.keys()
if self.data.xtomo_sig[k] is not None
]
self.sys_state = tk.StringVar(self)
self.sys_state.set(self.sys_s[0])
# Now set everything else up in sequence:
self.plot_frame = XtomoPlotFrame(self)
self.plot_frame.grid(row=0, column=0, sticky='NESW', rowspan=8)
self.sys_label = tk.Label(self, text='system:')
self.sys_label.grid(row=0, column=1, sticky='SE')
self.sys_menu = tk.OptionMenu(
self,
self.sys_state,
*self.sys_s,
command=self.update_sys
)
self.sys_menu.grid(row=0, column=2, sticky='SW')
self.chan_label = tk.Label(self, text='channel:')
self.chan_label.grid(row=1, column=1, sticky='SE')
self.chan_state = tk.StringVar(self)
self.chan_state.set("0")
# Put the trace on the variable not the menu so we can change the
# options later:
self.chan_state.trace('w', self.update_channel)
self.channel_s = [str(v) for v in range(0, self.data.xtomo_sig[int(self.sys_state.get())].shape[0])]
self.chan_menu = tk.OptionMenu(
self,
self.chan_state,
*self.channel_s
)
self.chan_menu.grid(row=1, column=2, sticky='SW')
self.inj_label = tk.Label(self, text='injection:')
self.inj_label.grid(row=2, column=1, sticky='NSE')
self.inj_state = tk.StringVar(self)
self.inj_state.set("0")
self.inj_s = [str(v) for v in range(0, len(self.data.injections))]
self.inj_menu = tk.OptionMenu(
self,
self.inj_state,
*self.inj_s,
command=self.update_inj
)
self.inj_menu.grid(row=2, column=2, sticky='NSW')
self.bad_state = tk.IntVar(self)
self.bad_state.set(
int(not self.data.xtomo_channel_mask[int(self.sys_state.get())][int(self.chan_state.get())])
)
self.bad_check = tk.Checkbutton(
self,
text="bad channel",
variable=self.bad_state,
command=self.toggle_bad
)
self.bad_check.grid(row=3, column=1, sticky='NW', columnspan=2)
self.boxcar_label = tk.Label(self, text='boxcar points:')
self.boxcar_label.grid(row=4, column=1, sticky='NE')
self.boxcar_spin = tk.Spinbox(
self,
from_=1,
to=100001,
command=self.apply,
increment=2.0
)
self.boxcar_spin.grid(row=4, column=2, sticky='NW')
self.baseline_ranges_label = tk.Label(
self,
text="baseline subtraction range(s):"
)
self.baseline_ranges_label.grid(row=5, column=1, sticky='NW', columnspan=2)
self.baseline_ranges_box = tk.Entry(self)
self.baseline_ranges_box.grid(row=6, column=1, sticky='NEW', columnspan=2)
self.baseline_ranges_box.delete(0, tk.END)
self.baseline_ranges_box.insert(
0,
str(self.data.xtomo_baseline_ranges[int(self.inj_state.get())])[1:-1]
)
self.apply_button = tk.Button(self, text="apply", command=self.apply)
self.apply_button.grid(row=7, column=1, sticky='NW', columnspan=2)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self.bind("<Left>", self.on_arrow)
self.bind("<Right>", self.on_arrow)
self.bind("<Up>", self.on_arrow)
self.bind("<Down>", self.on_arrow)
self.bind("<Return>", self.apply)
self.bind("<KP_Enter>", self.apply)
self.apply(plot_inj_time=True)
def destroy(self):
self.apply(plot_bsub=False, plot_inj_time=False)
tk.Tk.destroy(self)
# TODO: Sometimes this hangs when matplotlib windows are still open. For
# now I will just instruct the user to close them, but a more permanent
# solution would be nice.
print(
"XtomoWindow.destroy complete. You may need to close all open "
"matplotlib windows if the terminal is hanging."
)
def update_sys(self, new_idx=None):
"""Update the system displayed, keeping the injection constant.
The channel will revert to the first channel of the new system.
This function should:
- Reset the options in the channel menu to reflect the new system.
- Replot the data.
- Retrieve the correct value for the "bad channel" check box and set
it accordingly.
"""
self.channel_s = [str(v) for v in range(0, self.data.xtomo_sig[int(self.sys_state.get())].shape[0])]
self.chan_menu['menu'].delete(0, tk.END)
for c in self.channel_s:
self.chan_menu['menu'].add_command(
label=c,
command=tk._setit(self.chan_state, c)
)
self.chan_state.set(self.channel_s[0])
def update_channel(self, *args):
"""Update the channel displayed, keeping the injection constant.
This function should:
- Replot the data (modifying in place if possible). This means four
curves to plot:
- Raw data
- Smoothed data
- Baseline-subtracted data
- Smoothed baseline-subtracted data
- Retrieve the correct value for the "bad channel" check box and set
it accordingly.
"""
self.apply(plot_inj_time=False)
self.bad_state.set(
not self.data.xtomo_channel_mask[int(self.sys_state.get())][int(self.chan_state.get())]
)
def update_inj(self, new_idx=None):
"""Update the injection displayed, keeping the channel constant.
This function should:
- Store the baseline subtraction range(s) for the previous
injection.
- Load the baseline subtraction range(s) for the new injection.
- Update the plot with the new baseline subtraction range. This
means two curves to plot:
- Baseline-subtracted data
- Smoothed baseline-subtracted data
- Update the vertical bar on the plot to show the injection
location.
"""
self.data.xtomo_baseline_ranges[self.current_inj] = eval(
'[' + self.baseline_ranges_box.get() + ']'
)
self.current_inj = int(self.inj_state.get())
self.baseline_ranges_box.delete(0, tk.END)
self.baseline_ranges_box.insert(
0,
str(self.data.xtomo_baseline_ranges[int(self.inj_state.get())])[1:-1]
)
self.apply(plot_inj_time=True)
def toggle_bad(self):
"""Update the flagging of a bad channel.
This function should set the state to the appropriate value. By doing
this as soon as the button is clicked, we avoid having to handle
anything with this when changing state.
"""
self.data.xtomo_channel_mask[int(self.sys_state.get())][int(self.chan_state.get())] = not bool(self.bad_state.get())
def apply(self, evt=None, plot_bsub=True, plot_inj_time=False):
"""Apply the selected boxcar smoothing and baseline subtraction.
This function should:
- Store the baseline subtraction range(s) for the current injection.
- Update the plot with the new baseline subtraction range and boxcar
smoothing. This means two curves to plot:
- Baseline-subtracted data
- Smoothed baseline-subtracted data
"""
print("Applying settings...")
self.data.xtomo_baseline_ranges[int(self.inj_state.get())] = eval(
'[' + self.baseline_ranges_box.get() + ']'
)
# if plot_raw:
# self.plot_frame.l_raw.set_ydata(
# self.data.xtomo_sig[int(self.sys_state.get())][int(self.chan_state.get())]
# )
# self.plot_frame.l_smoothed.set_ydata(
# scipy.convolve(
# self.data.xtomo_sig[int(self.sys_state.get())][int(self.chan_state.get())],
# scipy.ones(int(self.boxcar_spin.get())) / float(self.boxcar_spin.get()),
# mode='same'
# )
# )
if plot_bsub:
# TODO: This will need to be pulled out into a separate function!
bsub_idxs = []
for r in self.data.xtomo_baseline_ranges[int(self.inj_state.get())]:
lb_idx, ub_idx = profiletools.get_nearest_idx(
r,
self.data.xtomo_t[int(self.sys_state.get())]
)
bsub_idxs.extend(range(lb_idx, ub_idx + 1))
# Reduce to just the unique values:
bsub_idxs = list(set(bsub_idxs))
bsub = scipy.mean(self.data.xtomo_sig[int(self.sys_state.get())][int(self.chan_state.get())][bsub_idxs])
# self.plot_frame.l_bsub.set_ydata(
# self.data.xtomo_sig[int(self.sys_state.get())][int(self.chan_state.get())] - bsub
# )
self.plot_frame.l_bsub_smoothed.set_ydata(
scipy.convolve(
self.data.xtomo_sig[int(self.sys_state.get())][int(self.chan_state.get())],
scipy.ones(int(self.boxcar_spin.get())) / float(self.boxcar_spin.get()),
mode='same'
)[::100] - bsub
)
if plot_inj_time:
self.plot_frame.l_inj_time.set_xdata(
[self.data.injections[int(self.inj_state.get())].t_inj,] * 2
)
xy = self.plot_frame.span_inj_window.get_xy()
xy[[0, 1, 4], 0] = self.data.injections[int(self.inj_state.get())].t_start
xy[[2, 3], 0] = self.data.injections[int(self.inj_state.get())].t_stop
self.plot_frame.span_inj_window.set_xy(xy)
if plot_bsub or plot_inj_time:
self.plot_frame.a.relim()
self.plot_frame.a.autoscale_view()
self.plot_frame.f.canvas.draw()
print("done!")
def on_arrow(self, evt):
"""Handle arrow key events by updating the relevant slider.
This function should:
- Use right/left arrows to change channels
- Use up/down arrows to change system
"""
if evt.keysym == 'Right':
if int(self.chan_state.get()) < int(self.channel_s[-1]):
self.chan_state.set(str(int(self.chan_state.get()) + 1))
# self.update_channel()
else:
self.bell()
elif evt.keysym == 'Left':
if int(self.chan_state.get()) > int(self.channel_s[0]):
self.chan_state.set(str(int(self.chan_state.get()) - 1))
# self.update_channel()
else:
self.bell()
# TODO: This is hard-coded to assume we only ever use (1, 3, 5). This
# should be fixed.
elif evt.keysym == 'Up':
if int(self.sys_state.get()) < int(self.sys_s[-1]):
self.sys_state.set(str(int(self.sys_state.get()) + 2))
self.update_sys()
else:
self.bell()
elif evt.keysym == 'Down':
if int(self.sys_state.get()) > int(self.sys_s[0]):
self.sys_state.set(str(int(self.sys_state.get()) - 2))
self.update_sys()
else:
self.bell()
def read_ADF15(path, debug_plots=[], order=1):
"""Read photon emissivity coefficients from an ADF15 file.
    Returns a dictionary whose keys are the wavelengths of the lines in
    angstroms. Each value is a list of
    :py:class:`scipy.interpolate.RectBivariateSpline` instances (one per entry
    at that wavelength) which evaluate the PEC at a desired log10(dens),
    log10(temp).
Parameter `order` lets you change the order of interpolation -- use 1
(linear) to speed things up, higher values for more accuracy.
"""
with open(path, 'r') as f:
lines = f.readlines()
header = lines.pop(0)
# Get the expected number of lines by reading the header:
num_lines = int(header.split()[0])
pec_dict = {}
for i in xrange(0, num_lines):
# Get the wavelength, number of densities and number of temperatures
# from the first line of the entry:
l = lines.pop(0)
header = l.split()
try:
lam = float(header[0])
except ValueError:
# These lines appear to occur when lam has more digits than the
# allowed field width. We don't care about these lines, so we will
# just ditch them.
warnings.warn(
"Bad line, ISEL=%d, lam=%s" % (i + 1, header[0]),
RuntimeWarning
)
lam = None
if 'excit' not in l.lower():
warnings.warn(
"Throwing out non-excitation line, ISEL=%d, lam=%s" % (i + 1, header[0]),
RuntimeWarning
)
lam = None
num_den = int(header[2])
num_temp = int(header[3])
# Get the densities:
dens = []
while len(dens) < num_den:
dens += [float(v) for v in lines.pop(0).split()]
dens = scipy.asarray(dens)
# Get the temperatures:
temp = []
while len(temp) < num_temp:
temp += [float(v) for v in lines.pop(0).split()]
temp = scipy.asarray(temp)
# Get the PEC's:
PEC = []
while len(PEC) < num_den:
PEC.append([])
while len(PEC[-1]) < num_temp:
PEC[-1] += [float(v) for v in lines.pop(0).split()]
PEC = scipy.asarray(PEC)
if lam is not None:
if lam not in pec_dict:
pec_dict[lam] = []
pec_dict[lam].append(
scipy.interpolate.RectBivariateSpline(
scipy.log10(dens),
scipy.log10(temp),
PEC,
kx=order,
ky=order
)
)
# {'dens': dens, 'temp': temp, 'PEC': PEC}
if lam in debug_plots:
ne_eval = scipy.linspace(dens.min(), dens.max(), 100)
Te_eval = scipy.linspace(temp.min(), temp.max(), 100)
NE, TE = scipy.meshgrid(ne_eval, Te_eval)
PEC_eval = pec_dict[lam][-1].ev(scipy.log10(NE), scipy.log10(TE))
f = plt.figure()
a = f.add_subplot(111, projection='3d')
# a.set_xscale('log')
# a.set_yscale('log')
a.plot_surface(NE, TE, PEC_eval, alpha=0.5)
DENS, TEMP = scipy.meshgrid(dens, temp)
a.scatter(DENS.ravel(), TEMP.ravel(), PEC.T.ravel(), color='r')
a.set_xlabel('$n_e$ [cm$^{-3}$]')
a.set_ylabel('$T_e$ [eV]')
a.set_zlabel('PEC')
f.suptitle(str(lam))
return pec_dict
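# Hedged usage sketch for read_ADF15: the file path is the repository default,
# the wavelength key is whatever the file defines, and the splines expect
# log10(density [cm^-3]) and log10(temperature [eV]).
#
#     pec = read_ADF15('atomdat/adf15/ca/pue#ca16.dat')
#     lam = sorted(pec.keys())[0]
#     val = pec[lam][0].ev(scipy.log10(1e14), scipy.log10(1e3))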
def read_atomdat(path):
"""Read the Ca.atomdat file to get out the diagnostic lines specification.
Returns ordered arrays of the charge state, center wavelength (in angstroms)
and half-width of the window to use (in angstroms).
"""
with open(path, 'r') as f:
lines = f.readlines()
l = lines.pop(0)
while l[0:2] != 'cd':
l = lines.pop(0)
# ditch the specification:
lines.pop(0)
# empty line:
lines.pop(0)
# header for number of lines:
lines.pop(0)
# Now get the number of lines:
num_lines = int(lines.pop(0).strip())
# empty line:
lines.pop(0)
# Header:
lines.pop(0)
# Now read the line specifications:
charges = []
CWL = []
HW = []
for i in xrange(0, num_lines):
data = lines.pop(0).split()
charges.append(float(data[0]))
CWL.append(float(data[1]))
HW.append(float(data[2]))
return (charges, CWL, HW)
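# Hedged usage sketch for read_atomdat (the path is an assumption):
#
#     charges, CWL, HW = read_atomdat('Ca.atomdat')
#     # charges[i] is the charge state, CWL[i] the center wavelength [A] and
#     # HW[i] the half-width [A] of the i-th diagnostic line window.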
def compute_emiss(pec_dict, cw, hw, ne, nZ, Te, no_ne=False):
"""Compute the emission summed over all lines in a given window.
This is very approximate -- it just adds up the photons per second for the
included lines as computed directly from the PECs.
Parameters
----------
pec_dict : dictionary
The photon emission coefficient dictionary as returned by
:py:func:`read_ADF15` for the desired charge state.
    cw : float
        The center wavelength of the bin to use, in angstroms.
    hw : float
        The half-width of the bin to use, in angstroms.
    ne : array of float
        The electron density on the grid, in cm^-3.
    nZ : array of float
        The density of the selected charge state on the grid, in cm^-3.
Te : array of float
The electron temperature on the grid, in eV.
no_ne : bool, optional
If True, the PEC is taken to not depend on density. Default is False.
"""
lb = cw - hw
ub = cw + hw
wl = scipy.asarray(pec_dict.keys())
included = wl[(wl >= lb) & (wl <= ub)]
emiss = scipy.zeros_like(ne)
for lam in included:
# Need to loop over all lines having the same lam:
for p in pec_dict[lam]:
if no_ne:
emiss += 1.986449e-15 / lam * ne * nZ * p(scipy.log10(Te))
else:
emiss += 1.986449e-15 / lam * ne * nZ * p.ev(
scipy.log10(ne),
scipy.log10(Te)
)
# Sometimes there are interpolation issues with the PECs:
emiss[emiss < 0] = 0.0
return emiss
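# Hedged usage sketch tying compute_emiss to read_ADF15 and read_atomdat
# (ne, nZ and Te are assumed to be arrays on a common spatial grid):
#
#     pec = read_ADF15('atomdat/adf15/ca/pue#ca16.dat')
#     charges, CWL, HW = read_atomdat('Ca.atomdat')
#     emiss = compute_emiss(pec, CWL[0], HW[0], ne, nZ, Te)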
def flush_blobs(sampler, burn):
"""Zeros out all blobs up to (but not including) the one at burn.
"""
for i_step in xrange(0, burn):
b_chains = sampler.blobs[i_step]
for i_chain in xrange(0, len(b_chains)):
b_chains[i_chain] = (-scipy.inf, None, None, None, 'cleared')
class _InterpBrightWrapper(object):
def __init__(self, t, num_s, num_v, num_x):
self.t = t
self.num_s = num_s
self.num_v = num_v
self.num_x = num_x
def __call__(self, params):
# TODO: This needs to be updated to handle XTOMO -- implementation is incomplete!
s, v, t_, dt_s, dt_v = params
sbright_interp = scipy.zeros((len(self.t), self.num_s))
vbright_interp = scipy.zeros((len(self.t), self.num_v))
xbright_interp = scipy.zeros((len(self.t), self.num_x))
postinj_s = (self.t >= dt_s)
for j in xrange(0, s.shape[1]):
sbright_interp[postinj_s, j] = scipy.interpolate.InterpolatedUnivariateSpline(
t_ + dt_s,
s[:, j]
)(self.t[postinj_s])
postinj_v = (self.t >= dt_v)
for j in xrange(0, v.shape[1]):
vbright_interp[postinj_v, j] = scipy.interpolate.InterpolatedUnivariateSpline(
t_ + dt_v,
v[:, j]
)(self.t[postinj_v])
return (sbright_interp, vbright_interp, xbright_interp)
class HirexVuvFrame(tk.Frame):
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.h_frame = tk.Frame(self)
self.h_frame.grid(row=0, column=0, sticky='NESW')
self.h_frame.grid_columnconfigure(0, weight=1)
self.h_frame.grid_rowconfigure(0, weight=1)
self.v_frame = tk.Frame(self)
self.v_frame.grid(row=0, column=1, sticky='NESW')
self.v_frame.grid_columnconfigure(0, weight=1)
self.v_frame.grid_rowconfigure(0, weight=1)
self.f_h = Figure()
self.suptitle_h = self.f_h.suptitle("")
self.f_v = Figure()
self.suptitle_v = self.f_v.suptitle("")
self.canvas_h = FigureCanvasTkAgg(self.f_h, master=self.h_frame)
self.canvas_h.show()
self.canvas_h.get_tk_widget().grid(row=0, column=0, sticky='NESW')
self.canvas_v = FigureCanvasTkAgg(self.f_v, master=self.v_frame)
self.canvas_v.show()
self.canvas_v.get_tk_widget().grid(row=0, column=0, sticky='NESW')
dum, self.a_H = self.master.r.signals[0].plot_data(f=self.f_h, norm=self.master.r.normalize)
dum, self.a_V = self.master.r.signals[1].plot_data(f=self.f_v, norm=self.master.r.normalize, ncol=1)
# Make dummy lines to modify data in:
self.l_H = []
self.l_V = []
for k, a in enumerate(self.a_H):
l, = a.plot(
self.master.time_vec - self.master.r.time_1,
scipy.zeros_like(self.master.time_vec)
)
self.l_H.append(l)
for k, a in enumerate(self.a_V):
l, = a.plot(
self.master.time_vec - self.master.r.time_1,
scipy.zeros_like(self.master.time_vec)
)
self.l_V.append(l)
# Need to put the toolbar in its own frame, since it automatically calls
# pack on itself, but I am using grid.
self.toolbar_frame_h = tk.Frame(self)
self.toolbar_h = NavigationToolbar2TkAgg(self.canvas_h, self.toolbar_frame_h)
self.toolbar_h.update()
self.toolbar_frame_h.grid(row=1, column=0, sticky='EW')
self.toolbar_frame_v = tk.Frame(self)
self.toolbar_v = NavigationToolbar2TkAgg(self.canvas_v, self.toolbar_frame_v)
self.toolbar_v.update()
self.toolbar_frame_v.grid(row=1, column=1, sticky='EW')
self.canvas_h.mpl_connect(
'button_press_event',
lambda event: self.canvas_h._tkcanvas.focus_set()
)
self.canvas_h.mpl_connect('key_press_event', self.on_key_event)
self.canvas_v.mpl_connect(
'button_press_event',
lambda event: self.canvas_v._tkcanvas.focus_set()
)
self.canvas_v.mpl_connect('key_press_event', self.on_key_event)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
    def on_key_event(self, evt):
        """Tie keys to the toolbar of the canvas that generated the event.
        """
        # This frame has two canvases and toolbars (horizontal and vertical),
        # so dispatch based on which canvas fired the event.
        if evt.canvas is self.canvas_v:
            key_press_handler(evt, self.canvas_v, self.toolbar_v)
        else:
            key_press_handler(evt, self.canvas_h, self.toolbar_h)
class DVFrame(tk.Frame):
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.f = Figure(figsize=(2, 2))
self.a_D = self.f.add_subplot(2, 1, 1)
self.a_D.set_title('$D$ [m$^2$/s]')
self.a_V = self.f.add_subplot(2, 1, 2)
self.a_V.set_title('$V$ [m/s]')
self.a_V.set_xlabel('$r/a$')
self.l_D, = self.a_D.plot(
self.master.r.roa_grid_DV,
scipy.zeros_like(self.master.r.roa_grid_DV)
)
self.l_V, = self.a_V.plot(
self.master.r.roa_grid_DV,
scipy.zeros_like(self.master.r.roa_grid_DV)
)
self.canvas = FigureCanvasTkAgg(self.f, master=self)
self.canvas.show()
self.canvas.get_tk_widget().grid(row=0, column=0, sticky='NESW')
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
class ParameterFrame(tk.Frame):
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.labels = []
self.boxes = []
row = 0
for l, b in zip(self.master.r.get_labels(), self.master.r.get_prior().bounds[:]):
self.labels.append(tk.Label(self, text=l.translate(None, '$\\')))
self.labels[-1].grid(row=row, column=0, sticky='NSE')
self.boxes.append(
tk.Spinbox(
self,
from_=b[0],
to=b[1],
command=self.master.apply,
increment=max((b[1] - b[0]) / 100.0, 0.0001),
)
)
self.boxes[-1].grid(row=row, column=1, sticky='NSW')
row += 1
class BoxcarFrame(tk.Frame):
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.boxcar_label = tk.Label(self, text='XTOMO boxcar points:')
self.boxcar_label.grid(row=0, column=0, sticky='NSE')
self.boxcar_spin = tk.Spinbox(
self,
from_=1,
to=100001,
command=self.master.apply,
increment=2.0
)
self.boxcar_spin.grid(row=0, column=1, sticky='NSW')
class XTOMOExplorerPlotFrame(tk.Frame):
def __init__(self, system, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.system = system
self.f = Figure()
self.suptitle = self.f.suptitle("")
self.canvas = FigureCanvasTkAgg(self.f, master=self)
self.canvas.show()
self.canvas.get_tk_widget().grid(row=0, column=0, sticky='NESW')
dum, self.a_X = self.master.master.r.signals[2].plot_data(
norm=self.master.master.r.normalize,
f=self.f
)
# Make dummy lines to modify data in:
self.l_X = []
for k, a in enumerate(self.a_X):
l, = a.plot(
self.master.master.time_vec - self.master.master.r.time_1,
scipy.zeros_like(self.master.master.time_vec),
'g'
)
self.l_X.append(l)
# Need to put the toolbar in its own frame, since it automatically calls
# pack on itself, but I am using grid.
self.toolbar_frame = tk.Frame(self)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.toolbar_frame)
self.toolbar.update()
self.toolbar_frame.grid(row=1, column=0, sticky='EW')
self.canvas.mpl_connect(
'button_press_event',
lambda event: self.canvas._tkcanvas.focus_set()
)
self.canvas.mpl_connect('key_press_event', self.on_key_event)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
def on_key_event(self, evt):
"""Tie keys to the toolbar.
"""
key_press_handler(evt, self.canvas, self.toolbar)
class XTOMOExplorerWindow(tk.Toplevel):
def __init__(self, *args, **kwargs):
tk.Toplevel.__init__(self, *args, **kwargs)
self.XTOMO_frames = [XTOMOExplorerPlotFrame(0, self),]
for k, f in enumerate(self.XTOMO_frames):
f.grid(row=0, column=k, sticky='NESW')
self.grid_rowconfigure(0, weight=1)
for k in range(0, len(self.XTOMO_frames)):
self.grid_columnconfigure(k, weight=1)
class ParameterExplorer(tk.Tk):
def __init__(self, r):
tk.Tk.__init__(self)
self.r = r
# Do a dummy STRAHL run to figure out the length of the time vector:
params = self.r.get_prior().random_draw()
cs_den, sqrtpsinorm, time, ne, Te = self.r.DV2cs_den(params)
self.time_vec = time
self.wm_title("Parameter Explorer")
self.hirex_vuv_frame = HirexVuvFrame(self)
self.hirex_vuv_frame.grid(row=0, column=0, sticky='NESW', rowspan=3)
self.DV_frame = DVFrame(self)
self.DV_frame.grid(row=0, column=1, sticky='NESW')
self.parameter_frame = ParameterFrame(self)
self.parameter_frame.grid(row=1, column=1, sticky='NESW')
# self.boxcar_frame = BoxcarFrame(self)
# self.boxcar_frame.grid(row=2, column=1, sticky='NESW')
self.apply_button = tk.Button(self, text='apply', command=self.apply)
self.apply_button.grid(row=2, column=1, sticky='NESW')
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self.bind("<Return>", self.apply)
self.bind("<KP_Enter>", self.apply)
# self.XTOMO_window = XTOMOExplorerWindow(self)
def apply(self, evt=None):
print("begin apply...")
params = [float(b.get()) for b in self.parameter_frame.boxes]
D, V = self.r.eval_DV(params)
self.DV_frame.l_D.set_ydata(D)
self.DV_frame.l_V.set_ydata(V)
self.DV_frame.a_D.relim()
self.DV_frame.a_V.relim()
self.DV_frame.a_D.autoscale_view()
self.DV_frame.a_V.autoscale_view()
self.DV_frame.f.canvas.draw()
try:
cs_den, sqrtpsinorm, time, ne, Te = self.r.DV2cs_den(params)
except TypeError:
print('fail!')
return
dlines = self.r.cs_den2dlines(cs_den, sqrtpsinorm, time, ne, Te)
sig = self.r.dlines2sig(dlines, time, params=params)
lp = self.r.sig2ln_prob(sig, time, params=params)
self.hirex_vuv_frame.suptitle_h.set_text("%.3e" % (lp,))
self.hirex_vuv_frame.suptitle_v.set_text("%.3e" % (lp,))
eig_D, eig_V, knots_D, knots_V, param_scaling, param_source, eig_ne, eig_Te = self.r.split_params(params)
time = time - self.r.time_1
time_s = scipy.sort(self.r.signals[0].t + param_source[0])
time_v = scipy.sort(self.r.signals[1].t + param_source[1])
# time_xtomo = scipy.sort(self.r.signals[2].t + param_source[2])
for k, l in enumerate(self.hirex_vuv_frame.l_H):
l.set_ydata(sig[0][:, k])
l.set_xdata(time_s)
for k, l in enumerate(self.hirex_vuv_frame.l_V):
l.set_ydata(sig[1][:, k])
l.set_xdata(time_v)
# TODO: This is an ugly holdover from how this used to be done...should
# be updated.
# for frame in self.XTOMO_window.XTOMO_frames:
# for k, l in enumerate(frame.l_X):
# l.set_ydata(sig[2][:, k])
# l.set_xdata(time_xtomo)
# frame.f.canvas.draw()
self.hirex_vuv_frame.f_h.canvas.draw()
self.hirex_vuv_frame.f_v.canvas.draw()
print("apply done!")
| gpl-3.0 |
iismd17/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
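# The note above about constructing a purer class B subset can be sketched as
# follows; the cutoff of 0.5 is an arbitrary assumption, not part of the
# original example.
purity_threshold = 0.5
X_high_confidence_B = X[twoclass_output > purity_threshold]
print("Samples kept as high-confidence class B: %d" % len(X_high_confidence_B))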
| bsd-3-clause |
STANDFIRE/standfire | legacy/intervene.py | 2 | 6901 | # -*- coding: utf-8 -*-
"""
The intervene module is a collection of treatment algorithms
"""
import pandas as pd
import math
__authors__ = "Lucas Wells"
__Copyright__ = "Copyright 2015, STANDFIRE"
class BaseSilv(object):
"""
Collector class for treatments.
:ivar trees: Pandas data frame of trees
:ivar extent: min_x, min_y, max_x, max_y coordinates of trees data frame
    .. note:: All treatment classes should inherit BaseSilv
"""
# class attributes
treatment_collection = {}
def __init__(self, trees):
"""Constructor"""
# instance variables
self.trees = ""
self.extent = {"min_x": 0, "min_y": 0, "max_x": 0, "max_y": 0}
# type check and handle accordingly
if isinstance(trees, pd.DataFrame):
self.trees = trees
elif type(trees) == str:
try:
self.trees = pd.read_csv(trees)
            except Exception:
                raise TypeError("String argument must point to a .csv file")
else:
raise TypeError("argument type must be either an instance of "
"Pandas.DataFrame() or a string indicating a path "
"to a comma-delimted file")
# set extent
self.set_extent(min(self.trees['xloc']), min(self.trees['yloc']),
max(self.trees['xloc']), max(self.trees['yloc']))
def get_trees(self):
"""
Returns the trees data frame of the object
:return: trees data frame
:rtype: Pandas.DataFrame
"""
return self.trees
def add_to_treatment_collection(self, treatment, ID):
"""
Adds treatment to static class attribute in intervene.BaseSilv()
"""
self.treatment_collection[ID] = treatment
def clear_treatment_collection(self):
"""
        Deletes all treatments currently in the treatment collection class
        attribute
        """
        # Reset the shared class attribute rather than shadowing it with an
        # instance attribute.
        BaseSilv.treatment_collection = {}
def set_extent(self, min_x, min_y, max_x, max_y):
"""
Sets extent instance variable
:param min_x: minimum x coordinate
:type min_x: float
:param min_y: minimum y coordinate
:type min_y: float
:param max_x: maximum x coordinate
:type max_x: float
:param max_y: maximum y coordinate
:type max_y: float
.. note:: ``set_extent`` is automatically called by ``BaseSilv()``
constructor
"""
self.extent["min_x"] = min_x
self.extent["min_y"] = min_y
self.extent["max_x"] = max_x
self.extent["max_y"] = max_y
def get_extent(self):
"""
Returns bounding box of tree coordinates
:return: [min_x, min_y, max_x, max_y]
:rtype: list of floats
:Examples:
>>> from standfire.intervene import SpaceCrowns
>>> space = SpaceCrowns("/Users/standfire/test_trees.csv")
>>> bbox = space.get_extent()
>>> bbox
[1.3, 3.5, 63.1, 61.4]
"""
return self.extent
class SpaceCrowns(BaseSilv):
"""
:ivar crown_space: instance variable for crown spacing; initial value = 0
"""
def __init__(self, trees):
"""Constructor"""
# call parent class constructor
super(SpaceCrowns, self).__init__(trees)
# instance attributes
self.crown_space = 0
self.treatment_options = {1: "thin from below to crown spacing",
2: "thin from above to crown spacing",
3: "random thin to crown spacing"}
def set_crown_space(self, crown_space):
"""
Sets spacing between crowns for the treatment
:param crown_space: crown spacing in units of input data frame
:type crown_space: float
"""
self.crown_space = crown_space
def get_treatment_options(self):
"""
Returns dictionary of treatment options
:return: treatment option codes and description
:rtype: dictionary
"""
return self.treatment_options
def get_distance(self, tree_a, tree_b):
"""
Calculate the distance between two trees
Uses Pythagoras' theorem to calculate distance between two tree crowns
in units of input data frame
:param tree_a: indexed row of tree a in Pandas data frame
:type tree_a: int
:param tree_b: indexed row of tree b in Pandas data frame
:type tree_b: int
:return: distance between two crowns in units of input data frame
:rtype: float
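        :Examples:
        >>> space = SpaceCrowns("/Users/standfire/test_trees.csv")
        >>> space.get_distance(0, 1)
        2.7
        (Output is illustrative; the value depends on the input data.)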
"""
# get x,y coordinates of tree_a and tree_b
x1, x2 = self.trees['xloc'][tree_a], self.trees['xloc'][tree_b]
y1, y2 = self.trees['yloc'][tree_a], self.trees['yloc'][tree_b]
# get crown radii of tree_a and tree_b
crad_a, crad_b = self.trees['crd'][tree_a], self.trees['crd'][tree_b]
# return the distance between trees (Pythagoras' theorem)
return math.sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2)) - (crad_a + crad_b)
def treat(self):
"""
Treatment algorithm for removing trees based on input crown spacing
.. todo:: Optimize algorithm by incorporating ``search_rad``.
.. todo:: split this function into 3
"""
connect = {}
trees = self.trees
search_rad = self.crown_space + (max(trees['crd']) * 2)
        if self.crown_space == 0:
            print "WARNING: no trees were removed because crown spacing = 0"
            return self.trees
else:
for i in trees.index:
connect[i] = []
for e in trees.index[(trees['xloc'] < trees['xloc'][i] + 15) &
(trees['xloc'] > trees['xloc'][i] - 15) &
(trees['yloc'] < trees['yloc'][i] + 15) &
(trees['yloc'] > trees['yloc'][i] - 15)]:
if e != i:
space = self.get_distance(i, e)
if space < self.crown_space:
connect[i].append(e)
dbh_dsc = trees.sort(['dbh'], ascending=False)
indx = []
for i in dbh_dsc.index:
if i not in indx:
# only thin trees to the right of the burner
if dbh_dsc['xloc'][i] > 50:
if connect[i]:
vals = connect[i]
for e in vals:
indx.append(e)
try:
connect.pop(e)
except:
pass
thinned = dbh_dsc.drop(indx)
return thinned
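# A minimal usage sketch (the file name and spacing are placeholders):
#
#     space = SpaceCrowns("trees.csv")
#     space.set_crown_space(2.0)
#     thinned = space.treat()
#     thinned.to_csv("trees_thinned.csv", index=False)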
| gpl-3.0 |
wanggang3333/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
JT5D/scikit-learn | sklearn/svm/tests/test_sparse.py | 5 | 10439 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
clf.fit(X, Y)
sp_clf = svm.SVC(kernel='linear', probability=True, random_state=0)
sp_clf.fit(X_sp, Y)
assert_array_equal(sp_clf.predict(T), true_result)
assert_true(sparse.issparse(sp_clf.support_vectors_))
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_true(sparse.issparse(sp_clf.dual_coef_))
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_true(sparse.issparse(sp_clf.coef_))
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
assert_array_almost_equal(clf.support_, sp_clf.support_)
assert_array_almost_equal(clf.predict(T), sp_clf.predict(T))
# refit with a different dataset
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
assert_array_almost_equal(clf.support_, sp_clf.support_)
assert_array_almost_equal(clf.predict(T2), sp_clf.predict(T2))
assert_array_almost_equal(clf.predict_proba(T2),
sp_clf.predict_proba(T2), 4)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
"""Test the sparse SVC with the iris dataset"""
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.todense(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_array_almost_equal(
clf.predict(iris.data.todense()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
def test_error():
"""
Test that it gives proper exception on deficient input
"""
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
"""
Similar to test_SVC
"""
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=4)
def test_linearsvc_iris():
"""Test the sparse LinearSVC with the iris dataset"""
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.todense(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.todense()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.todense()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
"""
Test class weights
"""
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
"""
Test weights on individual samples
"""
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
"""
Test that sparse liblinear honours intercept_scaling param
"""
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
"""
Test on a subset from the 20newsgroups dataset.
    This catches some bugs if the input is not correctly converted into
sparse format or weights are not correctly initialized.
"""
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.todense(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.todense())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
frxstrem/fys3150 | project2/code-joseph/jacobi.py | 1 | 1544 | from matplotlib.pyplot import *
from numpy import *
S1 = loadtxt("eigenvectorsminmin.dat")
E1 = loadtxt("eigenvaluesminmin.dat")
S2 = loadtxt("eigenvectorsmin.dat")
E2 = loadtxt("eigenvaluesmin.dat")
S3 = loadtxt("eigenvectors1.dat")
E3 = loadtxt("eigenvalues1.dat")
S4 = loadtxt("eigenvectors5.dat")
E4 = loadtxt("eigenvalues5.dat")
index1 = argmin(E1)
index2 = argmin(E2)
index3 = argmin(E3)
index4 = argmin(E4)
vector1 = S1[:,index1]/sqrt(60./800.)
vector2 = S2[:,index2]/sqrt(60./800.)
vector3 = S3[:,index3]/sqrt(60./800.)
vector4 = S4[:,index4]/sqrt(60./800.)
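# note: the factor sqrt(60./800.) is presumably sqrt(drho) with grid spacing
# drho = rho_max/N = 60/800, used to normalise the discrete eigenvectors;
# the plot calls further down divide by the same factor once more.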
x = linspace(0,60,len(vector1))
##############Writing to file for Latex representation
with open("vectors.dat","w") as fp:
fp.write("rho rhoN omega vec \n")
for i in range(len(vector1)):
fp.write("%f %d %f %f \n" %(x[i], 60, 0.01, vector1[i]))
for i in range(len(vector2)):
fp.write("%f %d %f %f \n" %(x[i], 60, 0.5, vector2[i]))
for i in range(len(vector3)):
fp.write("%f %d %f %f \n" %(x[i], 60, 1, vector3[i]))
for i in range(len(vector4)):
fp.write("%f %d %f %f \n" %(x[i], 60, 5, vector4[i]))
#################
#################PLOTTING
plot(x,abs(vector1/sqrt(60./800.)),label="$\\omega_r$ = 0.01")
plot(x,abs(vector2/sqrt(60./800.)),label="$\\omega_r$ = 0.5")
plot(x,abs(vector3/sqrt(60./800.)),label="$\\omega_r$ = 1")
plot(x,abs(vector4/sqrt(60./800.)),label="$\\omega_r$ = 5")
legend()
xlabel("$\\rho$")
ylabel("$\\Psi$")
title("Eigenvectors of the ground state as a function of $\\omega_r$")
show()
| mit |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/tri/triplot.py | 21 | 3124 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
def triplot(ax, *args, **kwargs):
"""
    Draw an unstructured triangular grid as lines and/or markers.
The triangulation to plot can be specified in one of two ways;
either::
triplot(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
triplot(x, y, ...)
triplot(x, y, triangles, ...)
triplot(x, y, triangles=triangles, ...)
triplot(x, y, mask=mask, ...)
triplot(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of these
possibilities.
The remaining args and kwargs are the same as for
:meth:`~matplotlib.axes.Axes.plot`.
Return a list of 2 :class:`~matplotlib.lines.Line2D` containing
respectively:
- the lines plotted for triangles edges
- the markers plotted for triangles nodes
**Example:**
.. plot:: mpl_examples/pylab_examples/triplot_demo.py
"""
import matplotlib.axes
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
x, y, edges = (tri.x, tri.y, tri.edges)
# Decode plot format string, e.g., 'ro-'
fmt = ""
if len(args) > 0:
fmt = args[0]
linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)
# Insert plot format string into a copy of kwargs (kwargs values prevail).
kw = kwargs.copy()
for key, val in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if val is not None:
kw[key] = kwargs.get(key, val)
# Draw lines without markers.
# Note 1: If we drew markers here, most markers would be drawn more than
# once as they belong to several edges.
# Note 2: We insert nan values in the flattened edges arrays rather than
# plotting directly (triang.x[edges].T, triang.y[edges].T)
# as it considerably speeds-up code execution.
linestyle = kw['linestyle']
kw_lines = kw.copy()
kw_lines['marker'] = 'None' # No marker to draw.
kw_lines['zorder'] = kw.get('zorder', 1) # Path default zorder is used.
if (linestyle is not None) and (linestyle not in ['None', '', ' ']):
tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)
tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)
tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),
**kw_lines)
else:
tri_lines = ax.plot([], [], **kw_lines)
# Draw markers separately.
marker = kw['marker']
kw_markers = kw.copy()
kw_markers['linestyle'] = 'None' # No line to draw.
if (marker is not None) and (marker not in ['None', '', ' ']):
tri_markers = ax.plot(x, y, **kw_markers)
else:
tri_markers = ax.plot([], [], **kw_markers)
return tri_lines + tri_markers
| bsd-3-clause |
weinbe58/QuSpin | examples/scripts/example9.py | 3 | 5112 | from __future__ import print_function, division
import sys,os
# line 4 and line 5 below are for development purposes and can be removed
qspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,qspin_path)
#####################################################################
# example 9 #
# In this script we demonstrate how to use QuSpin's #
# general basis class to construct user-defined symmetry sectors. #
# We study thermalisation in the 2D transverse-field Ising model #
# with periodic boundary conditions. #
#####################################################################
from quspin.operators import hamiltonian, exp_op # operators
from quspin.basis import spin_basis_1d, spin_basis_general # spin basis constructor
from quspin.tools.measurements import obs_vs_time # calculating dynamics
from quspin.tools.Floquet import Floquet_t_vec # period-spaced time vector
import numpy as np # general math functions
import matplotlib.pyplot as plt # plotting library
#
###### define model parameters ######
L_1d = 16 # length of chain for spin 1/2
Lx, Ly = 4, 4 # linear dimension of the spin-1/2 2d lattice
N_2d = Lx*Ly # number of sites in the 2d lattice
Omega = 2.0 # drive frequency
A = 2.0 # drive amplitude
#
###### setting up user-defined symmetry transformations for 2d lattice ######
s = np.arange(N_2d) # sites [0,1,2,....]
x = s%Lx # x positions for sites
y = s//Lx # y positions for sites
T_x = (x+1)%Lx + Lx*y # translation along x-direction
T_y = x +Lx*((y+1)%Ly) # translation along y-direction
P_x = x + Lx*(Ly-y-1) # reflection about x-axis
P_y = (Lx-x-1) + Lx*y # reflection about y-axis
Z = -(s+1) # spin inversion
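# e.g. with Lx = Ly = 4: site 1 (x=1, y=0) maps to site 2 under T_x and to site 5
# under T_y, while site 3 (x=3, y=0) wraps around to site 0 under T_x (periodic
# boundaries); the negative entries of Z, -(s+1), follow QuSpin's general-basis
# convention for a spin flip on site s.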
#
###### setting up bases ######
basis_1d = spin_basis_1d(L_1d,kblock=0,pblock=1,zblock=1) # 1d - basis
basis_2d = spin_basis_general(N_2d,kxblock=(T_x,0),kyblock=(T_y,0),
pxblock=(P_x,0),pyblock=(P_y,0),zblock=(Z,0)) # 2d - basis
# print information about the basis
print("Size of 1D H-space: {Ns:d}".format(Ns=basis_1d.Ns))
print("Size of 2D H-space: {Ns:d}".format(Ns=basis_2d.Ns))
#
###### setting up operators in hamiltonian ######
# setting up site-coupling lists
Jzz_1d=[[-1.0,i,(i+1)%L_1d] for i in range(L_1d)]
hx_1d =[[-1.0,i] for i in range(L_1d)]
#
Jzz_2d=[[-1.0,i,T_x[i]] for i in range(N_2d)]+[[-1.0,i,T_y[i]] for i in range(N_2d)]
hx_2d =[[-1.0,i] for i in range(N_2d)]
# setting up hamiltonians
# 1d
Hzz_1d=hamiltonian([["zz",Jzz_1d]],[],basis=basis_1d,dtype=np.float64)
Hx_1d =hamiltonian([["x",hx_1d]],[],basis=basis_1d,dtype=np.float64)
# 2d
Hzz_2d=hamiltonian([["zz",Jzz_2d]],[],basis=basis_2d,dtype=np.float64)
Hx_2d =hamiltonian([["x",hx_2d]],[],basis=basis_2d,dtype=np.float64)
#
###### calculate initial states ######
# calculating bandwidth for non-driven hamiltonian
[E_1d_min],psi_1d = Hzz_1d.eigsh(k=1,which="SA")
[E_2d_min],psi_2d = Hzz_2d.eigsh(k=1,which="SA")
# setting up initial states
psi0_1d = psi_1d.ravel()
psi0_2d = psi_2d.ravel()
#
###### time evolution ######
# stroboscopic time vector
nT = 200 # number of periods to evolve to
t=Floquet_t_vec(Omega,nT,len_T=1) # t.vals=t, t.i=initial time, t.T=drive period
# creating generators of time evolution using exp_op class
U1_1d = exp_op(Hzz_1d+A*Hx_1d,a=-1j*t.T/4)
U2_1d = exp_op(Hzz_1d-A*Hx_1d,a=-1j*t.T/2)
U1_2d = exp_op(Hzz_2d+A*Hx_2d,a=-1j*t.T/4)
U2_2d = exp_op(Hzz_2d-A*Hx_2d,a=-1j*t.T/2)
# user-defined generator for stroboscopic dynamics
def evolve_gen(psi0,nT,*U_list):
yield psi0
for i in range(nT): # loop over number of periods
for U in U_list: # loop over unitaries
psi0 = U.dot(psi0)
yield psi0
# get generator objects for time-evolved states
psi_1d_t = evolve_gen(psi0_1d,nT,U1_1d,U2_1d,U1_1d)
psi_2d_t = evolve_gen(psi0_2d,nT,U1_2d,U2_2d,U1_2d)
#
###### compute expectation values of observables ######
# measure Hzz as a function of time
Obs_1d_t = obs_vs_time(psi_1d_t,t.vals,dict(E=Hzz_1d),return_state=True)
Obs_2d_t = obs_vs_time(psi_2d_t,t.vals,dict(E=Hzz_2d),return_state=True)
# calculating the entanglement entropy density
Sent_time_1d = basis_1d.ent_entropy(Obs_1d_t["psi_t"],sub_sys_A=range(L_1d//2))["Sent_A"]
Sent_time_2d = basis_2d.ent_entropy(Obs_2d_t["psi_t"],sub_sys_A=range(N_2d//2))["Sent_A"]
# Page-value estimates of the entanglement entropy density (used for normalisation)
s_p_1d = np.log(2)-2.0**(-L_1d//2)/L_1d
s_p_2d = np.log(2)-2.0**(-N_2d//2)/N_2d
#
###### plotting results ######
plt.plot(t.strobo.inds,(Obs_1d_t["E"]-E_1d_min)/(-E_1d_min),marker='.',markersize=5,label="$1d$")
plt.plot(t.strobo.inds,(Obs_2d_t["E"]-E_2d_min)/(-E_2d_min),marker='.',markersize=5,label="$2d$")
plt.grid()
plt.ylabel("$Q(t)$",fontsize=20)
plt.xlabel("$t/T$",fontsize=20)
plt.savefig("TFIM_Q.pdf")
plt.figure()
plt.plot(t.strobo.inds,Sent_time_1d/s_p_1d,marker='.',markersize=5,label="$1d$")
plt.plot(t.strobo.inds,Sent_time_2d/s_p_2d,marker='.',markersize=5,label="$2d$")
plt.grid()
plt.ylabel("$s_{\mathrm{ent}}(t)/s_\mathrm{Page}$",fontsize=20)
plt.xlabel("$t/T$",fontsize=20)
plt.legend(loc=0,fontsize=16)
plt.tight_layout()
plt.savefig("TFIM_S.pdf")
#plt.show()
plt.close() | bsd-3-clause |
milankl/swm | calc/misc/spec_ens _calc.py | 1 | 3245 | ## COMPUTE ENSTROPHY SPECTRUM
from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import time as tictoc
from netCDF4 import Dataset
import glob
import matplotlib.pyplot as plt
exec(open(path+'swm_param.py').read())
exec(open(path+'swm_operators.py').read())
exec(open(path+'swm_rhs.py').read())
exec(open(path+'swm_integration.py').read())
exec(open(path+'swm_output.py').read())
# OPTIONS
runfolder = [2,3]
print('Calculating enstrophy spectrum from run ' + str(runfolder))
##
def ens_spec_avg(z,dx,dy):
    """ Computes the time-averaged enstrophy wavenumber spectrum of 3D (t,y,x) data via
    radial integration, k = sqrt(kx**2 + ky**2). TODO: correct normalisation, so that the
    integral in physical space corresponds to the integral in Fourier space.
"""
nt,ny,nx = np.shape(z)
kx = (1/(dx))*np.hstack((np.arange(0,(nx+1)/2.),np.arange(-nx/2.+1,0)))/float(nx)
ky = (1/(dy))*np.hstack((np.arange(0,(ny+1)/2.),np.arange(-ny/2.+1,0)))/float(ny)
kxx,kyy = np.meshgrid(kx,ky)
# radial distance from kx,ky = 0
kk = np.sqrt(kxx**2 + kyy**2)
if nx >= ny: #kill negative wavenumbers
k = kx[:int(nx/2)+1]
else:
k = ky[:int(ny/2)+1]
dk = k[1] - k[0]
# create radial coordinates, associated with k[i]
# nearest point interpolation to get points within the -.5,.5 annulus
rcoords = []
for i in range(len(k)):
rcoords.append(np.where((kk>(k[i]-.5*dk))*(kk<=(k[i]+.5*dk))))
# 2D FFT average
pz = np.empty((nt,ny,nx))
for i in range(nt):
pz[i,:,:] = abs(np.fft.fft2(z[i,:,:]))**2
if i % 100 == 0:
print(i)
pz_avg = .5*pz.mean(axis=0)
    # multiply by dk to obtain the corresponding integral
ens_spec = np.zeros(len(k))
for i in range(len(k)):
ens_spec[i] = np.sum(pz_avg[rcoords[i][0],rcoords[i][1]])*dk
return k[1:],ens_spec[1:] # eliminate zero wavenumber
## read data
for r,i in zip(runfolder,range(len(runfolder))):
runpath = path+'data/run%04i' % r
if i == 0:
u = np.load(runpath+'/u_sub.npy')
v = np.load(runpath+'/v_sub.npy')
#h = np.load(runpath+'/h_sub.npy')
time = np.load(runpath+'/t_sub.npy')
print('run %i read.' % r)
else:
u = np.concatenate((u,np.load(runpath+'/u_sub.npy')))
v = np.concatenate((v,np.load(runpath+'/v_sub.npy')))
#h = np.concatenate((h,np.load(runpath+'/h_sub.npy')))
time = np.hstack((time,np.load(runpath+'/t_sub.npy')))
print('run %i read.' % r)
t = time / 3600. / 24. # in days
tlen = len(time)
dt = time[1] - time[0]
## read param
global param
param = np.load(runpath+'/param.npy').all()
param['output'] = 0
set_grad_mat()
set_interp_mat()
#reshape u,v
u = u.reshape((tlen,param['Nu'])).T
v = v.reshape((tlen,param['Nv'])).T
z = (Gvx.dot(v) - Guy.dot(u)).T.reshape((tlen,param['ny']+1,param['nx']+1))
del u,v
##
k,p = ens_spec_avg(z,param['dx'],param['dy'])
##
dic = dict()
all_var2export = ['k','p']
for v in all_var2export:
exec('dic[v] ='+v)
np.save(runpath+'/analysis/spec_ens.npy',dic)
print('Everything stored.')
| gpl-3.0 |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/matplotlib/testing/compare.py | 11 | 12935 | """
Provides a collection of utilities for comparing (image) results.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import hashlib
import os
import shutil
import numpy as np
import matplotlib
from matplotlib.compat import subprocess
from matplotlib.testing.noseclasses import ImageComparisonFailure
from matplotlib import _png
from matplotlib import _get_cachedir
from matplotlib import cbook
from distutils import version
__all__ = ['compare_float', 'compare_images', 'comparable_formats']
def make_test_filename(fname, purpose):
"""
Make a new filename by inserting `purpose` before the file's
extension.
"""
base, ext = os.path.splitext(fname)
return '%s-%s%s' % (base, purpose, ext)
def compare_float(expected, actual, relTol=None, absTol=None):
"""
Fail if the floating point values are not close enough, with
the given message.
You can specify a relative tolerance, absolute tolerance, or both.
"""
if relTol is None and absTol is None:
        raise ValueError("You haven't specified a 'relTol' relative "
                         "tolerance or an 'absTol' absolute tolerance "
                         "function argument. You must specify one.")
msg = ""
if absTol is not None:
absDiff = abs(expected - actual)
if absTol < absDiff:
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Abs diff: {absDiff}',
'Abs tol: {absTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
if relTol is not None:
# The relative difference of the two values. If the expected value is
# zero, then return the absolute value of the difference.
relDiff = abs(expected - actual)
if expected:
relDiff = relDiff / abs(expected)
if relTol < relDiff:
# The relative difference is a ratio, so it's always unit-less.
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Rel diff: {relDiff}',
'Rel tol: {relTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
return msg or None
def get_cache_dir():
cachedir = _get_cachedir()
if cachedir is None:
raise RuntimeError('Could not find a suitable configuration directory')
cache_dir = os.path.join(cachedir, 'test_cache')
if not os.path.exists(cache_dir):
try:
cbook.mkdirs(cache_dir)
except IOError:
return None
if not os.access(cache_dir, os.W_OK):
return None
return cache_dir
def get_file_hash(path, block_size=2 ** 20):
md5 = hashlib.md5()
with open(path, 'rb') as fd:
while True:
data = fd.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def make_external_conversion_command(cmd):
def convert(old, new):
cmdline = cmd(old, new)
pipe = subprocess.Popen(
cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if not os.path.exists(new) or errcode:
msg = "Conversion command failed:\n%s\n" % ' '.join(cmdline)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
return convert
def _update_converter():
gs, gs_v = matplotlib.checkdep_ghostscript()
if gs_v is not None:
cmd = lambda old, new: \
[gs, '-q', '-sDEVICE=png16m', '-dNOPAUSE', '-dBATCH',
'-sOutputFile=' + new, old]
converter['pdf'] = make_external_conversion_command(cmd)
converter['eps'] = make_external_conversion_command(cmd)
if matplotlib.checkdep_inkscape() is not None:
cmd = lambda old, new: \
['inkscape', '-z', old, '--export-png', new]
converter['svg'] = make_external_conversion_command(cmd)
#: A dictionary that maps filename extensions to functions which
#: themselves map arguments `old` and `new` (filenames) to a list of strings.
#: The list can then be passed to Popen to convert files with that
#: extension to png format.
converter = {}
_update_converter()
def comparable_formats():
"""
Returns the list of file formats that compare_images can compare
on this system.
"""
return ['png'] + list(six.iterkeys(converter))
def convert(filename, cache):
"""
Convert the named file into a png file. Returns the name of the
created file.
If *cache* is True, the result of the conversion is cached in
`matplotlib._get_cachedir() + '/test_cache/'`. The caching is based
    on a hash of the exact contents of the input file. There is no limit
on the size of the cache, so it may need to be manually cleared
periodically.
"""
base, extension = filename.rsplit('.', 1)
if extension not in converter:
raise ImageComparisonFailure(
"Don't know how to convert %s files to png" % extension)
newname = base + '_' + extension + '.png'
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
# Only convert the file if the destination doesn't already exist or
# is out of date.
if (not os.path.exists(newname) or
os.stat(newname).st_mtime < os.stat(filename).st_mtime):
if cache:
cache_dir = get_cache_dir()
else:
cache_dir = None
if cache_dir is not None:
hash_value = get_file_hash(filename)
new_ext = os.path.splitext(newname)[1]
cached_file = os.path.join(cache_dir, hash_value + new_ext)
if os.path.exists(cached_file):
shutil.copyfile(cached_file, newname)
return newname
converter[extension](filename, newname)
if cache_dir is not None:
shutil.copyfile(newname, cached_file)
return newname
#: Maps file extensions to a function which takes a filename as its
#: only argument to return a list suitable for execution with Popen.
#: The purpose of this is so that the result file (with the given
#: extension) can be verified with tools such as xmllint for svg.
verifiers = {}
# Turning this off, because it seems to cause multiprocessing issues
if matplotlib.checkdep_xmllint() and False:
verifiers['svg'] = lambda filename: [
'xmllint', '--valid', '--nowarning', '--noout', filename]
def verify(filename):
"""Verify the file through some sort of verification tool."""
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
base, extension = filename.rsplit('.', 1)
verifier = verifiers.get(extension, None)
if verifier is not None:
cmd = verifier(filename)
pipe = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if errcode != 0:
msg = "File verification command failed:\n%s\n" % ' '.join(cmd)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
# clip the images to the same size -- this is useful only when
# comparing eps to pdf
if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
aw, ah = actual_image.shape
ew, eh = expected_image.shape
actual_image = actual_image[int(aw / 2 - ew / 2):int(
aw / 2 + ew / 2), int(ah / 2 - eh / 2):int(ah / 2 + eh / 2)]
return actual_image, expected_image
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
num_values = np.prod(expectedImage.shape)
abs_diff_image = abs(expectedImage - actualImage)
# On Numpy 1.6, we can use bincount with minlength, which is much
# faster than using histogram
expected_version = version.LooseVersion("1.6")
found_version = version.LooseVersion(np.__version__)
if found_version >= expected_version:
histogram = np.bincount(abs_diff_image.ravel(), minlength=256)
else:
histogram = np.histogram(abs_diff_image, bins=np.arange(257))[0]
sum_of_squares = np.sum(histogram * np.arange(len(histogram)) ** 2)
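    # e.g. if every pixel differs by exactly 3, sum_of_squares = 9 * num_values and rms = 3.0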
rms = np.sqrt(float(sum_of_squares) / num_values)
return rms
def compare_images(expected, actual, tol, in_decorator=False):
"""
Compare two "image" files checking differences within a tolerance.
The two given filenames may point to files which are convertible to
PNG via the `.converter` dictionary. The underlying RMS is calculated
with the `.calculate_rms` function.
Parameters
----------
expected : str
The filename of the expected image.
    actual : str
The filename of the actual image.
tol : float
The tolerance (a color value difference, where 255 is the
        maximal difference). The test fails if the RMS pixel
        difference is greater than this value.
in_decorator : bool
If called from image_comparison decorator, this should be
True. (default=False)
Example
-------
img1 = "./baseline/plot.png"
img2 = "./output/plot.png"
    compare_images(img1, img2, 0.001)
"""
if not os.path.exists(actual):
msg = "Output image %s does not exist." % actual
raise Exception(msg)
if os.stat(actual).st_size == 0:
msg = "Output image file %s is empty." % actual
raise Exception(msg)
verify(actual)
# Convert the image to png
extension = expected.split('.')[-1]
if not os.path.exists(expected):
raise IOError('Baseline image %r does not exist.' % expected)
if extension != 'png':
actual = convert(actual, False)
expected = convert(expected, True)
# open the image files and remove the alpha channel (if it exists)
expectedImage = _png.read_png_int(expected)
actualImage = _png.read_png_int(actual)
expectedImage = expectedImage[:, :, :3]
actualImage = actualImage[:, :, :3]
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
# convert to signed integers, so that the images can be subtracted without
# overflow
expectedImage = expectedImage.astype(np.int16)
actualImage = actualImage.astype(np.int16)
rms = calculate_rms(expectedImage, actualImage)
diff_image = make_test_filename(actual, 'failed-diff')
if rms <= tol:
if os.path.exists(diff_image):
os.unlink(diff_image)
return None
save_diff_image(expected, actual, diff_image)
results = dict(rms=rms, expected=str(expected),
actual=str(actual), diff=str(diff_image), tol=tol)
if not in_decorator:
# Then the results should be a string suitable for stdout.
template = ['Error: Image files did not match.',
'RMS Value: {rms}',
'Expected: \n {expected}',
'Actual: \n {actual}',
'Difference:\n {diff}',
'Tolerance: \n {tol}', ]
results = '\n '.join([line.format(**results) for line in template])
return results
def save_diff_image(expected, actual, output):
expectedImage = _png.read_png(expected)
actualImage = _png.read_png(actual)
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
expectedImage = np.array(expectedImage).astype(np.float)
actualImage = np.array(actualImage).astype(np.float)
assert expectedImage.ndim == actualImage.ndim
assert expectedImage.shape == actualImage.shape
absDiffImage = abs(expectedImage - actualImage)
# expand differences in luminance domain
absDiffImage *= 255 * 10
save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:, :, 0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:, :, 3] = 255
_png.write_png(save_image_np.tostring(), width, height, output)
| gpl-2.0 |
aselle/tensorflow | tensorflow/python/estimator/canned/baseline_test.py | 5 | 55313 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for baseline.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator.canned import baseline
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
BIAS_NAME = 'baseline/bias'
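# assert_close returns an op asserting |expected - actual| / |expected| < rtol
# element-wise; the mocked optimizers below use it to verify the training loss.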
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
rtol = ops.convert_to_tensor(rtol, name='rtol')
return check_ops.assert_less(
rdiff,
rtol,
data=('Condition expected =~ actual did not hold element-wise:'
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
def save_variables_to_ckpt(model_dir):
init_all_op = [variables.global_variables_initializer()]
with tf_session.Session() as sess:
sess.run(init_all_op)
saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
def sorted_key_dict(unsorted_dict):
return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}
def sigmoid(x):
return 1 / (1 + np.exp(-1.0 * x))
def _baseline_regressor_fn(*args, **kwargs):
return baseline.BaselineRegressor(*args, **kwargs)
def _baseline_classifier_fn(*args, **kwargs):
return baseline.BaselineClassifier(*args, **kwargs)
# Tests for Baseline Regressor.
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaselineRegressorEvaluationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(
input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)
# Logit is bias = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(
input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
baseline_regressor = _baseline_regressor_fn(
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
    # average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
label_dim = 2
with ops.Graph().as_default():
variables.Variable([46.0, 58.0], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = baseline_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, ops.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is bias which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaselineRegressorPredictTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with ops.Graph().as_default():
variables.Variable([.2], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = baseline_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
    # prediction = bias = .2 (BaselineRegressor ignores the input features)
self.assertAllClose([[.2]], predicted_scores)
  def testMultiDim(self):
    """Tests predict when all variables are multi-dimensional."""
batch_size = 2
label_dimension = 3
with ops.Graph().as_default():
variables.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = baseline_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = bias, shape=[batch_size, label_dimension]
self.assertAllClose([[0.2, 0.4, 0.6], [0.2, 0.4, 0.6]],
predicted_scores)
class BaselineRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1 dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum[:label_dimension])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaselineRegressorTrainingTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
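    # Builds a mock Optimizer whose minimize() asserts that the bias is the only
    # trainable variable, optionally checks the training loss against expected_loss,
    # and then simply increments the global step (or returns a no-op).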
expected_var_names = [
'%s:0' % BIAS_NAME
]
def _minimize(loss, global_step=None, var_list=None):
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
label_dimension,
expected_global_step,
expected_bias=None):
shapes = {
name: shape
for (name, shape) in checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(self._model_dir,
ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([label_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
checkpoint_utils.load_variable(self._model_dir,
BIAS_NAME))
def testFromScratchWithDefaultOptimizer(self):
# Create BaselineRegressor.
label = 5.
age = 17
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(label_dimension=1, expected_global_step=num_steps)
def testTrainWithOneDimLabel(self):
label_dimension = 1
batch_size = 20
est = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(label_dimension=1, expected_global_step=200)
def testTrainWithOneDimWeight(self):
label_dimension = 1
batch_size = 20
est = _baseline_regressor_fn(
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1,
'w': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(label_dimension=1, expected_global_step=200)
def testFromScratch(self):
# Create BaselineRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = self._mock_optimizer(expected_loss=25.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=num_steps,
expected_bias=[0.])
def testFromCheckpoint(self):
# Create initial checkpoint.
bias = 7.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
    # logits = bias = 7.
# loss = (logits - label)^2 = (7 - 5)^2 = 4
mock_optimizer = self._mock_optimizer(expected_loss=4.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=initial_global_step + num_steps,
expected_bias=[bias])
def testFromCheckpointMultiBatch(self):
# Create initial checkpoint.
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias
# logits[0] = 5.
# logits[1] = 5.
# loss = sum(logits - label)^2 = (5 - 5)^2 + (5 - 3)^2 = 4
mock_optimizer = self._mock_optimizer(expected_loss=4.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
# Tests for Baseline Classifier.
class BaselineClassifierTrainingTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(
expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return distribute_lib.increment_var(global_step)
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
return distribute_lib.increment_var(global_step)
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(
self, n_classes, expected_global_step, expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
shapes = {
name: shape for (name, shape) in
checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
checkpoint_utils.load_variable(
self._model_dir, ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
checkpoint_utils.load_variable(
self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
# For binary classifier:
# loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( sigmoid(logits) ) = 0.69315
# For multi class classifier:
# loss = cross_entropy(logits, label) where logits are all 0s (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( 1.0 / n_classes )
# For this particular test case, as logits are same, the formula
# 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
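    # Worked values for reference: n_classes=2 gives -log(1/2) ~= 0.6931 and
    # n_classes=4 gives -log(1/4) ~= 1.3863.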
mock_optimizer = self._mock_optimizer(
expected_loss=-1 * math.log(1.0/n_classes))
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = bias = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = bias and label = 1
# so, loss = 1 * -log ( softmax(logits)[1] )
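    # Worked values for reference: sigmoid(-1) ~= 0.2689, so the binary loss is
    # -log(0.2689) ~= 1.3133; for n_classes=4 the softmax is uniform (0.25 per
    # class), giving -log(0.25) ~= 1.3863.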
if n_classes == 2:
expected_loss = 1.3133
else:
logits = bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[label])
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
bias = [-1.0]
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
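    # Breakdown: 0.8 * 1.3133 + 0.2 * 0.3133 ~= 1.0506 + 0.0627 ~= 1.1133.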
mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17, 18.5]
    # The bias variable is a single value for the binary case and one value
    # per class for the multi-class case.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = bias
# logits[0] = -1.
# logits[1] = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
# loss[1] = (1 - 0) * -log ( 1- sigmoid(-1) ) = 0.3132
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = bias and label = [1, 0]
# so, loss = 1 * -log ( softmax(logits)[label] )
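    # Worked values for reference: the binary total is 1.3133 + 0.3132 ~= 1.6265;
    # for n_classes=4 each row contributes -log(0.25) ~= 1.3863, so the total is
    # ~= 2.7726.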
if n_classes == 2:
expected_loss = (1.3133 + 0.3132)
else:
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': (age)}, (label)),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
class BaselineClassifierEvaluationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_evaluation_for_simple_data(self, n_classes):
label = 1
age = 1.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1)
if n_classes == 2:
# Binary classes: loss = -log(sigmoid(-1)) = 1.3133
# Prediction = sigmoid(-1) = 0.2689
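      # Since sigmoid(-1) ~= 0.2689 < 0.5 the predicted class is 0 while the
      # label is 1, so accuracy, precision and recall are all 0 here.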
expected_metrics = {
metric_keys.MetricKeys.LOSS: 1.3133,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: 1.3133,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
metric_keys.MetricKeys.LABEL_MEAN: 1.,
metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 1.,
}
else:
# Multi classes: loss = 1 * -log ( softmax(logits)[label] )
logits = bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[label])
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=2)
def test_multi_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=4)
def _test_evaluation_batch(self, n_classes):
"""Tests evaluation for batch_size==2."""
label = [1, 0]
age = [17., 18.]
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., -1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
# Prediction = sigmoid(-1) = 0.2689
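      # Both predictions fall below 0.5 and map to class 0, so only the second
      # label (0) is matched and accuracy is 1/2.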
expected_loss = 1.3133 + 0.3132
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.5,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
metric_keys.MetricKeys.LABEL_MEAN: 0.5,
metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
metric_keys.MetricKeys.AUC: 0.5,
metric_keys.MetricKeys.AUC_PR: 0.75,
}
else:
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.5,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=2)
def test_multi_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=4)
def _test_evaluation_weights(self, n_classes):
"""Tests evaluation with weights."""
label = [1, 0]
age = [17., 18.]
weights = [1., 2.]
    # The bias variable is a single value for the binary case and one value
    # per class for the multi-class case; the per-example evaluation weights
    # are fed through the 'w' feature below.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., -1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
# weights = [1., 2.]
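      # Worked values for reference: expected_loss ~= 1.3133 * 1. + 0.3132 * 2.
      # ~= 1.9397 and loss_mean ~= 1.9397 / 3 ~= 0.6466.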
expected_loss = 1.3133 * 1. + 0.3132 * 2.
loss_mean = expected_loss / (1.0 + 2.0)
label_mean = np.average(label, weights=weights)
logits = [-1, -1]
logistics = sigmoid(np.array(logits))
predictions_mean = np.average(logistics, weights=weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
metric_keys.MetricKeys.LABEL_MEAN: label_mean,
metric_keys.MetricKeys.ACCURACY_BASELINE: (
max(label_mean, 1-label_mean)),
metric_keys.MetricKeys.AUC: 0.5,
metric_keys.MetricKeys.AUC_PR: 2. / (1. + 2.),
}
else:
# Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
loss_mean = np.average([expected_loss_0, expected_loss_1],
weights=weights)
expected_loss = loss_mean * np.sum(weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=2)
def test_multi_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=4)
class BaselineClassifierPredictTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
"""Tests predict when all variables are one-dimensional."""
age = 1.
bias = [10.0] if n_classes == 2 else [10.0] * n_classes
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
label_vocabulary=label_vocabulary,
n_classes=n_classes,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([[age]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = list(est.predict(input_fn=predict_input_fn))
if n_classes == 2:
scalar_logits = bias[0]
two_classes_logits = [0, scalar_logits]
two_classes_logits_exp = np.exp(two_classes_logits)
softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
expected_predictions = {
'class_ids': [1],
'classes': [label_output_fn(1)],
'logistic': [sigmoid(np.array(scalar_logits))],
'logits': [scalar_logits],
'probabilities': softmax,
}
else:
onedim_logits = np.array(bias)
class_ids = onedim_logits.argmax()
logits_exp = np.exp(onedim_logits)
softmax = logits_exp / logits_exp.sum()
expected_predictions = {
'class_ids': [class_ids],
'classes': [label_output_fn(class_ids)],
'logits': onedim_logits,
'probabilities': softmax,
}
self.assertEqual(1, len(predictions))
# assertAllClose cannot handle byte type.
self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
expected_predictions.pop('classes')
predictions[0].pop('classes')
self.assertAllClose(sorted_key_dict(expected_predictions),
sorted_key_dict(predictions[0]))
def testBinaryClassesWithoutLabelVocabulary(self):
n_classes = 2
self._testPredictions(n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testBinaryClassesWithLabelVocabulary(self):
n_classes = 2
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testMultiClassesWithoutLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testMultiClassesWithLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
class BaselineClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1-dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size, dtype=np.int64)
serialized_examples = []
for x, y in zip(data, target):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=x)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[y])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=4)
# Tests for Baseline logit_fn.
class BaselineLogitFnTest(test.TestCase):
def test_basic_logit_correctness(self):
"""baseline_logit_fn simply returns the bias variable."""
with ops.Graph().as_default():
logit_fn = baseline._baseline_logit_fn_builder(num_outputs=2)
logits = logit_fn(features={'age': [[23.], [31.]]})
with variable_scope.variable_scope('baseline', reuse=True):
bias_var = variable_scope.get_variable('bias')
with tf_session.Session() as sess:
sess.run([variables.global_variables_initializer()])
self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
sess.run(bias_var.assign([10., 5.]))
self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
if __name__ == '__main__':
test.main()
| apache-2.0 |
rhattersley/iris | lib/iris/tests/unit/plot/test_contour.py | 11 | 2995 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.contour` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.contour(self.cube, coords=('bar', 'str_coord'))
self.assertPointsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.contour(self.cube, coords=('str_coord', 'bar'))
self.assertPointsTickLabels('xaxis')
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contour(self.cube, axes=ax, coords=('bar', 'str_coord'))
plt.close(fig)
self.assertPointsTickLabels('yaxis', ax)
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contour(self.cube, axes=ax, coords=('str_coord', 'bar'))
plt.close(fig)
self.assertPointsTickLabels('xaxis', ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.contour, self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord('foo').points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord('bar').points
self.bar_index = np.arange(self.bar.size)
self.data = self.cube.data
self.dataT = self.data.T
self.mpl_patch = self.patch('matplotlib.pyplot.contour')
self.draw_func = iplt.contour
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
JohanComparat/pySU | galaxy/bin_LF/inDev/LF_3d_plot.py | 1 | 3537 | import numpy as n
import matplotlib
matplotlib.rcParams['font.size']=14
import matplotlib.pyplot as p
# Axes3D is imported for its side effect of registering the '3d' projection
# used by fig.add_subplot(111, projection='3d') below.
from mpl_toolkits.mplot3d import Axes3D
import glob
import sys
from scipy.optimize import curve_fit
import cPickle
import os
from os.path import join
data_dir = os.environ['DATA_DIR']
Pdir = join(data_dir,"Products_Galaxies", "emissionLineLuminosityFunctions")
# "/home/comparat/database/Products_Galaxies/emissionLineLuminosityFunctions/" # on eboss
lines = "H1_4862", "O3_5007", "O2_3728"
###################### O2 3728 #######################
###################### O2 3728 #######################
###################### O2 3728 #######################
line = lines[2]
files = n.array(glob.glob(join(Pdir,line, "*.points")))
files.sort()
print files, line
logls = lambda z : -0.182 * z + 41.272
ps = lambda z: 0.741 * z - 2.78
a = -1.8
sig = 0.53
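# The lambdas below implement a Saunders-type luminosity function,
#   Phi(L, z) = Phi*(z) * (L / L*(z))**(a + 1)
#               * exp(-log10(1 + L / L*(z))**2 / (2 * sig**2)),
# where log10 L*(z) and log10 Phi*(z) are linear in redshift; the best-fit,
# upper and lower parameter sets are given below.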
saundersFct=lambda logl, z : 10**ps(z) * (10**logl/10**logls(z))**(a+1) * n.e**( -n.log10( 1 +10**logl/10**logls(z))**2./(2*sig**2.))
loglsUP = lambda z : -0.182 * z + 41.421
psUP = lambda z: 0.741 * z - 3.026
saundersFctUp=lambda logl, z : 10**psUP(z) * (10**logl/10**loglsUP(z))**(a+1) * n.e**( -n.log10( 1 +10**logl/10**loglsUP(z))**2./(2*sig**2.))
loglsLOW = lambda z : -0.182 * z + 41.123
psLOW = lambda z: 0.741 * z - 2.534
saundersFctLow=lambda logl, z : 10**psLOW(z) * (10**logl/10**loglsLOW(z))**(a+1) * n.e**( -n.log10( 1 +10**logl/10**loglsLOW(z))**2./(2*sig**2.))
#log_{10}(L_*) & =( -0.182 \pm 0.21 ) z + ( 41.272 \pm 0.149 ) \\
#log_{10}(\Phi_*) & =( 0.741 \pm 0.347 ) z + ( -2.78 \pm 0.246 ) \\
#\alpha &= -1.8 \pm 1.1 \\% ({\rm fixed}) \\
#\sigma &= 0.53 \; ({\rm fixed}) \\
logl, phi, phierr, z = [], [], [], []
for ii in range(len(files)):
x, y, ye = n.loadtxt(files[ii],unpack=True)
logl.append(n.log10(x))
phi.append(y)
phierr.append(ye)
zz = float(files[ii].split('_')[-1].split('.')[-2][1:])/1000.
if zz>0.5 :
z.append( n.ones_like(ye)*zz )
if zz<0.5 :
z.append( n.ones_like(ye)*1.2 )
logl = n.hstack((logl))
phi = n.hstack((phi))
phierr = n.hstack((phierr))
z = n.hstack(( z ))
ys = n.arange(z.min()-2*0.05,z.max()+2*0.05,0.05)
xs = n.arange(logl.min()-2*0.1, logl.max()+2*0.1,0.1)
X,Y = n.meshgrid(xs,ys)
Z = saundersFct(X,Y)
Z_up = saundersFctUp(X,Y)
Z_low = saundersFctLow(X,Y)
fig = p.figure(1,(9,9))
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, Y, n.log10(Z))#, rstride=10, cstride=10)
ax.plot_wireframe(X, Y, n.log10(Z_up))#, rstride=10, cstride=10)
ax.plot_wireframe(X, Y, n.log10(Z_low))#, rstride=10, cstride=10)
ok = (phi>phierr)
sc1 = ax.scatter(logl[ok],z[ok],n.log10(phi[ok]), s=n.ones_like(z[ok])*5, c='r', marker='o')
sc1.set_edgecolor('face')
#sc1 = ax.errorbar(logl[ok],z[ok],n.log10(phi[ok]), yerr=phierr[ok]/phi[ok])
ax.legend()
ax.set_xlabel(r'log ($L[O_{II}]/$ [erg s$^{-1}$])')
ax.set_ylabel('redshift')
ax.set_ylim((0.5,1.25))
ax.set_zlabel(r'$\Phi$ [Mpc$^{-3}/$dlog L]')
#ax.set_yscale('log')
#ax.set_zscale('log')
p.savefig(join(Pdir ,line, "LF-evolution.pdf"))
p.show()
fig = p.figure(1,(9,9))
ax = fig.add_subplot(111, projection='3d')
sc1 = ax.scatter(logl[ok],z[ok],phi[ok]/saundersFct(logl[ok],z[ok]), s=n.ones_like(z[ok])*3, c='r', marker='o', rasterized=True)
sc1.set_edgecolor('face')
ax.legend()
ax.set_xlabel(r'log ($L[O_{II}]/$ [erg s$^{-1}$])')
ax.set_ylabel('redshift')
ax.set_ylim((0.5,1.25))
ax.set_zlim((0,2))
ax.set_zlabel(r'data / model')
#ax.set_yscale('log')
#ax.set_zscale('log')
p.savefig(join(Pdir ,line, "LF-evolution-data-model-ratio.pdf"))
p.show()
sys.exit()
| cc0-1.0 |
khalidm/hiplexpipe | scripts/alignment_stats.py | 1 | 9100 | import argparse
import os
import time
import logging as log
import pandas as pd
import yaml
import subprocess
#import matplotlib
from os import listdir
from os.path import isfile, join
#matplotlib.use('Agg') # non-interactive backend
#import matplotlib.pyplot as plt
#import seaborn as sns
import numpy as np
from string import Template
from subprocess import Popen, PIPE
import pysam
import pybedtools
from scipy import stats
def getColumn(col_num, data):
column = []
for row in data:
row_val = int(row[col_num])
column.append(row_val)
return column
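# The helpers below summarise the per-interval depth values, read from column
# index 5 of each row of the bedtools coverage output stored in `c`.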
def calculate_median_depth(c):
    column = getColumn(5, c)
return np.median(np.array(column))
def calculate_average_depth(c):
count = 0;
total = len(c);
for row in c:
count = count + int(row[5])
average = float(count*1.0/total*1.0)
return average
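# Percentage of target intervals whose depth lies between 0 and `depth` inclusive.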
def calculate_zero_depth_intervals(c, depth):
count = 0;
total = len(c);
for row in c:
if(int(row[5]) >= 0 and int(row[5]) <= depth):
count = count + 1
percent = (count*1.0/total)*100.0
#return the percentage
return percent
def calculate_x_depth_intervals(c, depth):
count = 0;
total = len(c);
for row in c:
if(int(row[5]) >= depth):
count = count + 1
percent = (count*1.0/total)*100.0
#return the percentage
return percent
def calculate_x_depth_intervals_folds(c, start_depth, end_depth):
count = 0;
total = len(c);
for row in c:
if(int(row[5]) >= start_depth and int(row[5]) <= end_depth):
count = count + 1
percent = (count*1.0/total)*100.0
#return the percentage
return percent
def main():
"""The main function"""
current_dir = os.getcwd()
#mypath = current_dir + "/bams/"
mypath = current_dir + "/alignments/"
#bed_file = "/vlsci/UOM0040/shared/djp/rover_file/crc/CRC_10g_23May16.final.rover.bed"
fastq_dir = current_dir + "/fastqs/"
config_file = "pipeline.config"
with open(config_file, 'r') as stream:
try:
bed_file = yaml.load(stream)['target_bed']
except yaml.YAMLError as exc:
            print("Error with config file: " + str(exc))
onlyfiles = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".primary.primerclipped.bam"):
current_file = mypath + str(file)
onlyfiles.append(os.path.join(root, file))
#onlyfiles = [files for files in listdir(mypath) if (isfile(join(mypath, files)) and (files.endswith('.primary.primerclipped.bam')))]
#file_paths = [join(mypath,files) for files in listdir(mypath) if (isfile(join(mypath, files)) and (files.endswith('.bam')))]
#onlyfiles = [files for files in listdir(mypath) if (files.endswith(''))]
#print onlyfiles
#print len(onlyfiles)
# stats list
header = '\t'.join([ 'Sample_ID', 'Total_fastq_reads', 'Primary_reads', 'Reads_mapping_to_genome' , 'Reads_mapping_to_target', 'Percent_reads_mapping_to_genome', 'Percent_reads_mapping_to_target', 'Average_depth', \
'Percent_target_not_covered', 'Percent_target_covered_at_<10X', 'Percent_target_covered_at_10X', 'Percent_target_covered_at_20X', 'Percent_target_covered_at_50X', 'Median_depth', \
'Percent_target_covered_at_median', \
'Percent_target_covered_at_median_10_fold', 'Percent_target_covered_at_median_20_fold', 'Percent_target_covered_at_median_30_fold', \
'Percent_target_covered_at_median_40_fold', 'Percent_target_covered_at_median_50_fold'])
#, 'Percent_target_covered_at_q50', \
#'Percent_target_covered_at_q60', 'Percent_target_covered_at_q70', 'Percent_target_covered_at_q80'])
#header = "Sample\tTotal_reads\tMapped_reads"
print header
for bam_file in onlyfiles:
current_bam_file = join(mypath, bam_file)
temp_bam_file = os.path.basename(current_bam_file)
sample = temp_bam_file.replace(".primary.primerclipped.bam", "")
fastq1 = fastq_dir + sample + "_L01_R1_001.fastq"
fastq2 = fastq_dir + sample + "_L01_R2_001.fastq"
fastq1_lc = int(subprocess.check_output(["wc", "-l", fastq1]).lstrip(' ').split(' ')[0])
fastq2_lc = int(subprocess.check_output(["wc", "-l", fastq2]).lstrip(' ').split(' ')[0])
total_fastq_lines = fastq1_lc + fastq2_lc
total_fastq_reads = total_fastq_lines / 4
flagstats = pysam.flagstat(current_bam_file)
all_reads = int(flagstats.split('\n')[0].split('+')[0])
reads_mapping_to_genome = int(flagstats.split('\n')[5].split('+')[0])
x = pybedtools.example_bedtool(current_bam_file)
b = pybedtools.example_bedtool(bed_file)
y = x.intersect(b).moveto(join(mypath, 'temp.bam'))
c = b.coverage(x)
average_depth = calculate_average_depth(c)
median_depth = calculate_median_depth(c)
percent_target_not_covered = calculate_zero_depth_intervals(c, 0)
percent_target_covered_at_L10X = calculate_zero_depth_intervals(c, 10)
percent_target_covered_at_10X = calculate_x_depth_intervals(c, 10)
percent_target_covered_at_20X = calculate_x_depth_intervals(c, 20)
percent_target_covered_at_50X = calculate_x_depth_intervals(c, 50)
percent_target_covered_at_median = calculate_x_depth_intervals_folds(c, median_depth, median_depth)
# Using percentage from median
#percent_target_covered_at_median_X10 = calculate_x_depth_intervals_folds(c, (median_depth - median_depth * (10.0/100)), (median_depth + median_depth * (10.0/100)))
'''
percent_target_covered_at_median_10_fold = calculate_x_depth_intervals_folds(c, (median_depth - (median_depth * 0.10)), (median_depth + (median_depth * 0.10)))
percent_target_covered_at_median_20_fold = calculate_x_depth_intervals_folds(c, (median_depth - (median_depth * 0.20)), (median_depth + (median_depth * 0.20)))
percent_target_covered_at_median_50_fold = calculate_x_depth_intervals_folds(c, (median_depth - (median_depth * 0.50)), (median_depth + (median_depth * 0.50)))
percent_target_covered_at_median_60_fold = calculate_x_depth_intervals_folds(c, (median_depth - (median_depth * 0.60)), (median_depth + (median_depth * 0.60)))
percent_target_covered_at_median_70_fold = calculate_x_depth_intervals_folds(c, (median_depth - (median_depth * 0.70)), (median_depth + (median_depth * 0.70)))
percent_target_covered_at_median_80_fold = calculate_x_depth_intervals_folds(c, (median_depth - (median_depth * 0.80)), (median_depth + (median_depth * 0.80)))
'''
percent_target_covered_at_median_10_fold = calculate_x_depth_intervals(c, (median_depth / (10)))
percent_target_covered_at_median_20_fold = calculate_x_depth_intervals(c, (median_depth / (20)))
percent_target_covered_at_median_30_fold = calculate_x_depth_intervals(c, (median_depth / (30)))
percent_target_covered_at_median_40_fold = calculate_x_depth_intervals(c, (median_depth / (40)))
percent_target_covered_at_median_50_fold = calculate_x_depth_intervals(c, (median_depth / (50)))
stats_temp = pysam.flagstat(join(mypath,'temp.bam'))
on_target_reads = int(stats_temp.split('\n')[0].split('+')[0])
reads_mapping_to_target = int(stats_temp.split('\n')[5].split('+')[0])
#percent_reads_mapping_to_genome = ((reads_mapping_to_genome * 1.0)/all_reads)*100.0
percent_reads_mapping_to_genome = ((reads_mapping_to_genome * 1.0)/total_fastq_reads)*100.0
#percent_reads_mapping_to_target = ((reads_mapping_to_target * 1.0)/on_target_reads)*100.0
percent_reads_mapping_to_target = ((reads_mapping_to_target * 1.0)/total_fastq_reads)*100.0
os.remove(join(mypath,'temp.bam'))
print("%s\t%d\t%d\t%d\t%d\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f" % (sample, total_fastq_reads, all_reads, reads_mapping_to_genome, reads_mapping_to_target, \
percent_reads_mapping_to_genome, percent_reads_mapping_to_target, average_depth, \
percent_target_not_covered, percent_target_covered_at_L10X, percent_target_covered_at_10X, percent_target_covered_at_20X, percent_target_covered_at_50X, \
median_depth, percent_target_covered_at_median, \
percent_target_covered_at_median_10_fold, percent_target_covered_at_median_20_fold, percent_target_covered_at_median_30_fold, \
percent_target_covered_at_median_40_fold, percent_target_covered_at_median_50_fold))
#print bam_file + "\t" + str(all_reads) + "\t" + str(reads_mapping_to_genome) + "\t" + str(reads_mapping_to_target) + "\t" + \
# str(percent_reads_mapping_to_genome) + "\t" + str(percent_reads_mapping_to_target) + "\t" + str(average_depth) + "\t" + \
# str(percent_target_not_covered) + "\t" + str(percent_target_covered_at_10X) + "\t" + str(percent_target_covered_at_20X) + "\t" + str(percent_target_covered_at_50X)
if __name__ == '__main__':
main()
| mit |
WarrenWeckesser/numpy | benchmarks/benchmarks/bench_io.py | 17 | 7644 | from .common import Benchmark, get_squares
import numpy as np
from io import StringIO
class Copy(Benchmark):
params = ["int8", "int16", "float32", "float64",
"complex64", "complex128"]
param_names = ['type']
def setup(self, typename):
dtype = np.dtype(typename)
self.d = np.arange((50 * 500), dtype=dtype).reshape((500, 50))
self.e = np.arange((50 * 500), dtype=dtype).reshape((50, 500))
self.e_d = self.e.reshape(self.d.shape)
self.dflat = np.arange((50 * 500), dtype=dtype)
def time_memcpy(self, typename):
self.d[...] = self.e_d
def time_memcpy_large_out_of_place(self, typename):
l = np.ones(1024**2, dtype=np.dtype(typename))
l.copy()
def time_cont_assign(self, typename):
self.d[...] = 1
def time_strided_copy(self, typename):
self.d[...] = self.e.T
def time_strided_assign(self, typename):
self.dflat[::2] = 2
class CopyTo(Benchmark):
def setup(self):
self.d = np.ones(50000)
self.e = self.d.copy()
self.m = (self.d == 1)
self.im = (~ self.m)
self.m8 = self.m.copy()
self.m8[::8] = (~ self.m[::8])
self.im8 = (~ self.m8)
def time_copyto(self):
np.copyto(self.d, self.e)
def time_copyto_sparse(self):
np.copyto(self.d, self.e, where=self.m)
def time_copyto_dense(self):
np.copyto(self.d, self.e, where=self.im)
def time_copyto_8_sparse(self):
np.copyto(self.d, self.e, where=self.m8)
def time_copyto_8_dense(self):
np.copyto(self.d, self.e, where=self.im8)
class Savez(Benchmark):
def setup(self):
self.squares = get_squares()
def time_vb_savez_squares(self):
np.savez('tmp.npz', **self.squares)
class LoadtxtCSVComments(Benchmark):
# benchmarks for np.loadtxt comment handling
# when reading in CSV files
params = [10, int(1e2), int(1e4), int(1e5)]
param_names = ['num_lines']
def setup(self, num_lines):
data = [u'1,2,3 # comment'] * num_lines
# unfortunately, timeit will only run setup()
# between repeat events, but not for iterations
# within repeats, so the StringIO object
        # will have to be rewound in the benchmark proper
self.data_comments = StringIO(u'\n'.join(data))
def time_comment_loadtxt_csv(self, num_lines):
# benchmark handling of lines with comments
# when loading in from csv files
# inspired by similar benchmark in pandas
# for read_csv
# need to rewind StringIO object (unfortunately
# confounding timing result somewhat) for every
# call to timing test proper
np.loadtxt(self.data_comments,
delimiter=u',')
self.data_comments.seek(0)
class LoadtxtCSVdtypes(Benchmark):
# benchmarks for np.loadtxt operating with
# different dtypes parsed / cast from CSV files
params = (['float32', 'float64', 'int32', 'int64',
'complex128', 'str', 'object'],
[10, int(1e2), int(1e4), int(1e5)])
param_names = ['dtype', 'num_lines']
def setup(self, dtype, num_lines):
data = [u'5, 7, 888'] * num_lines
self.csv_data = StringIO(u'\n'.join(data))
def time_loadtxt_dtypes_csv(self, dtype, num_lines):
# benchmark loading arrays of various dtypes
# from csv files
# state-dependent timing benchmark requires
# rewind of StringIO object
np.loadtxt(self.csv_data,
delimiter=u',',
dtype=dtype)
self.csv_data.seek(0)
class LoadtxtCSVStructured(Benchmark):
# benchmarks for np.loadtxt operating with
# a structured data type & CSV file
def setup(self):
num_lines = 50000
data = [u"M, 21, 72, X, 155"] * num_lines
self.csv_data = StringIO(u'\n'.join(data))
def time_loadtxt_csv_struct_dtype(self):
# obligate rewind of StringIO object
# between iterations of a repeat:
np.loadtxt(self.csv_data,
delimiter=u',',
dtype=[('category_1', 'S1'),
('category_2', 'i4'),
('category_3', 'f8'),
('category_4', 'S1'),
('category_5', 'f8')])
self.csv_data.seek(0)
class LoadtxtCSVSkipRows(Benchmark):
# benchmarks for loadtxt row skipping when
# reading in csv file data; a similar benchmark
# is present in the pandas asv suite
params = [0, 500, 10000]
param_names = ['skiprows']
def setup(self, skiprows):
np.random.seed(123)
test_array = np.random.rand(100000, 3)
self.fname = 'test_array.csv'
np.savetxt(fname=self.fname,
X=test_array,
delimiter=',')
def time_skiprows_csv(self, skiprows):
np.loadtxt(self.fname,
delimiter=',',
skiprows=skiprows)
class LoadtxtReadUint64Integers(Benchmark):
# pandas has a similar CSV reading benchmark
# modified to suit np.loadtxt
params = [550, 1000, 10000]
param_names = ['size']
def setup(self, size):
arr = np.arange(size).astype('uint64') + 2**63
self.data1 = StringIO(u'\n'.join(arr.astype(str).tolist()))
arr = arr.astype(object)
arr[500] = -1
self.data2 = StringIO(u'\n'.join(arr.astype(str).tolist()))
def time_read_uint64(self, size):
# mandatory rewind of StringIO object
# between iterations of a repeat:
np.loadtxt(self.data1)
self.data1.seek(0)
def time_read_uint64_neg_values(self, size):
# mandatory rewind of StringIO object
# between iterations of a repeat:
np.loadtxt(self.data2)
self.data2.seek(0)
class LoadtxtUseColsCSV(Benchmark):
# benchmark selective column reading from CSV files
# using np.loadtxt
params = [2, [1, 3], [1, 3, 5, 7]]
param_names = ['usecols']
def setup(self, usecols):
num_lines = 5000
data = [u'0, 1, 2, 3, 4, 5, 6, 7, 8, 9'] * num_lines
self.csv_data = StringIO(u'\n'.join(data))
def time_loadtxt_usecols_csv(self, usecols):
# must rewind StringIO because of state
# dependence of file reading
np.loadtxt(self.csv_data,
delimiter=u',',
usecols=usecols)
self.csv_data.seek(0)
class LoadtxtCSVDateTime(Benchmark):
# benchmarks for np.loadtxt operating with
# datetime data in a CSV file
params = [20, 200, 2000, 20000]
param_names = ['num_lines']
def setup(self, num_lines):
# create the equivalent of a two-column CSV file
# with date strings in the first column and random
# floating point data in the second column
dates = np.arange('today', 20, dtype=np.datetime64)
np.random.seed(123)
values = np.random.rand(20)
date_line = u''
for date, value in zip(dates, values):
date_line += (str(date) + ',' + str(value) + '\n')
# expand data to specified number of lines
data = date_line * (num_lines // 20)
self.csv_data = StringIO(data)
def time_loadtxt_csv_datetime(self, num_lines):
# rewind StringIO object -- the timing iterations
# are state-dependent
X = np.loadtxt(self.csv_data,
delimiter=u',',
dtype=([('dates', 'M8[us]'),
('values', 'float64')]))
self.csv_data.seek(0)
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/semi_supervised/label_propagation.py | 39 | 16726 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# License: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
elif callable(self.kernel):
if y is None:
return self.kernel(X, X)
else:
return self.kernel(X, y)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" or an explicit function "
" are supported at this time." % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
    def fit(self, X, y):
        """Fit a semi-supervised label propagation model.
        All the input data is provided as matrix X (labeled and unlabeled)
        together with a corresponding label vector y that uses a dedicated
        marker value (-1) for unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
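        # Iteratively propagate the current label distributions through the
        # graph (graph_matrix . Y), then clamp by re-adding the static
        # contribution of the originally labeled points, until the update
        # converges within tol or the iteration budget is used up.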
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
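# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# For a dense, zero-diagonal affinity matrix W, the graph built above is the
# symmetric normalization D^{-1/2} W D^{-1/2} (with the diagonal zeroed), where
# D is the diagonal degree matrix. A small check of that equivalence, assuming
# scipy's csgraph laplacian behaves like the graph_laplacian helper used above;
# the function name and toy matrix are hypothetical:
def _example_normalized_graph():
    import numpy as np
    from scipy.sparse.csgraph import laplacian as csgraph_laplacian
    W = np.array([[0.0, 1.0, 0.5],
                  [1.0, 0.0, 0.2],
                  [0.5, 0.2, 0.0]])
    lap = -csgraph_laplacian(W, normed=True)
    np.fill_diagonal(lap, 0.0)
    d = W.sum(axis=0)
    expected = W / np.sqrt(np.outer(d, d))
    assert np.allclose(lap, expected)
    return lap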
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
ominux/scikit-learn | sklearn/linear_model/ridge.py | 1 | 18199 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# License: Simplified BSD
import numpy as np
from .base import LinearModel
from ..utils.extmath import safe_sparse_dot
from ..utils import safe_asanyarray
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
def _solve(A, b, solver, tol):
# helper method for ridge_regression, A is symmetric positive
if solver == 'auto':
if hasattr(A, 'todense'):
solver = 'sparse_cg'
else:
solver = 'dense_cholesky'
if solver == 'sparse_cg':
if b.ndim < 2:
from scipy.sparse import linalg as sp_linalg
sol, error = sp_linalg.cg(A, b, tol=tol)
if error:
raise ValueError("Failed with error code %d" % error)
return sol
else:
# sparse_cg cannot handle a 2-d b.
sol = []
for j in range(b.shape[1]):
sol.append(_solve(A, b[:, j], solver="sparse_cg", tol=tol))
return np.array(sol).T
elif solver == 'dense_cholesky':
from scipy import linalg
if hasattr(A, 'todense'):
A = A.todense()
return linalg.solve(A, b, sym_pos=True, overwrite_a=True)
else:
raise NotImplementedError('Solver %s not implemented' % solver)
def ridge_regression(X, y, alpha, sample_weight=1.0, solver='auto', tol=1e-3):
"""
Solve the ridge equation by the method of normal equations.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
    alpha : float
        Regularization strength; small positive values improve the
        conditioning of the problem and reduce the variance of the estimates.
    sample_weight : float or numpy array of shape [n_samples]
        Individual weights for each sample
solver : {'auto', 'dense_cholesky', 'sparse_cg'}, optional
        Solver to use in the computational routines. 'dense_cholesky'
        will use the standard scipy.linalg.solve function, 'sparse_cg'
        will use a conjugate gradient solver as found in
        scipy.sparse.linalg.cg, while 'auto' will choose the most
        appropriate solver depending on the matrix X.
tol: float
Precision of the solution.
Returns
-------
coef: array, shape = [n_features] or [n_responses, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
is_sparse = False
if hasattr(X, 'todense'): # lazy import of scipy.sparse
from scipy import sparse
is_sparse = sparse.issparse(X)
if is_sparse:
if n_features > n_samples or \
isinstance(sample_weight, np.ndarray) or \
sample_weight != 1.0:
I = sparse.lil_matrix((n_samples, n_samples))
I.setdiag(np.ones(n_samples) * alpha * sample_weight)
c = _solve(X * X.T + I, y, solver, tol)
coef = X.T * c
else:
I = sparse.lil_matrix((n_features, n_features))
I.setdiag(np.ones(n_features) * alpha)
coef = _solve(X.T * X + I, X.T * y, solver, tol)
else:
if n_features > n_samples or \
isinstance(sample_weight, np.ndarray) or \
sample_weight != 1.0:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
A = np.dot(X, X.T)
A.flat[::n_samples + 1] += alpha * sample_weight
coef = np.dot(X.T, _solve(A, y, solver, tol))
else:
# ridge
# w = inv(X^t X + alpha*Id) * X.T y
A = np.dot(X.T, X)
A.flat[::n_features + 1] += alpha
coef = _solve(A, np.dot(X.T, y), solver, tol)
return coef.T
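# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# A small numerical check that the "kernel ridge" branch above,
# w = X.T inv(X X.T + alpha*I) y (used when n_features > n_samples), matches
# the primal normal-equations solution w = inv(X.T X + alpha*I) X.T y.
# The function name and toy data are hypothetical, for illustration only.
def _example_primal_equals_dual(alpha=1.0):
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(5, 20)   # n_features > n_samples, so the dual path is cheaper
    y = rng.randn(5)
    primal = np.linalg.solve(X.T.dot(X) + alpha * np.eye(20), X.T.dot(y))
    dual = X.T.dot(np.linalg.solve(X.dot(X.T) + alpha * np.eye(5), y))
    assert np.allclose(primal, dual)
    return dual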
class Ridge(LinearModel):
"""
Ridge regression.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
overwrite_X : boolean, optional
If True, X will not be copied
Default is False
tol: float
Precision of the solution.
Attributes
----------
coef_: array, shape = [n_features] or [n_responses, n_features]
Weight vector(s).
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y)
Ridge(alpha=1.0, fit_intercept=True, normalize=False, overwrite_X=False,
tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
overwrite_X=False, tol=1e-3):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.overwrite_X = overwrite_X
self.tol = tol
def fit(self, X, y, sample_weight=1.0, solver='auto'):
"""
Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
solver : {'auto', 'dense_cholesky', 'sparse_cg'}
Solver to use in the computational
            routines. 'dense_cholesky' will use the standard
            scipy.linalg.solve function, 'sparse_cg' will use a
            conjugate gradient solver as found in
            scipy.sparse.linalg.cg, while 'auto' will choose the most
            appropriate solver depending on the matrix X.
Returns
-------
self : returns an instance of self.
"""
X = safe_asanyarray(X, dtype=np.float)
y = np.asanyarray(y, dtype=np.float)
X, y, X_mean, y_mean, X_std = \
self._center_data(X, y, self.fit_intercept,
self.normalize, self.overwrite_X)
self.coef_ = ridge_regression(X, y, self.alpha, sample_weight,
solver, self.tol)
self._set_intercept(X_mean, y_mean, X_std)
return self
class RidgeClassifier(Ridge):
"""Classifier using Ridge regression
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
Attributes
----------
coef_: array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
Note
----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach.
"""
def fit(self, X, y, solver='auto'):
"""
Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
solver : {'auto', 'dense_cholesky', 'sparse_cg'}
Solver to use in the computational
            routines. 'dense_cholesky' will use the standard
            scipy.linalg.solve function, 'sparse_cg' will use a
            conjugate gradient solver as found in
            scipy.sparse.linalg.cg, while 'auto' will choose the most
            appropriate solver depending on the matrix X.
Returns
-------
self : returns an instance of self.
"""
self.label_binarizer = LabelBinarizer()
Y = self.label_binarizer.fit_transform(y)
Ridge.fit(self, X, Y, solver=solver)
return self
def decision_function(self, X):
return Ridge.predict(self, X)
def predict(self, X):
"""
Predict target values according to the fitted model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
Y = self.decision_function(X)
return self.label_binarizer.inverse_transform(Y)
class _RidgeGCV(LinearModel):
"""
Ridge regression with built-in Generalized Cross-Validation, i.e.
efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
Reference
---------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=[0.1, 1.0, 10.0], fit_intercept=True, normalize=False,
score_func=None, loss_func=None, overwrite_X=False):
self.alphas = np.asanyarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.score_func = score_func
self.loss_func = loss_func
self.overwrite_X = overwrite_X
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
from scipy import linalg
v, Q = linalg.eigh(K)
return K, v, Q
def _errors(self, v, Q, y, alpha):
G = np.dot(np.dot(Q, np.diag(1.0 / (v + alpha))), Q.T)
c = np.dot(G, y)
G_diag = np.diag(G)
# handle case when y is 2-d
G_diag = G_diag if len(y.shape) == 1 else G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, K, v, Q, y, alpha):
n_samples = y.shape[0]
G = np.dot(np.dot(Q, np.diag(1.0 / (v + alpha))), Q.T)
c = np.dot(G, y)
KG = np.dot(K, G)
#KG = np.dot(np.dot(Q, np.diag(v / (v + alpha))), Q.T)
KG_diag = np.diag(KG)
denom = np.ones(n_samples) - KG_diag
if len(y.shape) == 2:
# handle case when y is 2-d
KG_diag = KG_diag[:, np.newaxis]
denom = denom[:, np.newaxis]
num = np.dot(KG, y) - KG_diag * y
return num / denom, c
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X = safe_asanyarray(X, dtype=np.float)
y = np.asanyarray(y, dtype=np.float)
n_samples = X.shape[0]
X, y, X_mean, y_mean, X_std = LinearModel._center_data(X, y,
self.fit_intercept, self.normalize, self.overwrite_X)
K, v, Q = self._pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
M = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
error = self.score_func is None and self.loss_func is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = self._errors(v, Q, y, sample_weight * alpha)
else:
out, c = self._values(K, v, Q, y, sample_weight * alpha)
M[:, i] = out.ravel()
C.append(c)
if error:
best = M.mean(axis=0).argmin()
else:
func = self.score_func if self.score_func else self.loss_func
out = [func(y.ravel(), M[:, i]) for i in range(len(self.alphas))]
best = np.argmax(out) if self.score_func else np.argmin(out)
self.best_alpha = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
return self
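# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# A brute-force check of the identity used by _RidgeGCV, looe = c / diag(G):
# the leave-one-out residual of ridge (no intercept) equals the dual
# coefficient divided by the matching diagonal entry of G = inv(K + alpha*I).
# The function name and toy data are hypothetical, for illustration only.
def _example_loo_identity(alpha=1.0):
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(8, 3)
    y = rng.randn(8)
    G = np.linalg.inv(X.dot(X.T) + alpha * np.eye(8))
    c = G.dot(y)
    looe_fast = c / np.diag(G)
    looe_slow = np.empty(8)
    for i in range(8):
        mask = np.arange(8) != i
        w = np.linalg.solve(X[mask].T.dot(X[mask]) + alpha * np.eye(3),
                            X[mask].T.dot(y[mask]))
        looe_slow[i] = y[i] - X[i].dot(w)
    assert np.allclose(looe_fast, looe_slow)
    return looe_fast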
class RidgeCV(LinearModel):
"""
Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Parameters
----------
alphas: numpy array of shape [n_alpha]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
    loss_func: callable, optional
        function that takes 2 arguments and compares them in
        order to evaluate the performance of prediction (small is good);
        if None is passed, the score of the estimator is maximized
    score_func: callable, optional
        function that takes 2 arguments and compares them in
        order to evaluate the performance of prediction (big is good);
        if None is passed, the score of the estimator is maximized
See also
--------
Ridge
"""
def __init__(self, alphas=np.array([0.1, 1.0, 10.0]), fit_intercept=True,
normalize=False, score_func=None, loss_func=None, cv=None):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.score_func = score_func
self.loss_func = loss_func
self.cv = cv
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
cv : cross-validation generator, optional
            If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
Returns
-------
self : Returns self.
"""
if self.cv is None:
            estimator = _RidgeGCV(self.alphas, self.fit_intercept,
                                  normalize=self.normalize,
                                  score_func=self.score_func,
                                  loss_func=self.loss_func)
estimator.fit(X, y, sample_weight=sample_weight)
self.best_alpha = estimator.best_alpha
else:
parameters = {'alpha': self.alphas}
# FIXME: sample_weight must be split into training/validation data
# too!
#fit_params = {'sample_weight' : sample_weight}
fit_params = {}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator
self.best_alpha = gs.best_estimator.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeClassifierCV(RidgeCV):
def fit(self, X, y, sample_weight=1.0, class_weight=None):
"""
Fit the ridge classifier.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
class_weight : dict, optional
Weights associated with classes in the form
{class_label : weight}. If not given, all classes are
supposed to have weight one.
sample_weight : float or numpy array of shape [n_samples]
Sample weight
Returns
-------
self : object
Returns self.
"""
if class_weight is None:
class_weight = {}
sample_weight2 = np.array([class_weight.get(k, 1.0) for k in y])
self.label_binarizer = LabelBinarizer()
Y = self.label_binarizer.fit_transform(y)
RidgeCV.fit(self, X, Y,
sample_weight=sample_weight * sample_weight2)
return self
def decision_function(self, X):
return RidgeCV.predict(self, X)
def predict(self, X):
Y = self.decision_function(X)
return self.label_binarizer.inverse_transform(Y)
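# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# Typical use of the cross-validated estimator defined above: fit RidgeCV over
# a small grid of alphas and read back the selected value. The wrapper function
# and toy data are hypothetical, for illustration only.
def _example_ridge_cv_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = X.dot(rng.randn(10)) + 0.1 * rng.randn(50)
    model = RidgeCV(alphas=np.array([0.01, 0.1, 1.0, 10.0])).fit(X, y)
    return model.best_alpha, model.coef_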
| bsd-3-clause |
heli522/scikit-learn | sklearn/naive_bayes.py | 128 | 28358 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
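# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# predict_proba above normalizes the joint log-likelihoods with the
# log-sum-exp trick, which stays finite even when exp(jll) would underflow.
# A tiny self-contained check that the result still sums to one per sample
# (the function name and the numbers are hypothetical, for illustration only):
def _example_logsumexp_normalization():
    import numpy as np
    jll = np.array([[-1000.0, -1001.0, -1003.0]])   # one sample, three classes
    shift = jll.max(axis=1, keepdims=True)
    log_prob_x = np.log(np.exp(jll - shift).sum(axis=1, keepdims=True)) + shift
    proba = np.exp(jll - log_prob_x)
    assert np.allclose(proba.sum(axis=1), 1.0)      # naive exp(jll) underflows
    return proba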
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
epsilon = 1e-9
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
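# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# A quick check that the incremental update in _update_mean_variance matches
# the per-feature mean and variance computed on the full data in one pass
# (unweighted case). The function name and toy data are hypothetical.
def _example_online_mean_variance():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 4)
    first, second = X[:60], X[60:]
    mu, var = first.mean(axis=0), first.var(axis=0)
    mu, var = GaussianNB._update_mean_variance(60, mu, var, second)
    assert np.allclose(mu, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))
    return mu, var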
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
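# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# The smoothing step above amounts to
#     feature_log_prob_[c, i] = log((N_ci + alpha) / (N_c + alpha * n_features)),
# where N_ci is the count of feature i in class c and N_c its total count.
# A small check against a fitted MultinomialNB on toy counts (the function
# name and the data are hypothetical, for illustration only):
def _example_multinomial_smoothing(alpha=1.0):
    import numpy as np
    X = np.array([[2, 1, 0],
                  [3, 0, 1],
                  [0, 4, 2]])
    y = np.array([0, 0, 1])
    clf = MultinomialNB(alpha=alpha).fit(X, y)
    counts = np.array([X[y == 0].sum(axis=0), X[y == 1].sum(axis=0)],
                      dtype=np.float64)
    smoothed = counts + alpha
    expected = np.log(smoothed) - np.log(smoothed.sum(axis=1, keepdims=True))
    assert np.allclose(clf.feature_log_prob_, expected)
    return expected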
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
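# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# BernoulliNB._joint_log_likelihood avoids forming (1 - X) explicitly by using
#     X @ log(p).T + (1 - X) @ log(1 - p).T
#         == X @ (log(p) - log(1 - p)).T + sum(log(1 - p), axis=1).
# A small dense check of that identity (names and data are hypothetical):
def _example_bernoulli_jll_identity():
    import numpy as np
    rng = np.random.RandomState(0)
    X = (rng.rand(4, 6) > 0.5).astype(np.float64)
    log_p = np.log(rng.uniform(0.1, 0.9, size=(2, 6)))    # 2 classes, 6 features
    neg_prob = np.log(1 - np.exp(log_p))
    direct = X.dot(log_p.T) + (1 - X).dot(neg_prob.T)
    trick = X.dot((log_p - neg_prob).T) + neg_prob.sum(axis=1)
    assert np.allclose(direct, trick)
    return trick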
| bsd-3-clause |
mjasher/gac | GAC/flopy/mbase.py | 1 | 41047 | """
mbase module
This module contains the base model and base package classes from which
all of the other models and packages inherit from.
"""
from __future__ import print_function
import numpy as np
from numpy.lib.recfunctions import stack_arrays
import sys
import os
import subprocess as sp
import webbrowser as wb
import warnings
from .modflow.mfparbc import ModflowParBc as mfparbc
from flopy import utils
# Global variables
iconst = 1 # Multiplier for individual array elements in integer and real arrays read by MODFLOW's U2DREL, U1DREL and U2DINT.
iprn = -1 # Printout flag. If >= 0 then array values read are printed in listing file.
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
# test for exe in current working directory
if is_exe(program):
return program
# test for exe in path statement
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
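# --- Editor's note: illustrative sketch, not part of the upstream module. ---
# which() above mimics the Unix `which` command: it returns the full path of an
# executable found either as given or on the PATH, or None if nothing matches.
# A minimal, hypothetical usage (the executable name is only an example):
def _example_which_usage(exe_name='mf2005'):
    exe = which(exe_name)
    if exe is None:
        return 'executable {0} not found on PATH'.format(exe_name)
    return 'would run the model with {0}'.format(exe)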
class BaseModel(object):
"""
MODFLOW based models base class
Parameters
----------
modelname : string
Name of the model. Model files will be given this name. (default is
'modflowtest'
namefile_ext : string
name file extension (default is 'nam')
exe_name : string
name of the modflow executable
model_ws : string
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
"""
def __init__(self, modelname='modflowtest', namefile_ext='nam',
exe_name='mf2k.exe', model_ws=None,
structured=True):
"""
BaseModel init
"""
self.__name = modelname
self.namefile_ext = namefile_ext
self.namefile = self.__name + '.' + self.namefile_ext
self.packagelist = []
self.heading = ''
self.exe_name = exe_name
self.external_extension = 'ref'
if model_ws is None: model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
# print '\n%s not valid, workspace-folder was changed to %s\n' % (model_ws, os.getcwd())
print('\n{0:s} not valid, workspace-folder was changed to {1:s}\n'.format(model_ws, os.getcwd()))
model_ws = os.getcwd()
self.model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ''
return
def set_exename(self, exe_name):
"""
Set the name of the executable.
Parameters
----------
exe_name : name of the executable
"""
self.exe_name = exe_name
return
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif (isinstance(p, type(pp))):
print('****Warning -- two packages of the same type: ', type(p), type(pp))
print('replacing existing Package...')
self.packagelist[i] = p
return
if self.verbose:
print('adding Package: ', p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname in pp.name:
if self.verbose:
print('removing Package: ', pp.name)
self.packagelist.pop(i)
return
raise StopIteration('Package name ' + pname + ' not found in Package list')
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive)
Returns
-------
pp : Package object
Package object of type :class:`flopy.mbase.Package`
"""
return self.get_package(item)
def build_array_name(self, num, prefix):
"""
Build array name
Parameters
----------
num : int
array number
prefix : string
array prefix
"""
return self.external_path + prefix + '_' + str(num) + '.' + self.external_extension
def assign_external(self, num, prefix):
"""
Assign external file
Parameters
----------
num : int
array number
prefix : string
array prefix
"""
fname = self.build_array_name(num, prefix)
unit = (self.next_ext_unit())
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(False)
return fname, unit
def add_external(self, fname, unit, binflag=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
self.external_fnames.pop(i)
self.external_units.pop(i)
self.external_binflag.pop(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
self.external_fnames.pop(i)
self.external_units.pop(i)
self.external_binflag.pop(i)
else:
raise Exception(' either fname or unit must be passed to remove_external()')
return
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
s = ''
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = s + ('{0:12s} {1:3d} {2:s} {3:s}\n'.format(p.name[i],
p.unit_number[i],
p.file_name[i],
p.extra[i]))
return s
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc.
Returns
-------
pp : Package object
Package object of type :class:`flopy.mbase.Package`
"""
for pp in (self.packagelist):
if (pp.name[0].upper() == name.upper()):
return pp
return None
def get_package_list(self):
"""
Get a list of all the package names.
Parameters
----------
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in (self.packagelist):
val.append(pp.name[0].upper())
return val
def change_model_ws(self, new_pth=None):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
sys.stdout.write('\ncreating model workspace...\n {}\n'.format(new_pth))
os.makedirs(new_pth)
except:
# print '\n%s not valid, workspace-folder was changed to %s\n' % (new_pth, os.getcwd())
print('\n{0:s} not valid, workspace-folder was changed to {1:s}\n'.format(new_pth, os.getcwd()))
new_pth = os.getcwd()
# --reset the model workspace
self.model_ws = new_pth
sys.stdout.write('\nchanging model workspace...\n {}\n'.format(new_pth))
# reset the paths for each package
for pp in (self.packagelist):
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
return None
def run_model(self, silent=False, pause=False, report=False):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
            If True, suppress echoing of run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (the default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
            by the method (the default is False).
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# Check to make sure that program and namefile exist
exe = which(self.exe_name)
if exe is None:
import platform
if platform.system() in 'Windows':
if not self.exe_name.lower().endswith('.exe'):
exe = which(self.exe_name + '.exe')
if exe is None:
s = 'The program {} does not exist or is not executable.'.format(self.exe_name)
raise Exception(s)
else:
if not silent:
s = 'FloPy is using the following executable to run the model: {}'.format(exe)
print(s)
if not os.path.isfile(os.path.join(self.model_ws, self.namefile)):
            s = 'The namefile for this model does not exist: {}'.format(self.namefile)
raise Exception(s)
proc = sp.Popen([self.exe_name, self.namefile],
stdout=sp.PIPE, cwd=self.model_ws)
while True:
line = proc.stdout.readline()
c = line.decode('utf-8')
if c != '':
if 'normal termination of simulation' in c.lower():
success = True
c = c.rstrip('\r\n')
if not silent:
print('{}'.format(c))
if report == True:
buff.append(c)
else:
break
if pause == True:
input('Press Enter to continue...')
return ([success, buff])
def write_input(self, SelPackList=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
# org_dir = os.getcwd()
#os.chdir(self.model_ws)
if self.verbose:
print('\nWriting packages:')
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(' Package: ', p.name[0])
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(' Package: ', p.name[0])
p.write_file()
break
if self.verbose:
print(' ')
# write name file
self.write_name_file()
#os.chdir(org_dir)
return
def write_name_file(self):
"""
Every Package needs its own writenamefile function
"""
raise Exception('IMPLEMENTATION ERROR: writenamefile must be overloaded')
def get_name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return self.__name
def set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = value
self.namefile = self.__name + '.' + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + '.' + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
name = property(get_name, set_name)
def add_pop_key_list(self, key):
"""
Add a external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (mflist)
model input data
Parameters
----------
SelPackList : bool or list
List of of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
key : str
mflist dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
# valid keyword arguments
if 'kper' in kwargs:
kper = int(kwargs.pop('kper'))
else:
kper = 0
if 'mflay' in kwargs:
mflay = kwargs.pop('mflay')
else:
mflay = None
if 'filename_base' in kwargs:
fileb = kwargs.pop('filename_base')
else:
fileb = None
if 'file_extension' in kwargs:
fext = kwargs.pop('file_extension')
fext = fext.replace('.', '')
else:
fext = 'png'
if 'key' in kwargs:
key = kwargs.pop('key')
else:
key = None
if self.verbose:
print('\nPlotting Packages')
axes = []
ifig = 0
if SelPackList is None:
for p in self.packagelist:
caxs = p.plot(initial_fig=ifig,
filename_base=fileb, file_extension=fext,
kper=kper, mflay=mflay, key=key)
# unroll nested lists of axes into a single list of axes
if isinstance(caxs, list):
for c in caxs:
axes.append(c)
else:
axes.append(caxs)
# update next active figure number
ifig = len(axes) + 1
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(' Plotting Package: ', p.name[0])
caxs = p.plot(initial_fig=ifig,
filename_base=fileb, file_extension=fext,
kper=kper, mflay=mflay, key=key)
# unroll nested lists of axes into a single list of axes
if isinstance(caxs, list):
for c in caxs:
axes.append(c)
else:
axes.append(caxs)
# update next active figure number
ifig = len(axes) + 1
break
if self.verbose:
print(' ')
return axes
class Package(object):
"""
Base package class from which most other packages are derived.
"""
def __init__(self, parent, extension='glo', name='GLOBAL', unit_number=1, extra='',
allowDuplicates=False):
"""
Package init
"""
self.parent = parent # To be able to access the parent modflow object's attributes
if (not isinstance(extension, list)):
extension = [extension]
self.extension = []
self.file_name = []
for e in extension:
self.extension.append(e)
file_name = self.parent.name + '.' + e
self.file_name.append(file_name)
self.fn_path = os.path.join(self.parent.model_ws, self.file_name[0])
if (not isinstance(name, list)):
name = [name]
self.name = name
if (not isinstance(unit_number, list)):
unit_number = [unit_number]
self.unit_number = unit_number
if (not isinstance(extra, list)):
self.extra = len(self.unit_number) * [extra]
else:
self.extra = extra
self.url = 'index.html'
self.allowDuplicates = allowDuplicates
self.acceptable_dtypes = [int, np.float32, str]
return
def __repr__(self):
s = self.__doc__
exclude_attributes = ['extension', 'heading', 'name', 'parent', 'url']
for attr, value in sorted(self.__dict__.items()):
if not (attr in exclude_attributes):
if (isinstance(value, list)):
if (len(value) == 1):
# s = s + ' %s = %s (list)\n' % (attr, str(value[0]))
s = s + ' {0:s} = {1:s}\n'.format(attr, str(value[0]))
else:
# s = s + ' %s (list, items = %d)\n' % (attr, len(value))
s = s + ' {0:s} (list, items = {1:d})\n'.format(attr, len(value))
elif (isinstance(value, np.ndarray)):
# s = s + ' %s (array, shape = %s)\n' % (attr, value.shape.__str__()[1:-1] )
s = s + ' {0:s} (array, shape = {1:s})\n'.format(attr, value.shape.__str__()[1:-1])
else:
# s = s + ' %s = %s (%s)\n' % (attr, str(value), str(type(value))[7:-2])
s = s + ' {0:s} = {1:s} ({2:s})\n'.format(attr, str(value), str(type(value))[7:-2])
return s
def __getitem__(self, item):
if not isinstance(item, list) and not isinstance(item, tuple):
assert item in list(self.stress_period_data.data.keys()), "package.__getitem__() kper " + str(
item) + " not in data.keys()"
return self.stress_period_data[item]
if item[1] not in self.dtype.names:
raise Exception("package.__getitem(): item \'" + item + "\' not in dtype names " + str(self.dtype.names))
assert item[0] in list(self.stress_period_data.data.keys()), "package.__getitem__() kper " + str(
item[0]) + " not in data.keys()"
if self.stress_period_data.vtype[item[0]] == np.recarray:
return self.stress_period_data[item[0]][item[1]]
def __setitem__(self, key, value):
raise NotImplementedError("package.__setitem__() not implemented")
def __setattr__(self, key, value):
var_dict = vars(self)
if key in list(var_dict.keys()):
old_value = var_dict[key]
if isinstance(old_value, utils.util_2d):
value = utils.util_2d(self.parent, old_value.shape,
old_value.dtype, value,
name=old_value.name,
fmtin=old_value.fmtin,
locat=old_value.locat)
elif isinstance(old_value, utils.util_3d):
value = utils.util_3d(self.parent, old_value.shape,
old_value.dtype, value,
name=old_value.name_base,
fmtin=old_value.fmtin,
locat=old_value.locat)
elif isinstance(old_value, utils.transient_2d):
value = utils.transient_2d(self.parent, old_value.shape,
old_value.dtype, value,
name=old_value.name_base,
fmtin=old_value.fmtin,
locat=old_value.locat)
elif isinstance(old_value, utils.mflist):
value = utils.mflist(self.parent, old_value.dtype, data=value)
elif isinstance(old_value, list):
if isinstance(old_value[0], utils.util_3d):
new_list = []
for vo, v in zip(old_value, value):
new_list.append(utils.util_3d(self.parent, vo.shape,
vo.dtype, v,
name=vo.name_base,
fmtin=vo.fmtin,
locat=vo.locat))
value = new_list
elif isinstance(old_value[0], utils.util_2d):
new_list = []
for vo, v in zip(old_value, value):
new_list.append(utils.util_2d(self.parent, vo.shape,
vo.dtype, v,
name=vo.name,
fmtin=vo.fmtin,
locat=vo.locat))
value = new_list
super(Package, self).__setattr__(key, value)
@staticmethod
def add_to_dtype(dtype, field_names, field_types):
if not isinstance(field_names, list):
field_names = [field_names]
if not isinstance(field_types, list):
field_types = [field_types] * len(field_names)
newdtypes = [dtype]
for field_name, field_type in zip(field_names, field_types):
tempdtype = np.dtype([(field_name, field_type)])
newdtypes.append(tempdtype)
newdtype = sum((dtype.descr for dtype in newdtypes), [])
newdtype = np.dtype(newdtype)
return newdtype
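# Illustrative sketch (not part of flopy itself): add_to_dtype() appends
# new (name, type) pairs to an existing structured dtype, e.g.
# >>> base = np.dtype([('k', int), ('i', int), ('j', int), ('flux', np.float32)])
# >>> Package.add_to_dtype(base, ['iface'], [np.float32])
# returns a dtype with the fields k, i, j, flux and iface; list-type packages
# use this to bolt auxiliary variables onto their base record layout.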
def plot(self, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (mflist)
package input data
Parameters
----------
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
key : str
mflist dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.dis.plot()
"""
# valid keyword arguments
if 'kper' in kwargs:
kper = kwargs.pop('kper')
else:
kper = 0
if 'filename_base' in kwargs:
fileb = kwargs.pop('filename_base')
else:
fileb = None
if 'mflay' in kwargs:
mflay = kwargs.pop('mflay')
else:
mflay = None
if 'file_extension' in kwargs:
fext = kwargs.pop('file_extension')
fext = fext.replace('.', '')
else:
fext = 'png'
if 'key' in kwargs:
key = kwargs.pop('key')
else:
key = None
if 'initial_fig' in kwargs:
ifig = int(kwargs.pop('initial_fig'))
else:
ifig = 0
inc = self.parent.nlay
if mflay is not None:
inc = 1
axes = []
for item, value in self.__dict__.items():
caxs = []
if isinstance(value, utils.mflist):
if self.parent.verbose:
print('plotting {} package mflist instance: {}'.format(self.name[0], item))
if key is None:
names = ['{} location stress period {} layer {}'.format(self.name[0], kper+1, k+1)
for k in range(self.parent.nlay)]
else:
names = ['{} {} data stress period {} layer {}'.format(self.name[0], key, kper+1, k+1)
for k in range(self.parent.nlay)]
fignum = list(range(ifig, ifig+inc))
ifig = fignum[-1] + 1
caxs.append(value.plot(key, names, kper,
filename_base=fileb, file_extension=fext, mflay=mflay,
fignum=fignum, colorbar=True))
elif isinstance(value, utils.util_3d):
if self.parent.verbose:
print('plotting {} package util_3d instance: {}'.format(self.name[0], item))
fignum = list(range(ifig, ifig+inc))
ifig = fignum[-1] + 1
caxs.append(value.plot(filename_base=fileb, file_extension=fext, mflay=mflay,
fignum=fignum, colorbar=True))
elif isinstance(value, utils.util_2d):
if len(value.shape) == 2:
if self.parent.verbose:
print('plotting {} package util_2d instance: {}'.format(self.name[0], item))
fignum = list(range(ifig, ifig+1))
ifig = fignum[-1] + 1
caxs.append(value.plot(filename_base=fileb, file_extension=fext,
fignum=fignum, colorbar=True))
elif isinstance(value, utils.transient_2d):
if self.parent.verbose:
print('plotting {} package transient_2d instance: {}'.format(self.name[0], item))
fignum = list(range(ifig, ifig+inc))
ifig = fignum[-1] + 1
caxs.append(value.plot(filename_base=fileb, file_extension=fext, kper=kper,
fignum=fignum, colorbar=True))
elif isinstance(value, list):
for v in value:
if isinstance(v, utils.util_3d):
if self.parent.verbose:
print('plotting {} package util_3d instance: {}'.format(self.name[0], item))
fignum = list(range(ifig, ifig+inc))
ifig = fignum[-1] + 1
caxs.append(v.plot(filename_base=fileb, file_extension=fext, mflay=mflay,
fignum=fignum, colorbar=True))
else:
pass
# unroll nested lists of axes into a single list of axes
if isinstance(caxs, list):
for c in caxs:
if isinstance(c, list):
for cc in c:
axes.append(cc)
else:
axes.append(c)
else:
axes.append(caxs)
return axes
def to_shapefile(self, filename, **kwargs):
"""
Export 2-D, 3-D, and transient 2-D model data to shapefile (polygons). Adds an
attribute for each layer in each data array
Parameters
----------
filename : str
Shapefile name to write
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.lpf.to_shapefile('test_hk.shp')
"""
s = 'to_shapefile() method not implemented for {} Package'.format(self.name)
raise Exception(s)
# try:
# if isinstance(self.stress_period_data, utils.mflist):
# self.stress_period_data.to_shapefile(*args, **kwargs)
# except:
# pass
def webdoc(self):
if self.parent.version == 'mf2k':
wb.open('http://water.usgs.gov/nrp/gwsoftware/modflow2000/Guide/' + self.url)
elif self.parent.version == 'mf2005':
wb.open('http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/' + self.url)
elif self.parent.version == 'ModflowNwt':
wb.open('http://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/' + self.url)
def write_file(self):
"""
Every Package needs its own write_file function
"""
print('IMPLEMENTATION ERROR: write_file must be overloaded')
return
@staticmethod
def load(model, pack_type, f, nper=None, pop_key_list=None):
"""
The load method has not been implemented for this package.
"""
bc_pack_types = []
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# check for parameters
nppak = 0
if "parameter" in line.lower():
t = line.strip().split()
#assert int(t[1]) == 0,"Parameters are not supported"
nppak = np.int(t[1])
mxl = 0
if nppak > 0:
mxl = np.int(t[2])
if model.verbose:
print(' Parameters detected. Number of parameters = ', nppak)
line = f.readline()
#dataset 2a
t = line.strip().split()
ipakcb = 0
try:
if int(t[1]) != 0:
ipakcb = 53
pop_key_list = model.pop_key_list(int(t[1]), pop_key_list)
except:
pass
options = []
aux_names = []
if len(t) > 2:
it = 2
while it < len(t):
toption = t[it]
if toption.lower() == 'noprint':
options.append(toption)
elif 'aux' in toption.lower():
options.append(' '.join(t[it:it + 2]))
aux_names.append(t[it + 1].lower())
it += 1
it += 1
# set partype
# and read phiramp for modflow-nwt well package
partype = ['cond']
if 'flopy.modflow.mfwel.modflowwel'.lower() in str(pack_type).lower():
partype = ['flux']
specify = False
ipos = f.tell()
line = f.readline()
# test for specify keyword if a NWT well file - This is a temporary hack
if 'specify' in line.lower():
specify = True
t = line.strip().split()
phiramp = np.float32(t[1])
try:
phiramp_unit = np.int32(t[2])
except:
phiramp_unit = 2
options.append('specify {} {} '.format(phiramp, phiramp_unit))
else:
f.seek(ipos)
elif 'flopy.modflow.mfchd.modflowchd'.lower() in str(pack_type).lower():
partype = ['shead', 'ehead']
# read parameter data
if nppak > 0:
dt = pack_type.get_empty(1, aux_names=aux_names).dtype
pak_parms = mfparbc.load(f, nppak, dt, model.verbose)
#pak_parms = mfparbc.load(f, nppak, len(dt.names))
if nper is None:
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
#read data for every stress period
bnd_output = None
stress_period_data = {}
for iper in range(nper):
if model.verbose:
print(" loading " + str(pack_type) + " for kper {0:5d}".format(iper + 1))
line = f.readline()
if line == '':
break
t = line.strip().split()
itmp = int(t[0])
itmpp = 0
try:
itmpp = int(t[1])
except:
pass
if itmp == 0:
bnd_output = None
current = pack_type.get_empty(itmp, aux_names=aux_names)
elif itmp > 0:
current = pack_type.get_empty(itmp, aux_names=aux_names)
for ibnd in range(itmp):
line = f.readline()
if "open/close" in line.lower():
#raise NotImplementedError("load() method does not support \'open/close\'")
oc_filename = os.path.join(model.model_ws, line.strip().split()[1])
assert os.path.exists(oc_filename), "Package.load() error: open/close filename " + \
oc_filename + " not found"
try:
current = np.genfromtxt(oc_filename, dtype=current.dtype)
current = current.view(np.recarray)
except Exception as e:
raise Exception("Package.load() error loading open/close file " + oc_filename + \
" :" + str(e))
assert current.shape[0] == itmp, "Package.load() error: open/close rec array from file " + \
oc_filename + " shape (" + str(current.shape) + \
") does not match itmp: {0:d}".format(itmp)
break
try:
t = line.strip().split()
current[ibnd] = tuple(t[:len(current.dtype.names)])
except:
t = []
for ivar in range(len(current.dtype.names)):
istart = ivar * 10
istop = istart + 10
t.append(line[istart:istop])
current[ibnd] = tuple(t[:len(current.dtype.names)])
# convert indices to zero-based
current['k'] -= 1
current['i'] -= 1
current['j'] -= 1
bnd_output = np.recarray.copy(current)
else:
bnd_output = np.recarray.copy(current)
for iparm in range(itmpp):
line = f.readline()
t = line.strip().split()
pname = t[0].lower()
iname = 'static'
try:
tn = t[1]
c = tn.lower()
instance_dict = pak_parms.bc_parms[pname][1]
if c in instance_dict:
iname = c
else:
iname = 'static'
except:
pass
par_dict, current_dict = pak_parms.get(pname)
data_dict = current_dict[iname]
par_current = pack_type.get_empty(par_dict['nlst'], aux_names=aux_names)
# get appropriate parval
if model.mfpar.pval is None:
parval = np.float(par_dict['parval'])
else:
try:
parval = np.float(model.mfpar.pval.pval_dict[pname])
except:
parval = np.float(par_dict['parval'])
# fill current parameter data (par_current)
for ibnd, t in enumerate(data_dict):
par_current[ibnd] = tuple(t[:len(par_current.dtype.names)])
par_current['k'] -= 1
par_current['i'] -= 1
par_current['j'] -= 1
for ptype in partype:
par_current[ptype] *= parval
if bnd_output is None:
bnd_output = np.recarray.copy(par_current)
else:
bnd_output = stack_arrays((bnd_output, par_current),
asrecarray=True, usemask=False)
if bnd_output is None:
stress_period_data[iper] = itmp
else:
stress_period_data[iper] = bnd_output
pak = pack_type(model, ipakcb=ipakcb,
stress_period_data=stress_period_data, \
dtype=pack_type.get_empty(0, aux_names=aux_names).dtype, \
options=options)
return pak
| gpl-2.0 |
west-tandon/selectivesearch-ml | test/impact_test.py | 1 | 2519 | import os
import shutil
import tempfile
from unittest.mock import MagicMock
import pandas as pd
import ossml.impacts as impacts
from ossml.utils import BucketFeature
from ossml.utils import Dataset
from test.utils_test import UtilsTest
class ImpactsTest(UtilsTest):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def feature_path(self):
return os.path.join(self.test_dir, "path")
def test_train_payoff(self):
# given
dataset = Dataset([self.qf1(), self.qf2()],
[self.sf1(), self.sf2()],
[BucketFeature("payoff", '', 2, 2)], 2)
df = pd.DataFrame({
'QID': [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2],
'qf1': [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3],
'qf2': [10, 10, 10, 10, 20, 20, 20, 20, 30, 30, 30, 30],
'SID': [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
'sf1': [1, 1, 11, 11, 2, 2, 22, 22, 3, 3, 33, 33],
'sf2': [10, 10, 110, 110, 20, 20, 220, 220, 30, 30, 330, 330],
'BID': [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
'payoff': [1, 4, 11, 44, 2, 5, 22, 55, 3, 6, 33, 66]
})[['QID', 'qf1', 'qf2', 'SID', 'sf1', 'sf2', 'BID', 'payoff']]
dataset.load = MagicMock(return_value=df)
model, err = impacts.train_payoffs(dataset)
dataset.load = MagicMock(return_value=df[['QID', 'qf1', 'qf2', 'SID', 'sf1', 'sf2', 'BID']])
impacts.predict_payoffs(dataset, model)
def test_run_train_and_run_predict(self):
j = {
"basename": os.path.join(self.test_dir, "basename"),
"shards": 2,
"buckets": 2,
"impact_features": {
"base": self.feature_path(),
"query": [f.name for f in [self.qf1(), self.qf2()]],
"shard": [f.name for f in [self.sf1(), self.sf2()]],
"bucket": [self.bf1().name]
}
}
model_path = os.path.join(self.test_dir, "model")
impacts.run_train(j, model_path)
j['impact_features']['bucket'] = []
impacts.run_predict(j, model_path)
for shard in range(2):
for bucket in range(2):
with open("{0}#{1}#{2}.payoff".format(j['basename'], shard, bucket)) as f:
lines = f.readlines()
self.assertEqual(len(lines), 3)
for v in lines:
float(v)
| mit |
arahuja/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
fabianvaccaro/pygums | pythonLibs/mahotas-1.1.0/build/lib.linux-armv6l-2.7/mahotas/io/matplotlibwrap.py | 2 | 1706 | # vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Copyright (C) 2013 Luis Pedro Coelho
#
# License: MIT (see COPYING file)
import numpy as np
# Importing matplotlib checks that it is importable without triggering any
# initialization (unlike importing pyplot)
import matplotlib
def imread(filename, as_grey=False):
"""
img = imread(filename, as_grey=False)
Reads an image from file `filename`
Parameters
----------
filename : file name
as_grey : Whether to convert to grey scale image (default: no)
Returns
-------
img : ndarray
"""
from matplotlib import pyplot as plt
img = plt.imread(filename)
if as_grey and len(img.shape) == 3:
# these are the values that wikipedia says are typical
transform = np.array([0.30, 0.59, 0.11])
return np.dot(img, transform)
return img
def imsave(filename, array):
'''
imsave(filename, array)
Writes `array` into file `filename`
Parameters
----------
filename : str
path on file system
array : ndarray-like
'''
from matplotlib import pyplot as plt
import numpy as np
if len(array.shape) == 2:
import warnings
warnings.warn('mahotas.imsave: The `matplotlib` backend does not support saving greyscale images natively.\n'
'Emulating by saving in RGB format (with all channels set to same value).\n'
'If this is a problem, please use another IO backend\n'
'\n'
'See http://mahotas.readthedocs.org/en/latest/io.html \n'
)
array = np.dstack([array, array, array])
plt.imsave(filename, array)
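# Usage sketch (illustrative; 'lena.png' is a placeholder file name):
# >>> from mahotas.io.matplotlibwrap import imread, imsave
# >>> img = imread('lena.png', as_grey=True)
# >>> imsave('lena_grey.png', img)
# The grey conversion is a weighted sum of the RGB channels, so imread()
# returns a 2-D float array; imsave() re-expands it to three identical
# channels before writing, as warned above.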
| gpl-2.0 |
karolciba/playground | amsbook/chapter1/ex16.py | 1 | 1628 | #!/usr/bin/env python
import random
import sys
number = 100
if len(sys.argv) > 1:
number = int(sys.argv[1])
def boygen():
genders = ['B', 'G']
g = random.choice(genders)
yield g
while g != 'B':
g = random.choice(genders)
yield g
raise StopIteration
def pairgen():
genders = ['B', 'G']
boy = False
girl = False
while not boy or not girl:
g = random.choice(genders)
if g == 'B':
boy = True
if g == 'G':
girl = True
yield g
raise StopIteration
def orderedpairgen():
genders = ['B', 'G']
boy = False
girl = False
while not boy or not girl:
g = random.choice(genders)
if g == 'B':
boy = True
if boy == True and g == 'G':
girl = True
yield g
raise StopIteration
def anypairgen():
genders = ['B', 'G']
boys = 0
girls = 0
while boys < 2 and girls < 2:
g = random.choice(genders)
if g == 'B':
boys += 1
if g == 'G':
girls += 1
yield g
raise StopIteration
def twoboysgen():
genders = ['B', 'G']
boys = 0
while boys < 2:
g = random.choice(genders)
if g == 'B':
boys += 1
yield g
raise StopIteration
def stats(gen = boygen, number = 1000):
lengths = []
for x in xrange(number):
s = sum(1 for x in gen())
lengths.append(s)
import matplotlib.pyplot as plt
m = max(lengths)
plt.hist(lengths, bins = m)
plt.show()
return sum(lengths)/float(len(lengths)), max(lengths)
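# Hedged demo (added for illustration, not part of the original exercise):
# running the script directly simulates each stopping rule and prints the
# mean and maximum family size; each stats() call also shows a histogram.
if __name__ == '__main__':
    for gen in (boygen, pairgen, orderedpairgen, anypairgen, twoboysgen):
        avg, longest = stats(gen, number)
        print("%s: avg=%.2f, max=%d" % (gen.__name__, avg, longest))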
| unlicense |
chreman/dramavis | dramalyzer.py | 2 | 37039 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
import os
import csv
from itertools import chain, zip_longest
from collections import Counter
import logging
import numpy as np
from numpy import ma
import pandas as pd
import networkx as nx
from scipy import stats
from scipy.optimize import curve_fit
from sklearn import linear_model
from sklearn.metrics import r2_score
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from tqdm import tqdm
from linacorpus import LinaCorpus, Lina
from dramaplotter import plotGraph
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
__author__ = """Christopher Kittel <web at christopherkittel.eu>,
Frank Fischer <ffischer at hse.ru>"""
__copyright__ = "Copyright 2017"
__license__ = "MIT"
__version__ = "0.4 (beta)"
__maintainer__ = "Frank Fischer <ffischer at hse.ru>"
__status__ = "Development" # 'Development', 'Production' or 'Prototype'
class CorpusAnalyzer(LinaCorpus):
def __init__(self, inputfolder, outputfolder, logpath, major_only=False,
randomization=1000):
super(CorpusAnalyzer, self).__init__(inputfolder, outputfolder)
self.logger = logging.getLogger("corpusAnalyzer")
formatter = logging.Formatter('%(asctime)-15s %(name)s [%(levelname)s] '
'%(message)s')
fh = logging.FileHandler(logpath)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
self.logpath = logpath
self.major_only = major_only
self.randomization = randomization
def analyze_dramas(self, action):
"""
Reads all XMLs in the inputfolder,
returns an iterator of lxml.etree-objects created
with lxml.etree.parse("dramafile.xml").
"""
for dramafile in tqdm(self.dramafiles, desc="Dramas", mininterval=1):
drama = DramaAnalyzer(dramafile, self.outputfolder, self.logpath,
action, self.major_only, self.randomization)
yield drama
def get_char_metrics(self):
self.logger.info("Exporting character metrics.")
dramas = self.analyze_dramas(action="char_metrics")
header = [
'ID', 'author', 'title', 'year',
'frequency', 'degree', 'betweenness', 'closeness'
]
dfs = []
quot_quot_dfs = []
for drama in dramas:
temp_df = pd.DataFrame(index=[drama.ID])
for m in header[1:3]:
temp_df[m] = drama.metadata.get(m)
temp_df['year'] = drama.metadata.get('date_definite')
for m in header[4:]:
temp_df[m] = drama.get_top_ranked_chars()[m]
temp_df['ID'] = drama.ID
dfs.append(temp_df)
quot_quot_dfs.append(drama.quartile_quot)
df = pd.concat(dfs)
df = df[header]
df.index = df['ID']
df.index.name = 'index'
df.to_csv(os.path.join(self.outputfolder,
"central_characters.csv"), sep=";")
self.logger.info("Exporting corpus quartile metrics.")
df = pd.concat(quot_quot_dfs, axis=1).T
df.index.name = "index"
df.to_csv(os.path.join(self.outputfolder,
"corpus_quartile_metrics.csv"),
sep=";")
def get_graph_metrics(self):
self.logger.info("Exporting corpus metrics.")
dramas = self.analyze_dramas(action="corpus_metrics")
df = pd.concat([d.graph_metrics for d in dramas])
header = [
'ID', 'author', 'title', 'subtitle', 'year', 'genretitle',
'filename',
'charcount', 'edgecount', 'maxdegree', 'avgdegree', 'diameter',
'clustering_coefficient', 'clustering_coefficient_random',
'avgpathlength', 'average_path_length_random', 'density',
'segment_count', 'count_type', 'all_in_index',
'change_rate_mean', 'change_rate_std',
'final_scene_size_index', 'characters_last_in',
'connected_components', 'spearman_rho_avg', 'spearman_rho_std',
'spearman_rho_content_vs_network',
'spearman_rho_content_vs_network_top',
'spearman_rho_content_vs_network_bottom',
'component_sizes'
]
df.index = df["ID"]
df.index.name = "index"
df[header].to_csv(os.path.join(self.outputfolder,
"corpus_metrics.csv"), sep=";")
def get_both_metrics(self):
self.logger.info("Exporting character metrics.")
dramas = self.analyze_dramas(action="both")
header = [
'ID', 'author', 'title', 'year',
'frequency', 'degree', 'betweenness', 'closeness'
]
char_dfs = []
graph_dfs = []
quot_quot_dfs = []
for drama in dramas:
temp_df = pd.DataFrame(index=[drama.ID])
for m in header[1:3]:
temp_df[m] = drama.metadata.get(m)
temp_df['year'] = drama.metadata.get('date_definite')
for m in header[4:]:
temp_df[m] = drama.get_top_ranked_chars()[m]
temp_df['ID'] = drama.ID
char_dfs.append(temp_df)
graph_dfs.append(drama.graph_metrics)
quot_quot_dfs.append(drama.quartile_quot)
df = pd.concat(char_dfs)
df = df[header]
df.index = df['ID']
df.index.name = 'index'
df.to_csv(os.path.join(self.outputfolder,
"central_characters.csv"), sep=";")
self.logger.info("Exporting corpus metrics.")
df = pd.concat(graph_dfs)
header = [
'ID', 'author', 'title', 'subtitle', 'year', 'genretitle',
'filename',
'charcount', 'edgecount', 'maxdegree', 'avgdegree', 'diameter',
'clustering_coefficient', 'clustering_coefficient_random',
'avgpathlength', 'average_path_length_random', 'density',
'segment_count', 'count_type', 'all_in_index',
'change_rate_mean', 'change_rate_std',
'final_scene_size_index', 'characters_last_in',
'connected_components', 'spearman_rho_avg', 'spearman_rho_std',
'spearman_rho_content_vs_network',
'spearman_rho_content_vs_network_top',
'spearman_rho_content_vs_network_bottom',
'component_sizes'
]
df.index = df["ID"]
df.index.name = "index"
df.to_csv(os.path.join(self.outputfolder, "corpus_metrics.csv"),
sep=";")
self.logger.info("Exporting corpus quartile metrics.")
df_cq = pd.concat(quot_quot_dfs, axis=1).T
df_cq.index = df["ID"]
df_cq.index.name = "index"
df_cq.to_csv(os.path.join(self.outputfolder,
"corpus_quartile_metrics.csv"), sep=";")
class DramaAnalyzer(Lina):
def __init__(self, dramafile, outputfolder, logpath,
action, major_only, randomization=1000):
super(DramaAnalyzer, self).__init__(dramafile, outputfolder)
self.logger = logging.getLogger("dramaAnalyzer")
formatter = logging.Formatter('%(asctime)-15s %(name)s [%(levelname)s] '
'%(message)s')
fh = logging.FileHandler(logpath)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
self.major_only = major_only
self.n_personae = len(self.personae)
self.centralities = pd.DataFrame(index=[p for p in self.personae])
self.centralities.index.name = "name"
self.randomization = randomization
self.metrics = pd.DataFrame()
self.G = self.create_graph()
self.action = action
if action == "char_metrics":
self.analyze_characters()
self.get_character_frequencies()
self.get_character_speech_amounts()
self.get_character_ranks()
self.get_centrality_ranks()
self.get_rank_stability_measures()
self.add_rank_stability_metrics()
self.get_structural_ranking_measures()
self.get_quartiles()
self.export_char_metrics()
if action == "corpus_metrics":
self.graph_metrics = self.get_graph_metrics()
self.export_graph_metrics()
if action == "both":
self.graph_metrics = self.get_graph_metrics()
self.analyze_characters()
self.get_character_frequencies()
self.get_character_speech_amounts()
self.get_character_ranks()
self.get_centrality_ranks()
self.get_rank_stability_measures()
self.add_rank_stability_metrics()
self.get_structural_ranking_measures()
self.get_quartiles()
self.get_regression_metrics()
self.export_char_metrics()
self.export_graph_metrics()
def add_rank_stability_metrics(self):
self.graph_metrics["spearman_rho_avg"] = (self.rank_stability
.stack()
.mean())
self.graph_metrics["spearman_rho_std"] = (self.rank_stability
.stack()
.std())
(self.graph_metrics["top_rank_char_count"],
self.graph_metrics["top_rank_char_avg"],
self.graph_metrics["top_rank_char_std"]) = (
self.get_top_ranked_char_count())
def get_final_scene_size(self):
last_scene_size = len(self.segments[-1])
return last_scene_size / self.n_personae
def get_drama_change_rate_metrics(self):
change_rates = self.get_drama_change_rate()
cr_mean = np.mean(change_rates)
cr_std = np.std(change_rates)
return cr_mean, cr_std
def get_drama_change_rate(self):
change_rates = []
for x, y in zip_longest(self.segments[:-1], self.segments[1:]):
s = set(x)
t = set(y)
u = s.intersection(t)
cr = abs(len(s)-len(u)) + abs(len(u)-len(t))
cr_sum = len(s.union(t))
change_rates.append(cr/cr_sum)
return change_rates
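# Worked example (toy scene sets, not from any corpus): for consecutive
# scenes s = {A, B, C} and t = {B, C, D}, u = {B, C}, so
# cr = |3 - 2| + |2 - 3| = 2 and cr_sum = |{A, B, C, D}| = 4, giving a
# change rate of 0.5 for that transition.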
def get_central_character_entry(self):
central_character = self.get_central_character()
for i, segment in enumerate(self.segments):
if central_character in segment:
i += 1
central_character_entry_index = float(i/len(self.segments))
return central_character_entry_index
def get_central_character(self):
# collect all *_rank columns dynamically rather than hardcoding their names
ranks = [c for c in self.centralities.columns if c.endswith("rank")]
# sum up all rank values per character, divide by nr. of rank metrics
avg_ranks = self.centralities[ranks].sum(axis=1)/len(ranks)
min_rank = min(avg_ranks)
central_chars = avg_ranks[avg_ranks == min_rank].index.tolist()
if len(central_chars) == 1:
return central_chars[0]
else:
return "SEVERAL"
def get_character_frequencies(self):
self.centralities['frequency'] = 0
frequencies = Counter(list(chain.from_iterable(self.segments)))
for char, freq in frequencies.items():
self.centralities.loc[char, 'frequency'] = freq
def get_character_speech_amounts(self):
for amount in ["speech_acts", "words", "lines", "chars"]:
self.centralities[amount] = 0
for name, person in self.personae.items():
self.centralities.loc[person.name, amount] = (person.amounts
.get(amount))
def get_top_ranked_chars(self):
top_ranked = {}
# check whether metric should be sorted asc(min) or desc(max)
for metric in ['degree', 'closeness', 'betweenness', 'frequency']:
cent_max = self.centralities[metric].max()
top_char = self.centralities[self.centralities[metric]
== cent_max].index.tolist()
if len(top_char) != 1:
top_ranked[metric] = "SEVERAL"
else:
top_ranked[metric] = top_char[0]
# top_ranked['central'] = self.get_central_character()
return top_ranked
def get_top_ranked_char_count(self):
avg_min = self.centralities['centrality_rank_avg'].min()
top_chars = self.centralities[self.centralities['centrality_rank_avg']
== avg_min].index.tolist()
top_std = self.centralities[self.centralities['centrality_rank_avg']
== avg_min]['centrality_rank_std']
return len(top_chars), avg_min, top_std
def get_character_ranks(self):
for metric in ['degree', 'closeness', 'betweenness',
'strength', 'eigenvector_centrality',
'frequency', 'speech_acts', 'words']:
# ascending: False for ranks by high (1) to low (N)
# check ascending value for each metric
self.centralities[metric+"_rank"] = (self.centralities[metric]
.rank(method='min',
ascending=False))
def get_quartiles(self):
metrics = ['degree', 'closeness', 'betweenness',
'strength', 'eigenvector_centrality',
'frequency', 'speech_acts', 'words']
index = ["q4", "q3", "q2", "q1"]
df = pd.DataFrame(columns=metrics, index=index)
for metric in metrics:
df[metric] = ((pd.cut(self.centralities[metric], 4)
.value_counts()
.sort_index(ascending=False) /
len(self.centralities))
.tolist())
self.quartile_quot = df.loc["q4"]/df.loc["q1"]
self.quartile_quot.name = self.ID
self.quartile_quot = self.quartile_quot.append(df.T.stack())
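# Minimal sketch of the binning above (toy values): pd.cut splits the value
# range of a metric into four equal-width intervals and value_counts()
# yields the share of characters per interval, e.g.
# >>> s = pd.Series([1, 2, 3, 10, 20, 40])
# >>> pd.cut(s, 4).value_counts().sort_index(ascending=False) / len(s)
# The first entry (top interval, "q4") divided by the last ("q1") is the
# quotient stored in self.quartile_quot.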
def get_centrality_ranks(self):
ranks = [c for c in self.centralities.columns if c.endswith("rank")]
self.centralities['centrality_rank_avg'] = (self.centralities[ranks]
.sum(axis=1) /
len(ranks))
self.centralities['centrality_rank_std'] = (self.centralities[ranks]
.std(axis=1) /
len(ranks))
for metric in ['centrality_rank_avg', 'centrality_rank_std']:
self.centralities[metric+"_rank"] = (self.centralities[metric]
.rank(method='min',
ascending=True))
def get_rank_stability_measures(self):
ranks = [c
for c in self.centralities.columns
if c.endswith("rank")][:8]
self.rank_stability = (self.centralities[ranks]
.corr(method='spearman'))
np.fill_diagonal(self.rank_stability.values, np.nan)
self.rank_stability.index.name = "rank_name"
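# Illustrative sketch (toy ranks): rank stability is a plain Spearman
# correlation between the *_rank columns, e.g.
# >>> ranks = pd.DataFrame({'degree_rank': [1, 2, 3], 'words_rank': [1, 3, 2]})
# >>> ranks.corr(method='spearman')
# The diagonal is masked with NaN above so that the mean/std reported in
# the corpus table are not inflated by the trivial rho = 1 of each column
# with itself.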
def get_structural_ranking_measures(self):
graph_ranks = ['degree_rank', 'closeness_rank', 'betweenness_rank',
'strength_rank', 'eigenvector_centrality_rank']
content_ranks = ['frequency_rank', 'speech_acts_rank', 'words_rank']
avg_graph_rank = (self.centralities[graph_ranks]
.mean(axis=1)
.rank(method='min'))
avg_content_rank = (self.centralities[content_ranks]
.mean(axis=1)
.rank(method='min'))
self.centralities["avg_graph_rank"] = avg_graph_rank
self.centralities["avg_content_rank"] = avg_content_rank
self.centralities["overall_avg"] = (self.centralities[
["avg_graph_rank",
"avg_content_rank"]]
.mean(axis=1))
self.centralities["overall_avg_rank"] = (self.centralities[
"overall_avg"]
.rank(method='min'))
struct_corr = stats.spearmanr(avg_content_rank, avg_graph_rank)[0]
self.graph_metrics["spearman_rho_content_vs_network"] = struct_corr
top, bottom = np.split(self.centralities,
[int(.5*len(self.centralities))])
struct_corr_top = stats.spearmanr(top["avg_content_rank"],
top["avg_graph_rank"])[0]
self.graph_metrics["spearman_rho_content_vs_network_top"] = (
struct_corr_top)
struct_corr_bottom = stats.spearmanr(bottom["avg_content_rank"],
bottom["avg_graph_rank"])[0]
self.graph_metrics["spearman_rho_content_vs_network_bottom"] = (
struct_corr_bottom)
def get_characters_all_in_index(self):
appeared = set()
for i, speakers in enumerate(self.segments):
for sp in speakers:
appeared.add(sp)
if len(appeared) >= self.num_chars_total:
i += 1
all_in_index = float(i/len(self.segments))
return all_in_index
def export_char_metrics(self):
self.centralities.index.name = "name"
self.centralities.to_csv(
os.path.join(
self.outputfolder,
"%s_%s_chars.csv" % (self.ID, self.title)
))
self.rank_stability.to_csv(
os.path.join(
self.outputfolder,
"%s_%s_spearmanrho.csv" % (self.ID, self.title)
))
def export_graph_metrics(self):
self.graph_metrics.index.name = "ID"
self.graph_metrics.index = self.graph_metrics["ID"]
self.graph_metrics.to_csv(os.path.join(
self.outputfolder,
"%s_%s_graph.csv" % (self.ID, self.title)),
sep=";")
self.export_table(
self.get_drama_change_rate(),
"_".join([self.filepath, self.title, "change_rates"])+".csv")
nx.write_edgelist(
self.G,
os.path.join(self.outputfolder,
"_".join([str(self.ID),
self.title, "edgelist"])+".csv"),
delimiter=";",
data=["weight"])
plotGraph(
self.G,
filename=os.path.join(self.outputfolder,
"_".join([str(self.ID),
self.title])+".svg"))
def export_table(self, t, filepath):
with open(filepath, 'w') as f: # Just use 'w' mode in 3.x
csvwriter = csv.writer(f, delimiter=';')
csvwriter.writerow(["segment", "change_rate"])
for i, t in enumerate(t):
csvwriter.writerow([i+1, t])
def get_graph_metrics(self):
graph_metrics = self.analyze_graph()
graph_metrics["ID"] = self.ID
(graph_metrics["average_path_length_random"],
graph_metrics["clustering_coefficient_random"]) = (
self.randomize_graph(graph_metrics.get("charcount"),
graph_metrics.get("edgecount")))
graph_metrics["year"] = self.metadata.get("date_definite")
graph_metrics["author"] = self.metadata.get("author")
graph_metrics["title"] = self.title
graph_metrics["filename"] = self.metadata.get("filename")
graph_metrics["genretitle"] = self.metadata.get("genretitle")
graph_metrics["subtitle"] = self.metadata.get("subtitle")
graph_metrics["segment_count"] = self.metadata.get("segment_count")
graph_metrics["count_type"] = self.metadata.get("count_type")
graph_metrics["all_in_index"] = self.get_characters_all_in_index()
(graph_metrics["change_rate_mean"],
graph_metrics["change_rate_std"]) = (
self.get_drama_change_rate_metrics())
graph_metrics["final_scene_size_index"] = self.get_final_scene_size()
graph_metrics["characters_last_in"] = self.get_characters_last_in()
return pd.DataFrame.from_dict(graph_metrics, orient='index').T
def get_characters_last_in(self):
last_chars = self.segments[-1]
return ",".join(last_chars)
def create_graph(self):
"""
First creates a bipartite graph with scenes on the one hand,
and speakers in one scene on the other.
The graph is then projected into a unipartite graph of speakers,
which are linked if they appear in one scene together.
Returns a networkx weighted projected graph.
"""
speakerset = self.segments
B = nx.Graph()
labels = {}
for i, speakers in enumerate(speakerset):
# speakers are Character objects
source = str(i)
targets = speakers
# if args.debug:
# print("SOURCE, TARGET:", source, targets)
if source not in B.nodes():
B.add_node(source, bipartite=0)
labels[source] = source
for target in targets:
if target not in B.nodes():
B.add_node(target, bipartite=1)
B.add_edge(source, target)
scene_nodes = set(n
for n, d in B.nodes(data=True)
if d['bipartite'] == 0)
person_nodes = set(B) - scene_nodes
nx.is_bipartite(B)
G = nx.bipartite.weighted_projected_graph(B, person_nodes)
if self.major_only:
G = max(nx.connected_component_subgraphs(G), key=len)
return G
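# Minimal sketch of the projection used above (toy data, assumed names):
# >>> B = nx.Graph()
# >>> B.add_nodes_from(['0', '1'], bipartite=0)              # two scenes
# >>> B.add_nodes_from(['Hamlet', 'Gertrude'], bipartite=1)  # two speakers
# >>> B.add_edges_from([('0', 'Hamlet'), ('0', 'Gertrude'), ('1', 'Hamlet')])
# >>> P = nx.bipartite.weighted_projected_graph(B, {'Hamlet', 'Gertrude'})
# >>> P['Hamlet']['Gertrude']['weight']
# 1
# Two characters are linked with a weight equal to the number of scenes
# they share.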
def analyze_graph(self):
"""
Computes various network metrics for the drama graph G and returns
them as a dictionary with the keys

    charcount, edgecount, maxdegree, avgdegree, density,
    avgpathlength, clustering_coefficient, connected_components,
    component_sizes, diameter.

Metrics that cannot be computed (empty graph, disconnected graph,
division by zero) fall back to "NaN"; average path length and
diameter are computed on the largest connected component if the
full graph is not connected.
"""
G = self.G
values = {}
values["charcount"] = len(G.nodes())
values["edgecount"] = len(G.edges())
try:
values["maxdegree"] = max(G.degree().values())
except:
self.logger.error(
"ID %s ValueError: max() arg is an empty sequence" % self.ID)
values["maxdegree"] = "NaN"
try:
values["avgdegree"] = sum(G.degree().values())/len(G.nodes())
except:
self.logger.error(
"ID %s ZeroDivisionError: division by zero" % self.ID)
values["avgdegree"] = "NaN"
try:
values["density"] = nx.density(G)
except:
values["density"] = "NaN"
try:
values["avgpathlength"] = nx.average_shortest_path_length(G)
except nx.NetworkXError:
self.logger.error(
"ID %s NetworkXError: Graph is not connected." % self.ID)
try:
self.randomization = 50
values["avgpathlength"] = nx.average_shortest_path_length(
max(nx.connected_component_subgraphs(G), key=len))
except:
values["avgpathlength"] = "NaN"
except:
self.logger.error("ID %s NetworkXPointlessConcept: ('Connectivity"
"is undefined for the null graph.')" % self.ID)
values["avgpathlength"] = "NaN"
try:
values["clustering_coefficient"] = nx.average_clustering(G)
except:
self.logger.error(
"ID %s ZeroDivisionError: float division by zero" % self.ID)
values["clustering_coefficient"] = "NaN"
values["connected_components"] = nx.number_connected_components(G)
components = nx.connected_component_subgraphs(G)
values["component_sizes"] = [len(c.nodes()) for c in components]
try:
values["diameter"] = nx.diameter(G)
except nx.NetworkXError:
self.logger.error(
"ID %s NetworkXError: Graph is not connected." % self.ID)
values["diameter"] = nx.diameter(
max(nx.connected_component_subgraphs(G), key=len))
return values
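# Illustrative check on a toy graph (not drama data):
# >>> H = nx.path_graph(4)
# >>> len(H.nodes()), len(H.edges()), nx.density(H), nx.diameter(H)
# (4, 3, 0.5, 3)
# The same fallbacks to "NaN" or to the largest component apply when the
# drama graph is empty or disconnected.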
def analyze_characters(self):
"""
Computes per-character metrics of the drama graph (betweenness,
degree, weighted degree/strength, closeness, closeness on the
character's own connected component, and eigenvector centrality)
and writes them into the corresponding columns of self.centralities.
"""
# initialize columns with 0
for metric in ['betweenness', 'degree',
'closeness', 'closeness_corrected',
'strength',
'eigenvector_centrality']:
self.centralities[metric] = 0
for char, metric in nx.betweenness_centrality(self.G).items():
self.centralities.loc[char, 'betweenness'] = metric
for char, metric in nx.degree(self.G).items():
self.centralities.loc[char, 'degree'] = metric
for char, metric in nx.degree(self.G, weight="weight").items():
self.centralities.loc[char, 'strength'] = metric
for char, metric in nx.closeness_centrality(self.G).items():
self.centralities.loc[char, 'closeness'] = metric
for g in nx.connected_component_subgraphs(self.G):
for char, metric in nx.closeness_centrality(g).items():
self.centralities.loc[char, 'closeness_corrected'] = metric
try:
for char, metric in nx.eigenvector_centrality(
self.G, max_iter=500).items():
self.centralities.loc[char, 'eigenvector_centrality'] = metric
except Exception as e:
self.logger.error(
"%s networkx.exception.NetworkXError:"
" eigenvector_centrality(): power iteration failed to converge"
" in 500 iterations." % self.ID)
self.centralities['avg_distance'] = 1/self.centralities['closeness']
self.centralities['avg_distance_corrected'] = (
1 / self.centralities['closeness_corrected'])
def transpose_dict(self, d):
"""
Transpose dict of character-network metrics to an exportable dict,
essentially transposes rows and columns of the character.csv.
"""
td = {}
try:
for cent, chars in d.items():
for char in chars:
td[char] = {}
except:
pass
try:
for cent, chars in d.items():
for char, value in chars.items():
td[char][cent] = value
except:
pass
return td
def randomize_graph(self, n, e):
"""
Creates `self.randomization` random graphs (1000 by default) with
networkx.gnm_random_graph(nodecount, edgecount),
and computes average_clustering_coefficient and
average_shortest_path_length, to compare with drama-graph.
Returns a tuple:
randavgpathl, randcluster = (float or "NaN", float or "NaN")
"""
randcluster = 0
randavgpathl = 0
# n = node count and e = edge count of the drama graph;
# c and a count the random samples for which the clustering coefficient
# and the average shortest path length could actually be computed.
c = 0
a = 0
if not self.randomization: # hack so that quartett poster works
self.randomization = 50
for i in tqdm(range(self.randomization), desc="Randomization",
mininterval=1):
R = nx.gnm_random_graph(n, e)
try:
randcluster += nx.average_clustering(R)
c += 1
except ZeroDivisionError:
pass
j = 0
while True:
j += 1
try:
R = nx.gnm_random_graph(n, e)
randavgpathl += nx.average_shortest_path_length(R)
a += 1
except:
pass
else:
break
if j > 50:
randavgpathl = "NaN"
break
try:
randcluster = randcluster / c
except:
randcluster = "NaN"
try:
randavgpathl = randavgpathl / a
except:
randavgpathl = "NaN"
return randavgpathl, randcluster
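# Minimal sketch of the null model (illustrative sizes): each sample is a
# G(n, m) random graph with the observed node and edge counts, e.g.
# >>> R = nx.gnm_random_graph(10, 20)
# >>> nx.average_clustering(R), nx.average_shortest_path_length(R)
# Averaging over many samples gives the *_random baselines reported in the
# corpus metrics; the retry loop above guards against disconnected samples.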
def get_regression_metrics(self):
metrics = ['degree', 'closeness', 'betweenness',
'strength', 'eigenvector_centrality',
'frequency', 'speech_acts', 'words']
metrics_dfs = []
for metric in metrics:
temp_df = pd.DataFrame(columns=[metric])
temp_df[metric+"_interval"] = [
i.mid for i in pd.cut(self.centralities[metric], 10)
.value_counts()
.index.tolist()]
temp_df[metric] = (pd.cut(self.centralities[metric], 10)
.value_counts()
.tolist())
temp_df.sort_values(metric+"_interval", inplace=True)
temp_df.reset_index(drop=True, inplace=True)
metrics_dfs.append(temp_df)
index = ["linear", "exponential", "powerlaw", "quadratic"]
reg_metrics = pd.DataFrame(columns=metrics, index=index)
# fit linear models
fig = plt.figure(figsize=(len(metrics)*4, len(index)*4))
gs = gridspec.GridSpec(len(index), len(metrics))
i = 0 # subplot enumerator
for metric, temp_df in zip(metrics, metrics_dfs):
X = np.array(temp_df[metric+"_interval"]).reshape(-1, 1)
y = np.array(temp_df[metric]).reshape(-1, 1)
model = linear_model.LinearRegression()
model.fit(X, y)
score = model.score(X, y)
reg_metrics.loc["linear", metric] = score
ax = plt.subplot(gs[i])
plt.scatter(X, y)
plt.plot(X, model.predict(X), 'r--',
label='coeff: %.3f, intercept: %.3f' % (model.coef_[0][0],
model.intercept_[0]))
# plt.legend(fontsize='x-small')
ax.set_title(metric + " linear R2 %.3f" % score, size='medium')
ax.set_xlabel(metric)
ax.set_ylabel("value counts")
i += 1
# fit quadratic models
for metric, temp_df in zip(metrics, metrics_dfs):
X = np.array(temp_df[metric+"_interval"]).reshape(-1, 1)
y = np.array(temp_df[metric]).reshape(-1, 1)
regr = linear_model.LinearRegression()
model = Pipeline(steps=[('polyfeatures', PolynomialFeatures(2)),
('reg', regr)])
model.fit(X, y)
score = model.score(X, y)
reg_metrics.loc["quadratic", metric] = score
ax = plt.subplot(gs[i])
plt.scatter(X, y)
plt.plot(X, model.predict(X), 'r--',
label='coeff: %s, intercept: %s' % (
str(model.named_steps['reg'].coef_),
str(model.named_steps['reg'].intercept_)))
# plt.legend(fontsize='x-small')
ax.set_title(metric + " quadratic R2 %.3f" % score, size='medium')
ax.set_xlabel(metric)
ax.set_ylabel("value counts")
i += 1
# fit exp models
for metric, temp_df in zip(metrics, metrics_dfs):
X = np.array(temp_df[metric+"_interval"]).reshape(-1, 1)
y = np.array(temp_df[metric]).reshape(-1, 1)
logy = ma.log(y).reshape(-1, 1)
model = linear_model.LinearRegression()
model.fit(X, logy)
score = model.score(X, logy)
reg_metrics.loc["exponential", metric] = score
ax = plt.subplot(gs[i])
plt.scatter(X, logy)
plt.plot(X, model.predict(X), 'r--')
# plt.legend(fontsize='x-small')
ax.set_title(metric + " exp. R2 %.3f" % score, size='medium')
ax.set_xlabel(metric)
ax.set_ylabel("value counts (log)")
i += 1
# fit power law models
for metric, temp_df in zip(metrics, metrics_dfs):
X = np.array(temp_df[metric+"_interval"])
y = np.array(temp_df[metric])
logx = ma.log(X).reshape(-1, 1)
logy = ma.log(y).reshape(-1, 1)
model = linear_model.LinearRegression()
model.fit(logx, logy)
score = model.score(logx, logy)
reg_metrics.loc["powerlaw", metric] = score
ax = plt.subplot(gs[i])
plt.scatter(logx, logy)
plt.plot(logx, model.predict(logx), 'r--',
label='coeff: %s, intercept: %s' % (str(model.coef_),
str(model.intercept_)))
# plt.legend(fontsize='x-small')
ax.set_title(metric + " power law R2 %.3f" % score, size='medium')
ax.set_xlabel(metric + " (log)")
ax.set_ylabel("value counts (log)")
i += 1
plt.tight_layout()
self.reg_metrics = reg_metrics.T
self.reg_metrics.index.name = "metrics"
self.reg_metrics["max_val"] = self.reg_metrics.apply(
lambda x: np.max(x), axis=1)
self.reg_metrics["max_type"] = self.reg_metrics.apply(
lambda x: np.argmax(x), axis=1)
for metric in metrics:
self.graph_metrics[metric+"_reg_type"] = (self.reg_metrics
.loc[metric,
'max_type'])
self.graph_metrics[metric+"_reg_val"] = (self.reg_metrics
.loc[metric,
'max_val'])
self.reg_metrics.to_csv(os.path.join(self.outputfolder,
"%s_%s_regression_table.csv"
% (self.ID, self.title)))
for temp_df in metrics_dfs:
temp_df.to_csv(os.path.join(os.path.join(self.outputfolder),
"%s_%s_regression_table.csv"
% (self.ID, self.title)),
mode='a', header=True)
fig.savefig(os.path.join(self.outputfolder,
'%s_%s_regression_plots.png'
% (self.ID, self.title)))
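# Minimal sketch of the power-law fit above (toy data): a straight line in
# log-log space corresponds to y = c * x**k, e.g.
# >>> Xs = np.array([1., 2., 4., 8.]).reshape(-1, 1)
# >>> ys = 3. * Xs.ravel() ** 2
# >>> m = linear_model.LinearRegression().fit(np.log(Xs), np.log(ys))
# >>> m.coef_[0], np.exp(m.intercept_)    # ~ (2.0, 3.0): exponent and prefactor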
def exponential_func(t, a, b):
return a + t*np.log(t)
| mit |
jlegendary/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity,
# which indexing with [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
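# Usage sketch (illustrative, not part of the library source): a concrete
# subclass is fitted like any other estimator, e.g.
# >>> from sklearn.datasets import load_iris
# >>> from sklearn.tree import DecisionTreeClassifier
# >>> iris = load_iris()
# >>> clf = DecisionTreeClassifier(max_depth=3).fit(iris.data, iris.target)
# >>> clf.predict(iris.data[:2])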
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
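# Illustrative sketch (hypothetical usage, not from the original module): a minimal use
# of the predict_proba and apply methods documented above; the dataset and parameter
# choices are arbitrary.
def _example_decision_tree_usage():
    from sklearn.datasets import load_iris
    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(iris.data, iris.target)
    proba = clf.predict_proba(iris.data[:5])   # per-class fractions in each sample's leaf
    leaves = clf.apply(iris.data[:5])          # node id of the leaf each sample falls into
    return proba, leaves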
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
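# Illustrative sketch (hypothetical usage, not from the original module): as the
# docstring above warns, extra-trees are intended for use inside ensembles, so this
# wraps ExtraTreeClassifier in a BaggingClassifier; dataset and parameters are arbitrary.
def _example_extra_tree_in_ensemble():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import BaggingClassifier
    iris = load_iris()
    ensemble = BaggingClassifier(ExtraTreeClassifier(random_state=0),
                                 n_estimators=10, random_state=0)
    ensemble.fit(iris.data, iris.target)
    return ensemble.score(iris.data, iris.target)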
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
jorisvandenbossche/numpy | doc/source/conf.py | 1 | 11293 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, os, re
# Minimum version, enforced by sphinx
needs_sphinx = '2.2.0'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = [
'sphinx.ext.autodoc',
'numpydoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.autosummary',
'sphinx.ext.graphviz',
'sphinx.ext.ifconfig',
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'sphinx.ext.imgmath',
]
imgmath_image_format = 'svg'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
master_doc = 'contents'
# General substitutions.
project = 'NumPy'
copyright = '2008-2019, The SciPy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import numpy
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = numpy.__version__
print("%s %s" % (version, release))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
def setup(app):
# add a config value for `ifconfig` directives
app.add_config_value('python_version_major', str(sys.version_info.major), 'env')
app.add_lexer('NumPyC', NumPyLexer(stripnl=False))
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if not os.path.isdir(themedir):
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init && git submodule update")
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("https://scipy.org/", "Scipy.org"),
("https://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": [("https://numpy.org/", "NumPy.org"),
("https://numpy.org/doc", "Docs"),
]
}
html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
html_additional_pages = {
'index': 'indexcontent.html',
}
html_title = "%s v%s Manual" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_use_modindex = True
html_copy_source = False
html_domain_indices = False
html_file_suffix = '.html'
htmlhelp_basename = 'numpy'
if 'sphinx.ext.pngmath' in extensions:
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
plot_html_show_formats = False
plot_html_show_source_link = False
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the NumPy community'
latex_documents = [
('reference/index', 'numpy-ref.tex', 'NumPy Reference',
_stdauthor, 'manual'),
('user/index', 'numpy-user.tex', 'NumPy User Guide',
_stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
latex_elements = {
'fontenc': r'\usepackage[LGR,T1]{fontenc}'
}
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Texinfo output
# -----------------------------------------------------------------------------
texinfo_documents = [
("contents", 'numpy', 'NumPy Documentation', _stdauthor, 'NumPy',
"NumPy: array processing for numbers, strings, records, and objects.",
'Programming',
1),
]
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/dev', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org', None)
}
# -----------------------------------------------------------------------------
# NumPy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc to generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
import glob
autosummary_generate = True
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
plot_rcparams = {
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
# strip decorators, which would resolve to the source of the decorator
# possibly an upstream bug in getsourcefile, bpo-1764286
try:
unwrap = inspect.unwrap
except AttributeError:
pass
else:
obj = unwrap(obj)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(numpy.__file__))
if 'dev' in numpy.__version__:
return "https://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
fn, linespec)
else:
return "https://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
numpy.__version__, fn, linespec)
from pygments.lexers import CLexer
from pygments import token
import copy
class NumPyLexer(CLexer):
name = 'NUMPYLEXER'
tokens = copy.deepcopy(CLexer.tokens)
# Extend the regex for valid identifiers with @
for k, val in tokens.items():
for i, v in enumerate(val):
if isinstance(v, tuple):
if isinstance(v[0], str):
val[i] = (v[0].replace('a-zA-Z', 'a-zA-Z@'),) + v[1:]
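# Illustrative note (assumption, not from the original file): with this lexer registered
# in setup() above via app.add_lexer('NumPyC', ...), a reST source can request the
# extended C highlighting with a directive such as:
#
#   .. code-block:: NumPyC
#
#      @TYPE@ npy_@name@_example;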
| bsd-3-clause |
bmazin/SDR | Setup/DetectorAnalysis/histQ.py | 1 | 2303 | #!/usr/bin/python
import numpy as np
from matplotlib import pyplot as plt
#Plots Histogram of f, Q, and Distance of f to nearest neighbor, Q vs f, Dist to neigh vs f and saves it to a pdf. You need to change the File and pdftitle (and possibly the text position in line 79).
File= '20121116/FL1-sci4a-DF-good-fits.txt'
pdftitle='/home/sean/data/fitshist/FL1-sci4a-DF-good.pdf'
autofit=np.loadtxt('/home/sean/data/%s'%File)
freqs=autofit[:,1]
Qs=autofit[:,2]
Qs=[x/1000 for x in Qs]
ds=[]
fs=[]
freq=sorted(freqs)
for i in xrange(1,len(freqs)-1):
x=abs(freq[i]-freq[i+1])
y=abs(freq[i]-freq[i-1])
if x>=y:
ds.append(y)
else:
ds.append(x)
fs.append(freq[i])
ds=[x*1000 for x in ds]
mf=np.median(freqs)
sf=np.std(freqs)
mq=np.median(Qs)
sq=np.std(Qs)
md=np.median(ds)
sd=np.std(ds)
nres=len(freqs)
fig = plt.figure(figsize=(6,8))
plt.subplots_adjust(left = 0.1, right= 0.96, bottom= .07, top= .96, wspace=0.3, hspace=0.4)
ax=fig.add_subplot(321)
ax.hist(freqs,bins=100, color='k')
ax.set_xlabel('Frequency (GHz)\nmedian=%f, std=%f'%(mf,sf), size=8)
ax.set_ylabel('Number', size=8)
ax.set_title('Histogram of Frequency', size=9)
ax.tick_params(labelsize=8)
ax2=fig.add_subplot(323)
ax2.hist(Qs, bins=100, color='k')
ax2.set_xlabel('Q(k)\nmedian=%f, std=%f'%(mq,sq), size=8)
ax2.set_ylabel('Number', size=8)
ax2.set_title('Histogram of Q', size=9)
ax2.set_xlim(0,300)
ax2.tick_params(labelsize=8)
ax3=fig.add_subplot(325)
ax3.hist(ds, bins=100, color='k')
ax3.set_xlabel('Distance to Nearest Neighbor (MHz)\nmedian=%f, std=%f'%(md,sd), size=8)
ax3.set_ylabel('Number', size=8)
ax3.set_title('Distance of f0 to Nearest Neighbor', size=9)
ax3.set_xlim(0,6)
ax3.tick_params(labelsize=8)
ax4=fig.add_subplot(322)
ax4.plot(freqs,Qs,'r,')
ax4.set_xlabel('Resonant Frequency (GHz)', size=8)
ax4.set_ylabel('Q(k)', size=8)
ax4.set_title('Q vs f0', size=9)
ax4.tick_params(labelsize=8)
ax4.set_ylim(0,300)
ax5=fig.add_subplot(324)
ax5.plot(fs,ds,'b,')
ax5.set_xlabel('Resonant Frequency (GHz)', size=8)
ax5.set_ylabel('Distance of f to Nearest Neighbor (MHz)', size=8)
ax5.set_title('Nearest Neighbor vs f0', size=9)
ax5.tick_params(labelsize=8)
ax5.set_ylim(0,20)
ax5.text(2.8,-15,'file name=\n%s\nnumber of resonators = %d'%(File, nres), size=8.5)
fig.savefig(pdftitle)
plt.show()
plt.close()
| gpl-2.0 |
arborworkflows/ProjectManager | tangelo/ArborWorksteps.py | 1 | 30922 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 09:00:25 2013
@author: clisle
"""
import pymongo
from bson import ObjectId
from pymongo import Connection
import json
import time
# needed to import R-based algorithms from Geiger and Picante
from ArborAlgorithmManagerAPI import ArborAlgorithmManager
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
raise StopIteration
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
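# Illustrative sketch (hypothetical usage, not from the original module): a minimal use
# of the switch helper above, mirroring how DatasetFilteringWorkstep.execute uses it
# further down; the mapping to mongo comparison operators is only an example.
def _example_switch_usage(operation):
    for case in switch(operation):
        if case('GreaterThan'):
            return '$gt'
        if case('LessThan'):
            return '$lt'
        if case():  # default branch when no earlier case matched
            return None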
# this is the abstract definition of a workstep inside a workflow. Worksteps have a name,
# an update method, and an execute method. The semantics are similar to the original VTK
# pipeline, where "update" is called on a workstep, and it will invoke update on its predecessor
# filters, if needed.
class Workstep(object):
def __init__(self):
self.name = 'default'
self.projectName = 'default'
self.modifiedTime = -9999
# added parameters array so can be serialized for all classes
self.parameters = dict()
def setProjectName(self,projectname):
self.projectName = projectname
def setName(self,namestring):
self.name = namestring;
def getProjectName(self):
return self.projectName
# subclasses will have a variety of parameters, put them all in a named
# keystore for consistency. This will enable serialization of all parameters to storage
def setParameter(self,parameterName,parameterValue):
self.parameters[parameterName] = parameterValue
    # when a workstep is serialized from storage, all of its parameters are available in a single dict,
    # so just assign the whole dictionary at once
def setParameters(self,parameterDictionary):
self.parameters = parameterDictionary
    # method that is called to make sure the output of a workstep is current;
# this method will contain whatever tests are needed to determine if the filter
# needs to be run
def update(self):
raise NotImplementedError
self.execute()
# internal method that is invoked by the update method when the output needs to
# be generated. This is where the actual "processing" of a filter is done. When
# execute is finished, there should be output written into the output location,
# whatever that location is (may vary depending on the subclass?)
def execute(self):
self.writeOutput()
raise NotImplementedError
def writeOutput(self):
# write output here
pass
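# Illustrative sketch (hypothetical, not from the original module): the smallest
# possible Workstep subclass honoring the update/execute contract described above;
# a real subclass would compare input modification times before deciding to re-run.
class _ExampleNoOpWorkstep(Workstep):
    def update(self):
        # nothing upstream to check in this sketch, so always regenerate the output
        self.execute()
    def execute(self):
        # a real subclass would compute results here before writing them out
        self.writeOutput()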
# define a custom class for exceptions
class WorkstepException(Exception):
def __init__(self,message):
Exception.__init__(self, message)
# utility class to represent information about flows between worksteps. It is expected that there will be a hierarchical
# class definition of data types
class WorkstepInformationObject:
def __init__(self):
self.type = 'data.arbor.any'
self.collectionName = 'default'
self.sourceObject = None
# this is set by the filter when output is generated
self.modifiedTime = -9999
def typeMatches(self,typedefinition):
return self.type == typedefinition
def setSourceObject(self,objectPtr):
self.sourceObject = objectPtr
def printSelf(self):
print "infoObject: ",self.modifiedTime, self.type, self.collectionName
# This is the workstep type which outputs a dataset spec, not the dataset itself. The spec is defined
# as a table with the tuple (project,datatype,datasetname) contained in the output. This class stores the spec
# table in the output collection when it executes. This source is used when the steps that follow in the
# workflow are server-side processing algorithms able to read the data directly, since no actual dataset
# information comes out of this step, only the specification of which dataset the user wants to process.
class DatasetSpecTableSourceWorkstep(Workstep):
def __init__(self):
Workstep.__init__(self)
self.type = "arbor.analysis.spectablesource"
self.inputType = "data.table.spectable"
self.outputType = "data.table.spectable"
self.databaseName = ''
self.outputInformation = WorkstepInformationObject()
self.inputs = []
self.outputInitialized = False;
    # utility method to create unique collection names for worksteps within workflows.
def getOutputCollectionNameForWorkstep(self):
newname = self.projectName+"."+self.name
return newname
# set the database to read/write to
def setDatabaseName(self,dbname):
self.databaseName = dbname;
# this is a source object, no action performed on addInput
def addInput(self,OutputInformation):
pass
# output the type and the collection value
def getOutput(self):
outinfo = self.outputInformation
outinfo.sourceObject = self
outinfo.type = self.outputType
outinfo.collectionName = self.getOutputCollectionNameForWorkstep()
#print self.name," sending outinfo object:"
#outinfo.printSelf()
self.outputInitialized = True;
return outinfo
def printSelf(self):
print "workstep: ",self.name, " time: ",self.modifiedTime
print " output collection: ",self.outputInformation.collectionName
for p in self.parameters:
print " paramter ", p, " = ",self.parameters[p]
def InputTypeMatches(self,informationObject):
# return true if the type passed is the type of data we are expecting
return informationObject.typeMatches(self.inputType)
# this method examines the input to see if any change has occurred. The
# input information objects are examined. The execute method is called if
# needed to generate updated output because input(s) have been modified.
def update(self):
        # if this is the last step in the filter chain, then force initialization
# by hand before the first update call
if not self.outputInitialized:
self.getOutput()
# since this only updates a spec, just run each time
self.execute()
# run the filter. This reads the parameters and outputs a table containing
# the parameter information
def execute(self):
print self.name+" executing"
# setup mongo connection and look through all input collections, copying
connection = Connection('localhost', 27017)
if len(self.databaseName)>0:
db = connection[self.databaseName]
else:
db = connection['arbor']
outputcoll = db[self.outputInformation.collectionName]
# clear out the output to prepare for running an algorithm
outputcoll.drop()
# loop through all parameters on this filter and output a line in the collection
# for each parameter as a key/value pair
for thisparam in self.parameters:
outdict = dict()
outdict['key'] = thisparam
outdict['value'] = self.parameters[thisparam]
# find all documents in this input (repeated for each input)
outputcoll.insert(outdict)
# pause the writer enough to allow the write to complete? Found example code here:
# http://sourceforge.net/u/rick446/random/ci/master/tree/lib.py
db.last_status()
        # reset the filter's modified time and assign it to the output object
self.outputInformation.modifiedTime = self.modifiedTime = time.time()
connection.close()
    # delete output collection so there is nothing left in the database. This is inherited by subclasses
    # as well, so DatasetFilterWorksteps also have this defined.
def deleteOutput(self):
connection = Connection('localhost', 27017)
if len(self.databaseName)>0:
db = connection[self.databaseName]
else:
db = connection['arbor']
outputcoll = db[self.outputInformation.collectionName]
# clear out the output to prepare for running an algorithm
outputcoll.drop()
# This is a sample class that sets the profile for worksteps that process input to output
# invoking their processing on the data as it passes through
class DatasetCopyWorkstep(Workstep):
def __init__(self):
Workstep.__init__(self)
self.type = "arbor.analysis.datasetcopy"
self.inputType = "data.arbor.any"
self.outputType = "data.arbor.any"
self.databaseName = ''
self.outputInformation = WorkstepInformationObject()
self.inputs = []
self.outputInitialized = False;
    # utility method to create unique collection names for worksteps within workflows.
def getOutputCollectionNameForWorkstep(self):
newname = self.projectName+"."+self.name
return newname
# set the database to read/write to
def setDatabaseName(self,dbname):
self.databaseName = dbname;
# define the workstep input to be connected to the output information of another step,
# The output information is a tuple: [parent_workstate_name,datatype_declaration,collection_name]
def addInput(self,OutputInformation):
if self.InputTypeMatches(OutputInformation):
self.inputs.append(OutputInformation)
print self.name," input list is now:"
for thisInput in self.inputs:
print " input: ",thisInput.sourceObject.name, " time: ",thisInput.sourceObject.modifiedTime
else:
raise WorkstepException("type mismatch at input of workstep "+self.name)
# output the type and the collection value
def getOutput(self):
outinfo = self.outputInformation
outinfo.sourceObject = self
outinfo.type = self.outputType
outinfo.collectionName = self.getOutputCollectionNameForWorkstep()
print self.name," sending outinfo object:"
outinfo.printSelf()
self.outputInitialized = True;
return outinfo
def printSelf(self):
print "workstep: ",self.name, " time: ",self.modifiedTime
for thisInput in self.inputs:
print "input: ",thisInput.sourceObject.name, " time: ",thisInput.sourceObject.modifiedTime
print "output collection: ",self.outputInformation.collectionName
def InputTypeMatches(self,informationObject):
# return true if the type passed is the type of data we are expecting
return informationObject.typeMatches(self.inputType)
# this method examines the input to see if any change has occurred. The
# input information objects are examined. The execute method is called if
# needed to generate updated output because input(s) have been modified.
# Execute methods might require access to the API to lookup datasets. If so,
# the method will be called with the Arbor API defined.
def update(self,arborapi=None):
# if this is the last chain in the filter, then force initialization
# by hand before the first update call
if not self.outputInitialized:
self.getOutput()
print self.name," update called"
filterNeedsToBeRun = False;
# go through all inputs and see if any of them have changed. If so,
# then we need to re-run this filter
for thisInput in self.inputs:
print "requesting update of input: ",thisInput.sourceObject.name
thisInput.sourceObject.update()
# if the previous step hasn't run yet, invoke it
print "comparing source time of ",thisInput.modifiedTime, " with ",self.modifiedTime
if thisInput.modifiedTime > self.modifiedTime:
# input has changed since this filter executed, so mark for execution
filterNeedsToBeRun = True;
# if this filter needs to run, then run it and clear out the modified flags on the inputs
if filterNeedsToBeRun:
if arborapi == None:
self.execute()
else:
self.execute(arborapi)
# run the filter. This base class is a copy filter. A separate mongo connection
# is opened and closed during the execution of the filter. The filter's modified time
# is updated to allow for pipeline execution behavior.
def execute(self, arborapi=None):
print self.name+" executing"
# setup mongo connection and look through all input collections, copying
connection = Connection('localhost', 27017)
if len(self.databaseName)>0:
db = connection[self.databaseName]
else:
db = connection['arbor']
outputcoll = db[self.outputInformation.collectionName]
# clear out the output to prepare for running an algorithm
outputcoll.drop()
# loop through all inputs and process all objects in the inputs
for thisInput in self.inputs:
inputcoll = db[thisInput.collectionName]
# find all documents in this input (repeated for each input)
queryResults = inputcoll.find()
print "found that ", thisInput.collectionName," has ",queryResults.count(), " records"
# write the documents into the output collection and indicate the output time changed
for result in queryResults:
outputcoll.insert(result)
# pause the writer enough to allow the write to complete? Found example code here:
# http://sourceforge.net/u/rick446/random/ci/master/tree/lib.py
db.last_status()
        # reset the filter's modified time and assign it to the output object
self.outputInformation.modifiedTime = self.modifiedTime = time.time()
connection.close()
    # delete output collection so there is nothing left in the database. This is inherited by subclasses
    # as well, so DatasetFilterWorksteps also have this defined.
def deleteOutput(self):
connection = Connection('localhost', 27017)
if len(self.databaseName)>0:
db = connection[self.databaseName]
else:
db = connection['arbor']
outputcoll = db[self.outputInformation.collectionName]
# clear out the output to prepare for running an algorithm
outputcoll.drop()
# This workstep functions as a source that reads a collection and outputs the collection
# data when connected as the source of a pipeline. It has one parameter, which is the name of the
# dataset (stored in a mongo collection) to be streamed out from this workstep.
class DatasetSourceWorkstep(Workstep):
def __init__(self):
Workstep.__init__(self)
self.inputType = "none"
self.type = "arbor.analysis.datasetsource"
self.outputType = "data.arbor.any"
self.databaseName = ''
self.outputs = []
        # setup the parameter for this filter with an empty dataset name
self.parameters['dataset'] = ''
def setSourceCollectionName(self,collectionName):
self.parameters['dataset'] = collectionName
    # utility method to create unique collection names for worksteps within workflows.
def getOutputCollectionNameForWorkstep(self):
if (self.parameters['dataset'] != ''):
return self.parameters['dataset']
else:
raise WorkstepException("unspecified dataset on Dataset Source workstep")
# set the database to read/write to
def setDatabaseName(self,dbname):
self.databaseName = dbname;
# AddInput doesn't do anything for a source object
def addInput(self,OutputInformation):
pass
# output the type and the collection value
def getOutput(self):
outinfo = WorkstepInformationObject()
outinfo.type = self.outputType
outinfo.sourceObject = self
outinfo.collectionName = self.getOutputCollectionNameForWorkstep()
outinfo.modifiedTime = time.time()
return outinfo
def printSelf(self):
print "dataset source workstep: ",self.name, " time: ",self.modifiedTime
print "output collection: ",self.getOutputCollectionNameForWorkstep()
# since this is a source pointing to an existing collections, then
# nothing is done by the update and execute methods.
def update(self):
pass
# run the filter. This base class is a copy filter.
def execute(self):
pass
# since this is a database source object, we don't want to accidentally delete the source
# collection, so this message doesn't do anything for the Dataset source workstep type
def deleteOutput(self):
pass
#-------------------------------------------------------------
# Filtering workstep - an attribute can be selected and filtered on for GreaterThan, LessThan, NotEqual
#-------------------------------------------------------------
class DatasetFilteringWorkstep(DatasetCopyWorkstep):
def __init__(self):
DatasetCopyWorkstep.__init__(self)
self.type = 'arbor.analysis.datasetfilter'
# these were replaced to use parameters instead
#self.filterAttribute = ""
#self.limitValue = 0.0
#self.testType = 'LessThan'
self.parameters['filterAttribute'] = ''
self.parameters['filterValue'] = 0.0
self.parameters['filterOperation'] = 'LessThan'
# test type is GreaterThan, LessThan, NotEqual
def setFilterTest(self,typestring):
self.parameters['filterOperation'] = typestring
# determine which document attribute should be used as a parameter
def setFilterAttribute(self,attrString):
self.parameters['filterAttribute'] = attrString
def setFilterValue(self,number):
self.parameters['filterValue'] = number
# run the filter. The filter attribute and filter value are used to build a query for the
# source datasets. Only documents matching the criteria are passed through the filter
def execute(self,arborapi=None):
print self.name+" executing"
# setup mongo connection and look through all input collections, copying
connection = Connection('localhost', 27017)
if len(self.databaseName)>0:
db = connection[self.databaseName]
else:
db = connection['arbor']
outputcoll = db[self.outputInformation.collectionName]
# clear out the output to prepare for running an algorithm
outputcoll.drop()
# loop through all inputs and process all objects in the inputs
for thisInput in self.inputs:
inputcoll = db[thisInput.collectionName]
# find all documents in this input (repeated for each input) that match the
# test criteria. If no criteria is specified, pass records through
for case in switch(self.parameters['filterOperation']):
if case('GreaterThan'):
query = {self.parameters['filterAttribute'] : {'$gt' : self.parameters['filterValue']}}
break
if case ('LessThan'):
query = {self.parameters['filterAttribute'] : {'$lt' : self.parameters['filterValue']}}
break
if case ('NotEqual'):
query = {self.parameters['filterAttribute'] : {'$ne' : self.parameters['filterValue']}}
break
if case ('Equal') or case('EqualTo'):
query = {self.parameters['filterAttribute'] : self.parameters['filterValue']}
break
print "query used was: ",query
queryResults = inputcoll.find(query)
# write the documents into the output collection and indicate the output time changed
for result in queryResults:
outputcoll.insert(result)
        # reset the filter's modified time and assign it to the output object
self.outputInformation.modifiedTime = self.modifiedTime = time.time()
print self.name," passed ",outputcoll.count(), " records"
connection.close()
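# Illustrative sketch (hypothetical, not from the original module): how the source and
# filter worksteps above are intended to be chained; the database, project, collection
# and attribute names are made up, and a local MongoDB instance is assumed.
def _example_filter_pipeline():
    source = DatasetSourceWorkstep()
    source.setDatabaseName('arbor')
    source.setSourceCollectionName('myproject.mytable')
    filterStep = DatasetFilteringWorkstep()
    filterStep.setProjectName('myproject')
    filterStep.setName('filterstep')
    filterStep.setDatabaseName('arbor')
    filterStep.setFilterAttribute('value')
    filterStep.setFilterTest('GreaterThan')
    filterStep.setFilterValue(10.0)
    filterStep.addInput(source.getOutput())
    # update() walks the chain and re-runs execute() only when an input has changed
    filterStep.update()
    return filterStep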
#-------------------------------------------------------------
# fitContinuous workstep - this expects two inputs. The first is the tree, the second is the matrix
# this needs to be expanded to have named ports but we want to try this first to evaluate if the
# approach works.
#-------------------------------------------------------------
class GeigerFitContinuousWorkstep(DatasetCopyWorkstep):
def __init__(self):
DatasetCopyWorkstep.__init__(self)
self.type = 'arbor.analysis.geigerfitcontinuous'
self.inputType = 'data.table.spectable'
# since this will receive spec tables as inputs describing the tree and table, check for the
# correct types.
# ACTION - replace with type checking of separate ports eventually (tree and characters, etc.)
def InputTypeMatches(self,informationObject):
# return true if the type passed is the type of data we are expecting
return informationObject.typeMatches(self.inputType)
def execute(self, arborapi=None):
print self.name+" executing"
# setup mongo connection and look through all input collections, copying
connection = Connection('localhost', 27017)
if len(self.databaseName)>0:
db = connection[self.databaseName]
else:
db = connection['arbor']
outputcoll = db[self.outputInformation.collectionName]
# clear out the output to prepare for running an algorithm
outputcoll.drop()
# check that we have both inputs needed (tree and matrix specs). If so, then read the
# dataset spec records
# from the input collections and use the dataset references to invoke the algorithm, which
# requires just the references (since the algorithm opens the collections directly).
if len(self.inputs) == 2:
treeSpecCollection = db[self.inputs[0].collectionName]
matrixSpecCollection = db[self.inputs[1].collectionName]
treequery = dict()
treequery['key'] = 'project'
treeProjectName = treeSpecCollection.find(treequery)[0]['value']
treequery2 = dict()
treequery2['key'] = 'dataset'
treeDatasetName = treeSpecCollection.find(treequery2)[0]['value']
print "treeProject:",treeProjectName," dataset:",treeDatasetName
matrixQuery = dict()
matrixQuery['key'] = 'dataset'
matrixDatasetName = matrixSpecCollection.find(treequery2)[0]['value']
# all datasets have to be in the same project for the algorithms to look them up
# so all we need to read is the dataset name for the character matrix, use the
# rest of the information from the tree dataset
print "matrix Project:",treeProjectName," dataset:",matrixDatasetName
# check that the two needed parameters (character and outputTree) are defined
            # for this workstep instance; attempt processing only if they are defined. If everything
            # is available, run the algorithm on the selected datasets.
if ('character' in self.parameters) and ('outputTree' in self.parameters):
print "fitContinuous: found selected character defined as: ",self.parameters['character']
print "fitContinuous: found outputTree defined as: ",self.parameters['outputTree']
algorithms = ArborAlgorithmManager()
algorithms.setProjectManagerAPI(arborapi)
algorithms.initAlgorithmLibrary()
# look to see if the user defined model parameters on this workstep. If so, use
# them to override the default OU model
if ('model' in self.parameters):
modelToUse = self.parameters['model']
else:
modelToUse = '"OU"'
# only attempt to run the analysis if the algorithm subsystem is defined. This relies
# on the global algorithms definition
if algorithms != None:
print "fitContinous: running algorithm"
algorithm_result = algorithms.fitContinuous(self.databaseName,treeProjectName,treeDatasetName, matrixDatasetName,
self.parameters['selectedCharacter'],self.parameters['outputTree'],modelToUse)
else:
print "fitContinous: couldn't connect with AlgorithmManager instance.. skipping"
else:
print "fitContinuous: Please define both a character parameter and an outputtree parameter before running fitContinuous"
else:
print "fitContinuous: Exactly two inputs (a treeSpec and matrixSpec) are required"
# write the output collection and indicate the output time changed. A tree is created and some
# output parameters are returned, put them in the standard key/value form of the table spec:
for result in algorithm_result:
record = dict()
record['key'] = result
record['value'] = algorithm_result[result]
outputcoll.insert(record)
        # reset the filter's modified time and assign it to the output object
self.outputInformation.modifiedTime = self.modifiedTime = time.time()
print self.name," completed fit continuous workstep "
connection.close()
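# Illustrative sketch (hypothetical, not from the original module): wiring the two
# spec-table inputs (tree first, then character matrix) expected by the fitContinuous
# step above; the project, dataset, character and output names are made up, and a local
# MongoDB plus the Arbor algorithm library are assumed to be available.
def _example_fit_continuous_pipeline(arborapi=None):
    treeSpec = DatasetSpecTableSourceWorkstep()
    treeSpec.setProjectName('myproject')
    treeSpec.setName('treespec')
    treeSpec.setParameter('project', 'myproject')
    treeSpec.setParameter('dataset', 'mytree')
    matrixSpec = DatasetSpecTableSourceWorkstep()
    matrixSpec.setProjectName('myproject')
    matrixSpec.setName('matrixspec')
    matrixSpec.setParameter('project', 'myproject')
    matrixSpec.setParameter('dataset', 'mymatrix')
    fitStep = GeigerFitContinuousWorkstep()
    fitStep.setProjectName('myproject')
    fitStep.setName('fitcontinuous')
    fitStep.setParameter('character', 'bodymass')
    fitStep.setParameter('outputTree', 'mytree-fitted')
    fitStep.addInput(treeSpec.getOutput())
    fitStep.addInput(matrixSpec.getOutput())
    fitStep.update(arborapi)
    return fitStep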
#-------------------------------------------------------------
# data integrator workstep - this expects two inputs. The first is the tree, the second is the matrix
# this needs to be expanded to have named ports but we want to try this first to evaluate if the
# approach works.
#-------------------------------------------------------------
class GeigerDataIntegratorWorkstep(DatasetCopyWorkstep):
def __init__(self):
DatasetCopyWorkstep.__init__(self)
self.type = 'arbor.analysis.geigerdataintegrator'
self.inputType = 'data.table.spectable'
# since this will receive spec tables as inputs describing the tree and table, check for the
# correct types.
# ACTION - replace with type checking of separate ports eventually (tree and characters, etc.)
def InputTypeMatches(self,informationObject):
# return true if the type passed is the type of data we are expecting
return informationObject.typeMatches(self.inputType)
def execute(self, arborapi=None):
print self.name+" executing"
# setup mongo connection and look through all input collections, copying
connection = Connection('localhost', 27017)
if len(self.databaseName)>0:
db = connection[self.databaseName]
else:
db = connection['arbor']
outputcoll = db[self.outputInformation.collectionName]
# clear out the output to prepare for running an algorithm
outputcoll.drop()
# check that we have both inputs needed (tree and matrix specs). If so, then read the
# dataset spec records
# from the input collections and use the dataset references to invoke the algorithm, which
# requires just the references (since the algorithm opens the collections directly).
if len(self.inputs) == 2:
treeSpecCollection = db[self.inputs[0].collectionName]
matrixSpecCollection = db[self.inputs[1].collectionName]
treequery = dict()
treequery['key'] = 'project'
treeProjectName = treeSpecCollection.find(treequery)[0]['value']
treequery2 = dict()
treequery2['key'] = 'dataset'
treeDatasetName = treeSpecCollection.find(treequery2)[0]['value']
print "treeProject:",treeProjectName," dataset:",treeDatasetName
matrixQuery = dict()
matrixQuery['key'] = 'dataset'
matrixDatasetName = matrixSpecCollection.find(treequery2)[0]['value']
# all datasets have to be in the same project for the algorithms to look them up
# so all we need to read is the dataset name for the character matrix, use the
# rest of the information from the tree dataset
print "matrix Project:",treeProjectName," dataset:",matrixDatasetName
            # check that the needed parameter (outputTree) is defined for this workstep
            # instance; attempt processing only if it is defined. If everything is
            # available, run the algorithm on the selected datasets.
if ('outputTree' in self.parameters):
print "dataIntegrator: found outputTree defined as: ",self.parameters['outputTree']
algorithms = ArborAlgorithmManager()
algorithms.setProjectManagerAPI(arborapi)
algorithms.initAlgorithmLibrary()
# only attempt to run the analysis if the algorithm subsystem is defined. This relies
# on the global algorithms definition
if algorithms != None:
print "dataIntegrator: running algorithm"
algorithm_result = algorithms.dataIntegrator(self.databaseName,treeProjectName,treeDatasetName, matrixDatasetName,
None,self.parameters['outputTree'])
else:
print "dataIntegrator: couldn't connect with AlgorithmManager instance.. skipping"
else:
print "dataIntegrator: Please define an outputtree parameter before running dataIntegrator"
else:
print "dataIntegrator: Exactly two inputs (a treeSpec and matrixSpec) are required"
# a character matrix that is still a data.frame is returned. May need to use pandas to convert back to
# python.
print "dataIntegrator: character matrix output is not yet generated"
for i in range(len(algorithm_result)):
#print i
record = algorithm_result[i]
#print record
#outputcoll.insert(record)
        # reset the filter's modified time and assign it to the output object
self.outputInformation.modifiedTime = self.modifiedTime = time.time()
print self.name," completed fit continuous workstep "
connection.close()
| apache-2.0 |
bahrunnur/drivendata-women-healthcare | solution.py | 1 | 4439 | """Script of my solution to DrivenData Modeling Women's Health Care Decisions
Use this script in the following way:
python solution.py <name-of-submission>
Argument is optional, the script will assign default name.
"""
from __future__ import division
import sys
import pdb
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn import multiclass
from XGBoostClassifier import XGBoostClassifier
np.random.seed(17411)
def multiclass_log_loss(y_true, y_prob, eps=1e-15):
"""Multi class version of Logarithmic Loss metric.
https://www.kaggle.com/wiki/MultiClassLogLoss
Parameters
----------
y_true : array, shape = [n_samples, n_classes]
y_prob : array, shape = [n_samples, n_classes]
Returns
-------
loss : float
"""
predictions = np.clip(y_prob, eps, 1 - eps)
rows = y_prob.shape[0]
cols = y_prob.shape[1]
vsota = np.sum(y_true * np.log(predictions) + (1-y_true) * np.log(1-predictions))
vsota = vsota / cols
return -1.0 / rows * vsota
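# Illustrative sketch (not from the original script): a tiny hand-made check of the
# metric above on a 2-sample, 3-label case; the numbers are arbitrary. Predicting 0.5
# everywhere would give log(2) ~ 0.693, while confident correct predictions approach 0.
def _example_log_loss():
    y_true = np.array([[1, 0, 1], [0, 1, 0]])
    y_prob = np.array([[0.9, 0.2, 0.8], [0.1, 0.7, 0.3]])
    return multiclass_log_loss(y_true, y_prob)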
def load_train_data(path=None, train_size=0.8):
train_values = pd.read_csv('data/processed_train.csv')
train_labels = pd.read_csv('data/train_labels.csv')
df = pd.concat([train_values, train_labels], axis=1)
df = df.drop('id', axis=1)
X = df.values.copy()
np.random.shuffle(X)
X_train, X_valid, y_train, y_valid = train_test_split(
X[:, :-14], X[:, -14:], train_size=train_size,
)
print(" -- Data loaded.")
return (X_train.astype(float), X_valid.astype(float),
y_train.astype(int), y_valid.astype(int))
def load_test_data(path=None):
    # assumed filename for the preprocessed test set (mirrors processed_train.csv above);
    # the original line read the training file here, which contradicts this function's purpose
    df = pd.read_csv('data/processed_test.csv')
X = df.values
X_test, ids = X[:, 1:], X[:, 0]
return X_test.astype(float), ids.astype(str)
def validate(clf, model, X_train, X_valid, y_train, y_valid):
"""My local validation.
My current best score is:
- `0.2529` in Local Validation.
- `0.2547` in Leaderboard.
"""
print(" --- Evaluating {}.".format(model))
clf.fit(X_train, y_train)
y_prob = clf.predict_proba(X_valid)
score = multiclass_log_loss(y_valid, y_prob)
print(" --- Multiclass logloss on validation set: {:.4f}".format(score))
def train():
X_train, X_valid, y_train, y_valid = load_train_data()
"""About Xgboost Parameters.
    Because the label distribution is not uniform, an individual classifier
    can reach outstanding accuracy on its label and overfit. So, gamma is
    increased to penalize such a classifier so it does not overfit that label.
More information about xgboost parameters:
https://github.com/dmlc/xgboost/wiki/Parameters
    So far, these parameters give a score of `0.2529` on local validation and
    `0.2547` on the leaderboard, using the experimentation datasets.
params =
- 'max_depth': 6
- 'num_round': 512
- 'gamma': 1.0
- 'min_child_weight': 4
- 'eta': 0.025
- 'objective': 'binary:logistic'
- 'eval_metric': 'logloss'
- 'nthread': 4
"""
model = "xgboost gbt"
params = {'max_depth': 6,
'num_round': 512,
'gamma': 1.0,
'min_child_weight': 4,
'eta': 0.025,
'objective': 'binary:logistic',
'eval_metric': 'logloss',
'nthread': 4}
clf = XGBoostClassifier(**params)
# Multilabel
clf = multiclass.OneVsRestClassifier(clf, n_jobs=1)
# Local Validation
validate(clf, model, X_train, X_valid, y_train, y_valid)
# Train whole set for submission.
X = np.concatenate((X_train, X_valid))
y = np.concatenate((y_train, y_valid))
print(" --- Start training {} Classifier on whole set.".format(model))
clf.fit(X, y)
print(" --- Finished training on whole set.")
print(" -- Finished training.")
return clf
def make_submission(clf, path='my_submission.csv'):
path = sys.argv[1] if len(sys.argv) > 1 else path
X_test, ids = load_test_data()
sample = pd.read_csv('data/SubmissionFormat.csv')
y_prob = clf.predict_proba(X_test)
preds = pd.DataFrame(y_prob, index=sample.id.values, columns=sample.columns[1:])
preds.to_csv(path, index_label='id')
print(" -- Wrote submission to file {}.".format(path))
def main():
print(" - Start.")
clf = train()
make_submission(clf)
print(" - Finished.")
if __name__ == '__main__':
main()
| mit |
gauravsc/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
# StringIO is used by plot_predictions below; import it explicitly in case the
# wildcard import from python_util.util does not already provide it.
from StringIO import StringIO
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
jbloom/mapmuts | scripts/mapmuts_aafracsplots.py | 1 | 5017 | #!python
"""Makes plots for each residue summarizing fraction of mutant amino acids.
This mapmuts_aafracsplots.py script is an analysis script for the
mapmuts package.
For each residue in a protein, plots the fraction of non-wildtype
amino acids at each position for a set of different samples.
To run this script from the prompt, first create a text infile of the
format described below. Then simply type mapmuts_aafracsplots.py
followed by the infile name. For example, if the name is infile.txt,
type::
mapmuts_aafracsplots.py infile.txt
If the script is not executable on your system, you can instead type::
python mapmuts_aafracsplots.py infile.txt
This script is designed to be run after you have already run
mapmuts_parsecounts.py on several samples. Each run of that program
generates a *_codoncounts.txt file, which is used as input for this
script.
This script will only work if pylab / matplotlib are available.
"""
import sys
import os
import time
import mapmuts.plot
import mapmuts.io
import mapmuts.sequtils
def main():
"""Main body of script."""
print "Beginning execution of mapmuts_aafracsplots.py..."
mapmuts.io.PrintVersions(sys.stdout)
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument"\
+ ' specifying the name of the input file.')
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile of %s" % infilename)
lines = [line for line in open(infilename).readlines() if \
line[0] != '#' and not line.isspace()]
printprogress = plotfileprefix = None
infiles = []
names = []
for line in lines:
entries = line.split(None, 1)
if len(entries) != 2:
raise ValueError("line must contain at least two entries:\n%s" % line)
if entries[0].strip() == 'plotfileprefix':
if plotfileprefix != None:
raise ValueError("Duplicate plotfileprefix keys")
plotfileprefix = entries[1].strip()
elif entries[0].strip() == 'printprogress':
if printprogress != None:
raise IOError("Duplicate entries for printprogress")
printprogress = entries[1].strip()
if printprogress == 'False':
printprogress = False
elif printprogress == 'True':
printprogress = True
else:
raise ValueError("printprogress must be True or False")
else:
infile = entries[0].strip()
names.append(entries[1].strip())
if not os.path.isfile(infile):
raise IOError("Cannot find specified file %s" % infile)
infiles.append(infile)
assert len(infiles) == len(names)
if not infiles:
raise ValueError("infile fails to specify any samples")
if not mapmuts.plot.PylabAvailable():
raise OSError("pylab / matplotlib is not available, so cannot run this script.")
if not plotfileprefix:
raise ValueError("infile did not specify plotfileprefix")
if printprogress == None:
raise ValueError("infile did not specify printprogress")
dir = os.path.dirname(plotfileprefix)
if dir:
if not os.path.isdir(dir):
raise IOError("plotfileprefix specifies directory of %s" % dir\
+ ' but this directory does not exist. You must create'\
+ ' it manually.')
counts = []
maxcodon = None
for infile in infiles:
codon_counts = mapmuts.io.ReadCodonCounts(open(infile))
mapmuts.sequtils.ClassifyCodonCounts(codon_counts)
counts.append(codon_counts)
imaxcodon = max([i for i in codon_counts.keys() if isinstance(i, int)])
if maxcodon != None:
if imaxcodon != maxcodon:
raise ValueError("codoncounts files specify different lengths")
else:
maxcodon = imaxcodon
if not maxcodon:
raise ValueError("codoncounts files specify empty sequence")
for icodon in range(1, maxcodon + 1):
wtaas = {}
for codon_counts in counts:
wtcodon = codon_counts[icodon]['WT']
wtaa = mapmuts.sequtils.Translate([('head', wtcodon)])[0][1]
if not wtaa:
wtaa = 'STOP'
wtaas[wtaa] = True
for wtaa in wtaas.keys():
aas = [aa for aa in mapmuts.sequtils.AminoAcids() if aa != wtaa]
if wtaa != 'STOP':
aas.append('STOP')
title = "%s%d" % (wtaa, icodon)
plotfile = "%s_%s.pdf" % (plotfileprefix, title)
if printprogress:
print "Making plot %s..." % plotfile
sys.stdout.flush()
mapmuts.plot.PlotAAFracs(counts, names, icodon, plotfile, aas, title)
if printprogress:
print "Plotting completed."
print "Script complete."
if __name__ == '__main__':
main() # run the script
| gpl-3.0 |
Christoph/PythonTest | tagrefinery/spell.py | 2 | 1843 | # Lib imports
import pandas as pd
import numpy as np
import nltk as nlp
import matplotlib.pyplot as plt
import importlib
import re
# Custom modules
import database
# Reload custom modules
def reloadall():
global database
database = importlib.reload(database)
print("Begin Main")
# Initialize variables
db = database.InitializeDB()
# Import all stopwords from nltk
stopwords = set(nlp.corpus.stopwords.words())
# Derive the Tag table from Ancestor
tags = db.fetchData(db.query_Tag)
# Create connection character removing regex
# Compile improves speed through precompiling
# re.escape escapes all characters
# The list needs to be a string which looks like this [/_-]
chars_removed = ["-","_","/","."]
rx_cr = re.compile("["+re.escape("".join(chars_removed))+"]")
# String processing
# Creating a normalized text composed of all tags separated by points.
text = ". ".join([re.sub(rx_cr, " ", t.lower()) for t in tags["Name"]])
subset = text[:200]
# Porter is the oldest stemmer
#porter = nlp.PorterStemmer()
# Lancaster is a very aggressive one
#lancaster = nlp.LancasterStemmer()
# English snowball is a better porter stemmer. (From nltk documentation)
snow = nlp.stem.snowball.EnglishStemmer(ignore_stopwords=True)
# Uses WordNet to lemmatize known words
wnl = nlp.WordNetLemmatizer()
tokens = nlp.word_tokenize(subset)
# Oldest stemmer
#ps = [porter.stem(t) for t in tokens]
# Aggressive stemmer
#ls = [lancaster.stem(t) for t in tokens]
# For english better than the porter stemmer (from nltk docu)
norm_t = [snow.stem(t) for t in tokens]
# Wordnet Lemmatizer: lemmatizes all known words
wn = [wnl.lemmatize(t) for t in tokens]
vocab = set(norm_t)
print(len(vocab))
# remove stopwords (set.discard takes a single element, so use set difference)
vocab.difference_update(stopwords)
print(len(vocab))
# edit_distance is a function, not a class, so bind the name without calling it
ed = nlp.metrics.distance.edit_distance
ed("a", "b", transpositions=True)
| mit |
mne-tools/mne-tools.github.io | 0.12/_downloads/plot_label_from_stc.py | 31 | 3963 | """
=================================================
Generate a functional label from source estimates
=================================================
Threshold source estimates and produce a functional label. The label
is typically the region of interest that contains high values.
Here we compare the average time course in the anatomical label obtained
by FreeSurfer segmentation and the average time course from the
functional label. As expected the time course in the functional
label yields higher values.
"""
# Author: Luke Bloy <[email protected]>
# Alex Gramfort <[email protected]>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = 'sample'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Compute a label/ROI based on the peak power between 80 and 120 ms.
# The label bankssts-lh is used for the comparison.
aparc_label_name = 'bankssts-lh'
tmin, tmax = 0.080, 0.120
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src'] # get the source space
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori='normal')
# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()
# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.
func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
subjects_dir=subjects_dir, connected=True)
# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]
# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]
stc_func_label = stc.in_label(func_label)
pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]
# flip the pca so that the max power between tmin and tmax is positive
pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))])
###############################################################################
# plot the time courses....
plt.figure()
plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k',
label='Anatomical %s' % aparc_label_name)
plt.plot(1e3 * stc_func_label.times, pca_func, 'b',
label='Functional %s' % aparc_label_name)
plt.legend()
plt.show()
###############################################################################
# plot brain in 3D with PySurfer if available
brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# show both labels
brain.add_label(anat_label, borders=True, color='k')
brain.add_label(func_label, borders=True, color='b')
| bsd-3-clause |
dangeles/tissue_enrichment_tool_hypergeometric_test | setup.py | 4 | 2648 | """
A script to setup the package.
Created on Fri Mar 11 11:27:39 2016
@author: dangeles
"""
# -*- coding: utf-8 -*-
from distutils.core import setup
from setuptools import setup, find_packages
import os
import sys
version = '0.17.6'
# just type in python setup.py publish and
# this takes care of publishing to pypi!
# tag with git
if sys.argv[1] == 'tag':
if len(sys.argv) > 2:
if sys.argv[2] == '-m':
print(sys.argv[2])
print('please input your commit message')
message = sys.stdin.readline().strip()
print("git tag -a %s -m '%s'" % (version, message))
os.system("git tag -a %s -m '%s'" % (version, message))
else:
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
# publish to pypi
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
os.system("python setup.py bdist_wheel upload")
# print("You probably want to also tag the version now:")
# print(" git tag -a %s -m 'version %s'" % (version, version))
# print(" git push --tags")
sys.exit()
# for pytest.py purposes -- just type in python setup.py test!
if sys.argv[-1] == 'test':
test_requirements = [
'pytest',
'flake8',
'coverage'
]
try:
modules = map(__import__, test_requirements)
except ImportError as e:
        err_msg = str(e).replace("No module named ", "")
msg = "%s is not installed. Install your test requirements." % err_msg
raise ImportError(msg)
os.system('py.test')
sys.exit()
# Below this point is the rest of the setup() function
def readme():
"""A function to open a readme.rst file."""
with open('README.rst') as f:
return f.read()
setup(
name='tissue_enrichment_analysis',
packages=find_packages(exclude=("tests",)),
version=version,
description='This package contains all the software used to implement\
TEA in WormBase and remotely',
author='David Angeles-Albores',
author_email='[email protected]',
url='https://github.com/dangeles/TissueEnrichmentAnalysis', # github repo
download_url='https://github.com/dangeles/TissueEnrichmentAnalysis/tarball/{0}'.format(version),
keywords=['tissue enrichment analysis', 'TEA',
'RNAseq', 'celegans', 'biology'], # arbitrary keywords
install_requires=[
'matplotlib', 'scipy', 'numpy', 'pandas', 'seaborn'
],
classifiers=['License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5'
],
    license='MIT',
scripts=['bin/tea']
)
| mit |
Habasari/sms-tools | lectures/09-Sound-description/plots-code/features.py | 25 | 2965 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import os, sys
import json
from scipy.cluster.vq import vq, kmeans, whiten
def fetchDataDetails(inputDir, descExt = '.json'):
dataDetails = {}
for path, dname, fnames in os.walk(inputDir):
for fname in fnames:
if descExt in fname.lower():
rname, cname, sname = path.split('/')
if not dataDetails.has_key(cname):
dataDetails[cname]={}
fDict = json.load(open(os.path.join(rname, cname, sname, fname),'r'))
dataDetails[cname][sname]={'file': fname, 'feature':fDict}
return dataDetails
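# Note on the expected layout, inferred from the path.split('/') above: inputDir
# should be a bare directory name (no slashes) walked from the current working
# directory, with every descriptor stored as (placeholder names)
#
#   <inputDir>/<category>/<soundId>/<soundId>_desc.json
#
# Deeper or shallower trees would break the three-way unpacking of the walked
# path into (rname, cname, sname).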
def plotFeatures(inputDir, descInput = ('',''), anotOn =0):
    # mfcc descriptors are a special case for us as it's a vector, not a value
descriptors = ['', '']
mfccInd = [-1 , -1]
if "mfcc" in descInput[0]:
featType, featName, stats, ind = descInput[0].split('.')
descriptors[0] = featType+'.'+featName+'.'+stats
mfccInd[0] = int(ind)
else:
descriptors[0] = descInput[0]
if "mfcc" in descInput[1]:
featType, featName, stats, ind = descInput[1].split('.')
descriptors[1] = featType+'.'+featName+'.'+stats
mfccInd[1] = int(ind)
else:
descriptors[1] = descInput[1]
dataDetails = fetchDataDetails(inputDir)
colors = ['r', 'g', 'c', 'b', 'k', 'm', 'y']
plt.figure(1, figsize=(9.5, 6))
plt.hold(True)
legArray = []
catArray = []
for ii, category in enumerate(dataDetails.keys()):
catArray.append(category)
for soundId in dataDetails[category].keys():
filepath = os.path.join(inputDir, category, soundId, dataDetails[category][soundId]['file'])
descSound = json.load(open(filepath, 'r'))
if not descSound.has_key(descriptors[0]) or not descSound.has_key(descriptors[1]):
print "Please provide descriptors which are extracted and saved before"
return -1
if "mfcc" in descriptors[0]:
x_cord = descSound[descriptors[0]][0][mfccInd[0]]
else:
x_cord = descSound[descriptors[0]][0]
if "mfcc" in descriptors[1]:
y_cord = descSound[descriptors[1]][0][mfccInd[1]]
else:
y_cord = descSound[descriptors[1]][0]
plt.scatter(x_cord,y_cord, c = colors[ii], s=50, hold = True, alpha=0.75)
if anotOn==1:
plt.annotate(soundId, xy=(x_cord, y_cord), xytext=(x_cord, y_cord))
circ = Line2D([0], [0], linestyle="none", marker="o", alpha=0.75, markersize=10, markerfacecolor=colors[ii])
legArray.append(circ)
plt.ylabel(descInput[1], fontsize =16)
plt.xlabel(descInput[0], fontsize =16)
plt.legend(legArray, catArray ,numpoints=1,bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=len(catArray), mode="expand", borderaxespad=0.)
plt.savefig('features.png')
plt.show()
########################
plotFeatures('freesound-sounds', descInput = ('lowlevel.spectral_centroid.mean','lowlevel.mfcc.mean.2'), anotOn =0)
| agpl-3.0 |
pombredanne/bokeh | examples/models/trail.py | 3 | 4264 | # -*- coding: utf-8 -*-
from __future__ import print_function
from math import sin, cos, atan2, sqrt, radians
import numpy as np
import scipy.ndimage as im
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.util.browser import view
from bokeh.models.glyphs import Line, Patches
from bokeh.models.layouts import VBox
from bokeh.models import (
Plot, GMapPlot, GMapOptions,
DataRange1d, ColumnDataSource,
LinearAxis, Grid,
PanTool, WheelZoomTool, ResetTool)
from bokeh.sampledata.mtb import obiszow_mtb_xcm
def haversin(theta):
return sin(0.5 * theta) ** 2
def distance(p1, p2):
"""Distance between (lat1, lon1) and (lat2, lon2). """
R = 6371
lat1, lon1 = p1
lat2, lon2 = p2
phi1 = radians(lat1)
phi2 = radians(lat2)
delta_lat = radians(lat2 - lat1)
delta_lon = radians(lon2 - lon1)
a = haversin(delta_lat) + cos(phi1) * cos(phi2) * haversin(delta_lon)
return 2 * R * atan2(sqrt(a), sqrt(1 - a))
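# Quick sanity check for the haversine distance above (not called by the
# example itself): one degree of latitude is roughly 111.2 km on a sphere of
# radius 6371 km, so distance((0, 0), (1, 0)) should land very close to that.
def _check_haversine():
    d = distance((0.0, 0.0), (1.0, 0.0))
    assert abs(d - 111.2) < 0.5, d
    return d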
def prep_data(dataset):
df = dataset.copy()
latlon = list(zip(df.lat, df.lon))
dist = np.array([distance(latlon[i + 1], latlon[i]) for i in range(len((latlon[:-1])))])
df["dist"] = np.concatenate(([0], np.cumsum(dist)))
slope = np.abs(100 * np.diff(df.alt) / (1000 * dist))
slope[np.where( slope < 4) ] = 0 # "green"
slope[np.where((slope >= 4) & (slope < 6))] = 1 # "yellow"
slope[np.where((slope >= 6) & (slope < 10))] = 2 # "pink"
slope[np.where((slope >= 10) & (slope < 15))] = 3 # "orange"
slope[np.where( slope >= 15 )] = 4 # "red"
slope = im.median_filter(slope, 6)
colors = np.empty_like(slope, dtype=object)
colors[np.where(slope == 0)] = "green"
colors[np.where(slope == 1)] = "yellow"
colors[np.where(slope == 2)] = "pink"
colors[np.where(slope == 3)] = "orange"
colors[np.where(slope == 4)] = "red"
df["colors"] = list(colors) + [None] # NOTE: add [None] just make pandas happy
return df
title = "Obiszów MTB XCM"
def trail_map(data):
lon = (min(data.lon) + max(data.lon)) / 2
lat = (min(data.lat) + max(data.lat)) / 2
map_options = GMapOptions(lng=lon, lat=lat, zoom=13)
plot = GMapPlot(title="%s - Trail Map" % title, map_options=map_options, plot_width=800, plot_height=800)
plot.x_range = DataRange1d()
plot.y_range = DataRange1d()
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
line_source = ColumnDataSource(dict(x=data.lon, y=data.lat, dist=data.dist))
line = Line(x="x", y="y", line_color="blue", line_width=2)
plot.add_glyph(line_source, line)
return plot
def altitude_profile(data):
plot = Plot(title="%s - Altitude Profile" % title, plot_width=800, plot_height=400)
plot.x_range = DataRange1d()
plot.y_range = DataRange1d()
xaxis = LinearAxis(axis_label="Distance (km)")
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_label="Altitude (m)")
plot.add_layout(yaxis, 'left')
xgrid = Grid(plot=plot, dimension=0, ticker=xaxis.ticker)
ygrid = Grid(plot=plot, dimension=1, ticker=yaxis.ticker)
plot.renderers.extend([xgrid, ygrid])
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
X, Y = data.dist, data.alt
y0 = min(Y)
patches_source = ColumnDataSource(dict(
xs=[[X[i], X[i+1], X[i+1], X[i]] for i in range(len(X[:-1])) ],
ys=[[y0, y0, Y[i+1], Y[i]] for i in range(len(Y[:-1])) ],
color=data.colors[:-1]
))
patches = Patches(xs="xs", ys="ys", fill_color="color", line_color="color")
plot.add_glyph(patches_source, patches)
line_source = ColumnDataSource(dict(x=data.dist, y=data.alt))
line = Line(x='x', y='y', line_color="black", line_width=1)
plot.add_glyph(line_source, line)
return plot
data = prep_data(obiszow_mtb_xcm)
trail = trail_map(data)
altitude = altitude_profile(data)
layout = VBox(children=[altitude, trail])
doc = Document()
doc.add_root(layout)
if __name__ == "__main__":
filename = "trail.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Trail map and altitude profile"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
nmayorov/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
julienledem/arrow | python/pyarrow/tests/pandas_examples.py | 1 | 3522 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import numpy as np
import pandas as pd
import pyarrow as pa
def dataframe_with_arrays():
"""
    Dataframe with numpy array columns of every possible primitive type.
Returns
-------
df: pandas.DataFrame
schema: pyarrow.Schema
Arrow schema definition that is in line with the constructed df.
"""
dtypes = [('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('f4', pa.float_()), ('f8', pa.double())]
arrays = OrderedDict()
fields = []
for dtype, arrow_dtype in dtypes:
fields.append(pa.field(dtype, pa.list_(arrow_dtype)))
arrays[dtype] = [
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
]
fields.append(pa.field('str', pa.list_(pa.string())))
arrays['str'] = [
np.array([u"1", u"ä"], dtype="object"),
None,
np.array([u"1"], dtype="object"),
np.array([u"1", u"2", u"3"], dtype="object")
]
fields.append(pa.field('datetime64', pa.list_(pa.timestamp('ms'))))
arrays['datetime64'] = [
np.array(['2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ms]'),
None,
None,
np.array(['2007-07-13T02',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ms]'),
]
df = pd.DataFrame(arrays)
schema = pa.Schema.from_fields(fields)
return df, schema
def dataframe_with_lists():
"""
    Dataframe with list columns of every possible primitive type.
Returns
-------
df: pandas.DataFrame
schema: pyarrow.Schema
Arrow schema definition that is in line with the constructed df.
"""
arrays = OrderedDict()
fields = []
fields.append(pa.field('int64', pa.list_(pa.int64())))
arrays['int64'] = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[0, 1, 2, 3, 4],
None,
[0]
]
fields.append(pa.field('double', pa.list_(pa.double())))
arrays['double'] = [
[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
[0., 1., 2., 3., 4.],
None,
[0.]
]
fields.append(pa.field('str_list', pa.list_(pa.string())))
arrays['str_list'] = [
[u"1", u"ä"],
None,
[u"1"],
[u"1", u"2", u"3"]
]
df = pd.DataFrame(arrays)
schema = pa.Schema.from_fields(fields)
return df, schema
| apache-2.0 |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/backends/backend_nbagg.py | 10 | 9723 | """Interactive figures in the IPython notebook"""
# Note: There is a notebook in
# lib/matplotlib/backends/web_backend/nbagg_uat.ipynb to help verify
# that changes made maintain expected behaviour.
import datetime
from base64 import b64encode
import json
import io
import os
import six
from uuid import uuid4 as uuid
import tornado.ioloop
from IPython.display import display, Javascript, HTML
try:
# Jupyter/IPython 4.x or later
from ipykernel.comm import Comm
except ImportError:
# Jupyter/IPython 3.x or earlier
from IPython.kernel.comm import Comm
from matplotlib import rcParams
from matplotlib.figure import Figure
from matplotlib import is_interactive
from matplotlib.backends.backend_webagg_core import (FigureManagerWebAgg,
FigureCanvasWebAggCore,
NavigationToolbar2WebAgg,
TimerTornado)
from matplotlib.backend_bases import (ShowBase, NavigationToolbar2,
FigureCanvasBase)
class Show(ShowBase):
def __call__(self, block=None):
from matplotlib._pylab_helpers import Gcf
managers = Gcf.get_all_fig_managers()
if not managers:
return
interactive = is_interactive()
for manager in managers:
manager.show()
# plt.figure adds an event which puts the figure in focus
# in the activeQue. Disable this behaviour, as it results in
# figures being put as the active figure after they have been
# shown, even in non-interactive mode.
if hasattr(manager, '_cidgcf'):
manager.canvas.mpl_disconnect(manager._cidgcf)
if not interactive and manager in Gcf._activeQue:
Gcf._activeQue.remove(manager)
show = Show()
def draw_if_interactive():
import matplotlib._pylab_helpers as pylab_helpers
if is_interactive():
manager = pylab_helpers.Gcf.get_active()
if manager is not None:
manager.show()
def connection_info():
"""
Return a string showing the figure and connection status for
the backend. This is intended as a diagnostic tool, and not for general
use.
"""
from matplotlib._pylab_helpers import Gcf
result = []
for manager in Gcf.get_all_fig_managers():
fig = manager.canvas.figure
        result.append('{0} - {1}'.format((fig.get_label() or
"Figure {0}".format(manager.num)),
manager.web_sockets))
if not is_interactive():
result.append('Figures pending show: {0}'.format(len(Gcf._activeQue)))
return '\n'.join(result)
# Note: Version 3.2 and 4.x icons
# http://fontawesome.io/3.2.1/icons/
# http://fontawesome.io/
# the `fa fa-xxx` part targets font-awesome 4, (IPython 3.x)
# the icon-xxx targets font awesome 3.21 (IPython 2.x)
_FONT_AWESOME_CLASSES = {
'home': 'fa fa-home icon-home',
'back': 'fa fa-arrow-left icon-arrow-left',
'forward': 'fa fa-arrow-right icon-arrow-right',
'zoom_to_rect': 'fa fa-square-o icon-check-empty',
'move': 'fa fa-arrows icon-move',
'download': 'fa fa-floppy-o icon-save',
None: None
}
class NavigationIPy(NavigationToolbar2WebAgg):
# Use the standard toolbar items + download button
toolitems = [(text, tooltip_text,
_FONT_AWESOME_CLASSES[image_file], name_of_method)
for text, tooltip_text, image_file, name_of_method
in (NavigationToolbar2.toolitems +
(('Download', 'Download plot', 'download', 'download'),))
if image_file in _FONT_AWESOME_CLASSES]
class FigureManagerNbAgg(FigureManagerWebAgg):
ToolbarCls = NavigationIPy
def __init__(self, canvas, num):
self._shown = False
FigureManagerWebAgg.__init__(self, canvas, num)
def display_js(self):
# XXX How to do this just once? It has to deal with multiple
# browser instances using the same kernel (require.js - but the
# file isn't static?).
display(Javascript(FigureManagerNbAgg.get_javascript()))
def show(self):
if not self._shown:
self.display_js()
self._create_comm()
else:
self.canvas.draw_idle()
self._shown = True
def reshow(self):
"""
A special method to re-show the figure in the notebook.
"""
self._shown = False
self.show()
@property
def connected(self):
return bool(self.web_sockets)
@classmethod
def get_javascript(cls, stream=None):
if stream is None:
output = io.StringIO()
else:
output = stream
super(FigureManagerNbAgg, cls).get_javascript(stream=output)
with io.open(os.path.join(
os.path.dirname(__file__),
"web_backend",
"nbagg_mpl.js"), encoding='utf8') as fd:
output.write(fd.read())
if stream is None:
return output.getvalue()
def _create_comm(self):
comm = CommSocket(self)
self.add_web_socket(comm)
return comm
def destroy(self):
self._send_event('close')
# need to copy comms as callbacks will modify this list
for comm in list(self.web_sockets):
comm.on_close()
self.clearup_closed()
def clearup_closed(self):
"""Clear up any closed Comms."""
self.web_sockets = set([socket for socket in self.web_sockets
if socket.is_open()])
if len(self.web_sockets) == 0:
self.canvas.close_event()
def remove_comm(self, comm_id):
self.web_sockets = set([socket for socket in self.web_sockets
if not socket.comm.comm_id == comm_id])
class FigureCanvasNbAgg(FigureCanvasWebAggCore):
def new_timer(self, *args, **kwargs):
return TimerTornado(*args, **kwargs)
def start_event_loop(self, timeout):
FigureCanvasBase.start_event_loop_default(self, timeout)
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
from .._pylab_helpers import Gcf
def closer(event):
Gcf.destroy(num)
canvas = FigureCanvasNbAgg(figure)
if rcParams['nbagg.transparent']:
figure.patch.set_alpha(0)
manager = FigureManagerNbAgg(canvas, num)
if is_interactive():
manager.show()
figure.canvas.draw_idle()
canvas.mpl_connect('close_event', closer)
return manager
class CommSocket(object):
"""
Manages the Comm connection between IPython and the browser (client).
Comms are 2 way, with the CommSocket being able to publish a message
via the send_json method, and handle a message with on_message. On the
JS side figure.send_message and figure.ws.onmessage do the sending and
receiving respectively.
"""
def __init__(self, manager):
self.supports_binary = None
self.manager = manager
self.uuid = str(uuid())
# Publish an output area with a unique ID. The javascript can then
# hook into this area.
display(HTML("<div id=%r></div>" % self.uuid))
try:
self.comm = Comm('matplotlib', data={'id': self.uuid})
except AttributeError:
raise RuntimeError('Unable to create an IPython notebook Comm '
'instance. Are you in the IPython notebook?')
self.comm.on_msg(self.on_message)
manager = self.manager
self._ext_close = False
def _on_close(close_message):
self._ext_close = True
manager.remove_comm(close_message['content']['comm_id'])
manager.clearup_closed()
self.comm.on_close(_on_close)
def is_open(self):
return not (self._ext_close or self.comm._closed)
def on_close(self):
# When the socket is closed, deregister the websocket with
# the FigureManager.
if self.is_open():
try:
self.comm.close()
except KeyError:
# apparently already cleaned it up?
pass
def send_json(self, content):
self.comm.send({'data': json.dumps(content)})
def send_binary(self, blob):
# The comm is ascii, so we always send the image in base64
# encoded data URL form.
data = b64encode(blob)
if six.PY3:
data = data.decode('ascii')
data_uri = "data:image/png;base64,{0}".format(data)
self.comm.send({'data': data_uri})
def on_message(self, message):
# The 'supports_binary' message is relevant to the
# websocket itself. The other messages get passed along
# to matplotlib as-is.
# Every message has a "type" and a "figure_id".
message = json.loads(message['content']['data'])
if message['type'] == 'closing':
self.on_close()
self.manager.clearup_closed()
elif message['type'] == 'supports_binary':
self.supports_binary = message['value']
else:
self.manager.handle_json(message)
| apache-2.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/api/affine_image.py | 1 | 2412 | """
============================
Affine transform of an image
============================
For the backends that support draw_image with optional affine
transform (e.g., agg, ps backend), the image of the output should
have its boundary match the dashed yellow rectangle.
"""
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
def get_image():
delta = 0.25
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = Z2 - Z1 # difference of Gaussians
return Z
def do_plot(ax, Z, transform):
im = ax.imshow(Z, interpolation='none',
origin='lower',
extent=[-2, 4, -3, 2], clip_on=True)
trans_data = transform + ax.transData
im.set_transform(trans_data)
# display intended extent of the image
x1, x2, y1, y2 = im.get_extent()
ax.plot([x1, x2, x2, x1, x1], [y1, y1, y2, y2, y1], "y--",
transform=trans_data)
ax.set_xlim(-5, 5)
ax.set_ylim(-4, 4)
# prepare image and figure
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
Z = get_image()
# image rotation
do_plot(ax1, Z, mtransforms.Affine2D().rotate_deg(30))
# image skew
do_plot(ax2, Z, mtransforms.Affine2D().skew_deg(30, 15))
# scale and reflection
do_plot(ax3, Z, mtransforms.Affine2D().scale(-1, .5))
# everything and a translation
do_plot(ax4, Z, mtransforms.Affine2D().
rotate_deg(30).skew_deg(30, 15).scale(-1, .5).translate(.5, -1))
pltshow(plt)
| mit |
jimmycallin/master-thesis | architectures/conll16st-hd-sdp/cnn_class_micro_static_extended.py | 1 | 8623 | import sys
import numpy as np
import tensorflow as tf
from sklearn import cross_validation
from sklearn.cross_validation import KFold
from sklearn import metrics
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])
class TextCNN_Ext(object):
def __init__(self, train_dataset, train_labels, valid_dataset, valid_labels, embeddings, vocabulary, l2_reg_lambda,
num_steps, batch_size, num_filters, filter_sizes_1, filter_sizes_2, filter_sizes_3, dropout_keep_prob,
# lexical,
shuffling, num_classes):
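        """Build a convolutional text classifier graph and train it.

        Integer-encoded token sequences are embedded with the pre-trained
        `embeddings`, passed through convolution and max-pooling over the
        three filter sizes, concatenated, regularized with dropout and L2,
        and classified by a softmax output layer. The graph is trained with
        Adam for `num_steps` minibatches of size `batch_size`; validation
        predictions and accuracy are stored on `self.valid_predictions` and
        `self.valid_accuracy`.
        """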
# parameters
vocab_size = len(vocabulary)
# sequence_length = train_dataset.shape[1]
sequence_length = train_dataset.shape[1]
train_size = train_dataset.shape[0]
# num_classes = 3
filter_sizes = [filter_sizes_1, filter_sizes_2, filter_sizes_3]
num_filters_total = num_filters * len(filter_sizes)
embedding_size = embeddings.shape[1]
# more embeddings than words in vocab :/
embeddings_number = embeddings.shape[0]
graph = tf.Graph()
with graph.as_default():
tf.set_random_seed(10)
# variables and constants
input_x = tf.placeholder(tf.int32, shape=[batch_size, sequence_length], name="input_x")
input_y = tf.placeholder(tf.int32, shape=[batch_size, num_classes], name="input_y")
# self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
reg_coef = tf.placeholder(tf.float32)
l2_loss = tf.constant(0.0)
# Generate convolution weights. This should be more human readable
weights_conv = [tf.Variable(tf.truncated_normal([filter_size, embedding_size, 1, num_filters],
stddev=tf.sqrt(2.0 / (filter_size * embedding_size)),
seed=filter_size + i * num_filters)) for i, filter_size in
enumerate(filter_sizes)]
# weights_conv = [tf.Variable(tf.truncated_normal([filter_size, embedding_size, 1, num_filters], stddev = 0.1 , seed = filter_size + i*num_filters)) for i, filter_size in enumerate(filter_sizes)]
biases_conv = [tf.Variable(tf.constant(0.01, shape=[num_filters])) for filter_size in filter_sizes]
# biases_conv = [tf.Variable(tf.constant(0.1, shape=[num_filters])) for filter_size in filter_sizes]
weight_output = tf.Variable(tf.truncated_normal([num_filters_total, num_classes],
stddev=tf.sqrt(2.0 / (num_filters_total + num_classes)),
seed=0))
# weight_output = tf.Variable(tf.truncated_normal([num_filters_total, num_classes], stddev = 0.1, seed = 0))
bias_output = tf.Variable(tf.constant(0.01, shape=[num_classes]))
# bias_output = tf.Variable(tf.constant(0.1, shape=[num_classes]))
embeddings_const = tf.placeholder(tf.float32, shape=[embeddings_number, embedding_size])
# embeddings_tuned = tf.Variable(embeddings_placeholder)
embedded_chars = tf.nn.embedding_lookup(embeddings_const, input_x)
embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)
# tf_valid_dataset = tf.constant(valid_dataset)
tf_valid_dataset = tf.placeholder(tf.int32, shape=[None, sequence_length])
embedded_chars_valid = tf.nn.embedding_lookup(embeddings_const, tf_valid_dataset)
embedded_chars_expanded_valid = tf.expand_dims(embedded_chars_valid, -1)
scores = self.train_model(embedded_chars_expanded,
dropout_keep_prob,
filter_sizes=filter_sizes,
weights_conv_i = weights_conv[i],
biases_conv_i = biases_conv[i],
sequence_length = sequence_length,
num_filters_total = num_filters_total,
weight_output = weight_output,
bias_output = bias_output)
train_prediction = tf.nn.softmax(scores)
losses = tf.nn.softmax_cross_entropy_with_logits(scores, tf.cast(input_y, tf.float32))
for i in range(len(weights_conv)):
l2_loss += tf.nn.l2_loss(weights_conv[i])
l2_loss += tf.nn.l2_loss(weight_output)
loss = tf.reduce_mean(losses) + reg_coef * l2_loss
# global_step = tf.Variable(0)
# learning_rate = tf.train.exponential_decay(1e-4, global_step * batch_size, tf.size(input_x), 0.95, staircase=True)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
global_step = tf.Variable(0, trainable=False)
# optimizer = tf.train.GradientDescentOptimizer(1e-4).minimize(loss)
optimizer = tf.train.AdamOptimizer(1e-4).minimize(loss)
valid_prediction = tf.nn.softmax(self.train_model(embedded_chars_expanded_valid, 1.0,
filter_sizes=filter_sizes,
weights_conv_i = weights_conv[i],
biases_conv_i = biases_conv[i],
sequence_length = sequence_length,
num_filters_total = num_filters_total,
weight_output = weight_output,
bias_output = bias_output))
with tf.Session(graph=graph) as session:
session.run(tf.initialize_all_variables())
print ("Initialized")
if (shuffling == "y"):
np.random.seed(42)
train = np.asarray(list(zip(train_dataset, train_labels)))
np.random.shuffle(train)
train_dataset, train_labels = zip(*train)
train_dataset = np.asarray(train_dataset)
train_labels = np.asarray(train_labels)
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size)]
batch_labels = train_labels[offset:(offset + batch_size)]
feed_dict = {input_x: batch_data, input_y: batch_labels, reg_coef: l2_reg_lambda,
embeddings_const: embeddings}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict)
if not step % 100:
print ("Minibatch loss at step", step, ":", l)
print ("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("\n")
self.valid_predictions = session.run([valid_prediction], feed_dict={embeddings_const: embeddings, tf_valid_dataset: valid_dataset})
self.valid_predictions = np.asarray(self.valid_predictions).reshape(valid_labels.shape)
self.valid_accuracy = accuracy(self.valid_predictions, np.asarray(valid_labels))
def train_model(self, data, dropout_prob, filter_sizes, weights_conv_i, biases_conv_i, sequence_length, num_filters_total, weight_output, bias_output):
pooled_outputs = []
# lookup table
for i, filter_size in enumerate(filter_sizes):
# convolution layer with different filter size
conv = tf.nn.conv2d(data, weights_conv_i, strides=[1, 1, 1, 1], padding="VALID")
            # non-linearity
h = tf.nn.relu(tf.nn.bias_add(conv, biases_conv_i))
pooled = tf.nn.max_pool(h,
ksize=[1, sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID')
pooled_outputs.append(pooled)
h_pool = tf.concat(3, pooled_outputs)
h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
h_drop = tf.nn.dropout(h_pool_flat, dropout_prob)
scores = tf.nn.xw_plus_b(h_drop, weight_output, bias_output)
return scores | mit |
jaantollander/Pointwise-Convergence | src_legacy/analysis/convergence.py | 8 | 1917 | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numba
import numpy as np
import pandas
from numba import float64, int64
@numba.jit(float64(float64, float64, float64, float64), nopython=True, cache=True)
def slope(x0, y0, x, y):
return (y - y0) / (x - x0)
@numba.jit(float64(float64, float64, float64), nopython=True, cache=True)
def intercept(x0, y0, k):
return y0 - k * x0
@numba.jit(int64(float64[:], float64[:], int64, int64),
locals=dict(old_slope=float64, index=int64, length=int64, i=int64,
new_slope=float64, x0=float64, y0=float64, x1=float64,
y1=float64), nopython=True, cache=True)
def find_max_slope(x, y, start_index, last_index):
# TODO: Filter slopes over theoretical estimate
x0 = x[0+start_index]
y0 = y[0+start_index]
x1 = x[1+start_index]
y1 = y[1+start_index]
old_slope = slope(x0, y0, x1, y1)
index = 1 + start_index
i = 2 + start_index
while i <= last_index:
new_slope = slope(x0, y0, x[i], y[i])
if new_slope >= old_slope:
old_slope = new_slope
index = i
i += 1
return index
def find_max_slopes(x, y):
indexes = []
index = 0
last_index = x.size - 1 # Exit condition.
indexes.append(index)
while index != last_index:
index = find_max_slope(x, y, start_index=index, last_index=last_index)
indexes.append(index)
return indexes
def max_slope(x):
"""
:param x: pandas.Series
:return: Indexes
"""
if isinstance(x, pandas.Series):
index = np.log(x.index.values)
values = np.log(x.values)
mask = find_max_slopes(index, values)
return mask
else:
raise ValueError('Argument needs to be {}'.format(pandas.Series))
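# An illustrative usage sketch (the Series values below are made-up
# assumptions, not data from this module): `max_slope` expects a
# pandas.Series and returns the indexes of the maximal-slope envelope
# in log-log space, e.g. for selecting points of a convergence-rate fit.
#
#     import pandas
#     errors = pandas.Series([1.0, 0.4, 0.1, 0.03], index=[2, 4, 8, 16])
#     hull = max_slope(errors)        # indexes of the maximal-slope envelope
#     envelope = errors.iloc[hull]    # points kept for further analysis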
| mit |
samuel1208/scikit-learn | sklearn/qda.py | 140 | 7682 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
nate-russell/MapMyVacation | scripts/tensorflow_model.py | 1 | 3656 | '''
Code borrowed heavily from:
http://warmspringwinds.github.io/tensorflow/tf-slim/2016/10/30/image-classification-and-segmentation-using-tensorflow-and-tf-slim/
'''
import sys
sys.path.append("C:\\Users\\nate\\models\\slim")
from matplotlib import pyplot as plt
import numpy as np
import os
import tensorflow as tf
import urllib
from time import time
from datasets import imagenet, dataset_utils
from nets import vgg, inception_resnet_v2
from preprocessing import vgg_preprocessing
'''
model_url = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz"
# Specify where you want to download the model to
print(checkpoints_dir)
if not tf.gfile.Exists(checkpoints_dir):
tf.gfile.MakeDirs(checkpoints_dir)
dataset_utils.download_and_uncompress_tarball(model_url, checkpoints_dir)
'''
class TFModel:
def __init__(self, model_url, n_labels=5):
''' init Model, checkpoints dir, model properties'''
self.checkpoints_dir = os.path.join(os.path.dirname(__file__), 'checkpoints')
self.slim = tf.contrib.slim
self.image_size = vgg.vgg_16.default_image_size
self.names = imagenet.create_readable_names_for_imagenet_labels()
self.n_labels = n_labels
def url_2_x(self, url):
''' Pass url through network'''
with tf.Graph().as_default():
# Open specified url and load image as a string
with urllib.request.urlopen(url) as url_img:
image_string = url_img.read()
image = tf.image.decode_jpeg(image_string, channels=3)
processed_image = vgg_preprocessing.preprocess_image(image,
self.image_size,
self.image_size,
is_training=False)
processed_images = tf.expand_dims(processed_image, 0)
with self.slim.arg_scope(vgg.vgg_arg_scope()):
logits, _ = vgg.vgg_16(processed_images,
num_classes=1000,
is_training=False)
probabilities = tf.nn.softmax(logits)
self.init_fn = self.slim.assign_from_checkpoint_fn(
os.path.join(self.checkpoints_dir, 'vgg_16.ckpt'),
self.slim.get_model_variables('vgg_16'))
with tf.Session() as self.sess:
self.init_fn(self.sess)
probabilities, logits = self.sess.run([probabilities, logits])
probabilities = probabilities[0, 0:]
sorted_inds = [i[0] for i in sorted(enumerate(-probabilities),
key=lambda x: x[1])]
# Generate Top n_labels and their probs
label_list = []
for i in range(self.n_labels):
index = sorted_inds[i]
label_list.append({
"label": str(self.names[index + 1]),
"value": float(probabilities[index]),
"index": i,
})
label_list = sum(zip(reversed(label_list), label_list), ())[:len(label_list)]
return np.array(logits[0]), label_list
if __name__ == '__main__':
tfm = TFModel('url')
x, ldict = tfm.url_2_x("https://i.imgur.com/q5T3e5Km.jpg")
print(ldict)
x, ldict = tfm.url_2_x("https://i.imgur.com/uHz1RzCm.jpg")
print(ldict)
x, ldict = tfm.url_2_x("https://i.imgur.com/KG6QZbhm.jpg")
print(ldict)
x, ldict = tfm.url_2_x("https://i.imgur.com/yMa9Jwnm.jpg")
print(ldict)
| mit |
bogdan-kulynych/textfool | data_utils.py | 1 | 2743 | import spacy
import pickle
import pandas as pd
import numpy as np
from sklearn.model_selection import ShuffleSplit
from sklearn.preprocessing import LabelEncoder
raw_data_path = './data/twitter_gender_data.csv'
nlp = spacy.load('en')
def extract_features(docs, max_length):
docs = list(docs)
X = np.zeros((len(docs), max_length), dtype='int32')
for i, doc in enumerate(docs):
j = 0
for token in doc:
if token.has_vector and not token.is_punct and not token.is_space:
X[i, j] = token.rank + 1
j += 1
if j >= max_length:
break
return X
def load_twitter_gender_data(from_cache=False):
cached_data_path = raw_data_path + '.cached.pkl'
if from_cache:
print('Loading data from cache...')
with open(cached_data_path, 'rb') as f:
return pickle.load(f)
max_length = 1000
print('Loading and preparing data...')
raw_data = pd.read_csv(raw_data_path, encoding='latin1')
raw_data['text'] = raw_data['text'].apply(str)
raw_data['description'] = raw_data['description'].apply(str)
# Leave only those rows with 100% confidence,
# and throw away 'brand' and 'unknown' labels
raw_data = raw_data[raw_data['gender:confidence'] == 1]
raw_data = raw_data[raw_data['gender'].apply(
lambda val: val in ['male', 'female'])]
print('Raw data with 100% confidence:', raw_data.shape)
raw_data['combined_text'] = raw_data.apply(
lambda row: ' | '.join([row['text'], row['description']]), axis=1)
# Parse tweet texts
docs = list(nlp.pipe(raw_data['combined_text'], batch_size=5000, n_threads=2))
# Encode labels
label_encoder = LabelEncoder()
label_encoder.fit(raw_data['gender'])
y = label_encoder.transform(raw_data['gender'])
# Pull the raw_data into vectors
X = extract_features(docs, max_length=max_length)
# Split into train and test sets
rs = ShuffleSplit(n_splits=2, random_state=42, test_size=0.2)
train_indices, test_indices = next(rs.split(X))
X_train = X[train_indices]
y_train = y[train_indices]
X_test = X[test_indices]
y_test = y[test_indices]
docs = np.array(docs, dtype=object)
docs_train = docs[train_indices]
docs_test = docs[test_indices]
numeric_data = X_train, y_train, X_test, y_test
raw_data = docs_train, docs_test, label_encoder
with open(cached_data_path, 'wb') as f:
pickle.dump((numeric_data, raw_data), f)
return numeric_data, raw_data
def load(data_name, *args, **kwargs):
load_fn_map = {
'twitter_gender_data': load_twitter_gender_data
}
return load_fn_map[data_name](*args, **kwargs)
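# Illustrative usage sketch (the unpacked variable names are assumptions;
# 'twitter_gender_data' is the only dataset name registered above):
#
#     (X_train, y_train, X_test, y_test), (docs_train, docs_test, encoder) = \
#         load('twitter_gender_data', from_cache=False)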
| mit |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/tests/test_streamplot.py | 3 | 1144 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
def velocity_field():
Y, X = np.mgrid[-3:3:100j, -3:3:100j]
U = -1 - X**2 + Y
V = 1 + X - Y**2
return X, Y, U, V
@image_comparison(baseline_images=['streamplot_colormap_test_image'])
def test_colormap():
X, Y, U, V = velocity_field()
plt.streamplot(X, Y, U, V, color=U, density=0.6, linewidth=2,
cmap=plt.cm.autumn)
plt.colorbar()
@image_comparison(baseline_images=['streamplot_linewidth_test_image'])
def test_linewidth():
X, Y, U, V = velocity_field()
speed = np.sqrt(U*U + V*V)
lw = 5*speed/speed.max()
plt.streamplot(X, Y, U, V, density=[0.5, 1], color='k', linewidth=lw)
@image_comparison(baseline_images=['streamplot_masks_and_nans_test_image'])
def test_masks_and_nans():
X, Y, U, V = velocity_field()
mask = np.zeros(U.shape, dtype=bool)
mask[40:60, 40:60] = 1
U = np.ma.array(U, mask=mask)
U[:20, :20] = np.nan
plt.streamplot(X, Y, U, V, color=U, cmap=plt.cm.Blues)
if __name__=='__main__':
import nose
nose.runmodule()
| gpl-3.0 |
netssfy/machine-learning-exercise | deep-learning/pomelo-predictor/pomelo_predictor.py | 1 | 5958 | #predict if my little pomelo is existence in a photo or not
import numpy as np
import matplotlib.pyplot as plt
import os
import time
import tensorflow as tf
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
CHANNEL = 3
CONV1_KERNEL_SIZE = [8, 8]
CONV1_FILTER = 16
POOL1_SIZE = [4, 4]
CONV2_KERNEL_SIZE = [6, 6]
CONV2_FILTER = 32
POOL2_SIZE = [4, 4]
CONV3_KERNEL_SIZE = [8, 8]
CONV3_FILTER = 128
POOL3_SIZE = [2, 2]
DENSE_UNITS = 1024
home_dir = 'D:\\Projects\\machine-learning-exercise\\deep-learning\\pomelo-predictor'
image_dir = home_dir + '\\pomelos\\'
def cnn_model_fn(features, labels, mode):
print('input layer [{}, {}, {}, {}]'.format(-1, IMAGE_WIDTH, IMAGE_HEIGHT, CHANNEL))
input_layer = tf.reshape(features['x'], [-1, IMAGE_WIDTH, IMAGE_HEIGHT, CHANNEL])
print('actual {}'.format(input_layer.shape))
print('conv1 output [{}, {}, {}, {}]'.format(-1, IMAGE_WIDTH, IMAGE_HEIGHT, CONV1_FILTER))
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=CONV1_FILTER,
kernel_size=CONV1_KERNEL_SIZE,
padding='same',
activation=tf.nn.relu)
print('actual {}'.format(conv1.shape))
print('pool1 output [{}, {}, {}, {}]'.format(-1, IMAGE_WIDTH / POOL1_SIZE[0], IMAGE_HEIGHT / POOL1_SIZE[1], CONV1_FILTER))
pool1 = tf.layers.max_pooling2d(
inputs=conv1,
pool_size=POOL1_SIZE,
strides=POOL1_SIZE[0])
print('actual {}'.format(pool1.shape))
print('conv2 output [{}, {}, {}, {}]'.format(-1, IMAGE_WIDTH / POOL1_SIZE[0], IMAGE_HEIGHT / POOL1_SIZE[1], CONV2_FILTER))
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=CONV2_FILTER,
kernel_size=CONV2_KERNEL_SIZE,
padding='same',
activation=tf.nn.relu)
print('actual {}'.format(conv2.shape))
print('pool2 output [{}, {}, {}, {}]'.format(-1, IMAGE_WIDTH / POOL1_SIZE[0] / POOL2_SIZE[0], IMAGE_HEIGHT / POOL1_SIZE[1] / POOL2_SIZE[1], CONV2_FILTER))
pool2 = tf.layers.max_pooling2d(
inputs=conv2,
pool_size=POOL2_SIZE,
strides=POOL2_SIZE[0])
print('actual {}'.format(pool2.shape))
'''
#[*, 16, 16, 128]
conv3 = tf.layers.conv2d(
inputs=pool2,
filters=CONV3_FILTER,
kernel_size=CONV3_KERNEL_SIZE,
padding='same',
activation=tf.nn.relu)
#[*, 8, 8, 128]
pool3 = tf.layers.max_pooling2d(
inputs=conv3,
pool_size=POOL3_SIZE,
strides=2)
'''
flat = tf.reshape(pool2, [-1, pool2.shape[1] * pool2.shape[2] * pool2.shape[3]])
dense = tf.layers.dense(inputs=flat, units=DENSE_UNITS, activation=tf.nn.relu)
print('dense actual {}'.format(dense.shape))
dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
logits = tf.layers.dense(inputs=dropout, units=2)
predictions = {
'classes': tf.argmax(input=logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions['classes'])
}
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
data = load_pomelo()
pomelo_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir=os.getcwd() + '/tmp/pomelo_model')
tensors_to_log = {'probabilities': 'softmax_tensor'}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=25)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': data['train_x']},
y=data['train_y'],
batch_size=50,
num_epochs=None,
shuffle=False)
startTick = time.time()
print('begin training at {}...'.format(startTick))
pomelo_classifier.train(input_fn=train_input_fn, steps=200, hooks=[logging_hook])
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': data['eval_x']},
y=data['eval_y'],
num_epochs=1,
shuffle=False)
print('begin eval...')
eval_results = pomelo_classifier.evaluate(input_fn=eval_input_fn)
print('I am done. {} ticks elapsed'.format(time.time() - startTick))
print(eval_results)
def load_pomelo():
#mnist = tf.contrib.learn.datasets.load_dataset('mnist');
#return { 'x': mnist.train.images, 'y': np.asarray(mnist.train.labels, dtype=np.int32) }
train_x = []
train_y = []
eval_x = []
eval_y = []
dataset = np.genfromtxt(image_dir + 'y.csv', delimiter=',', dtype='U8,i4')
index = 0
with tf.Session():
for row in dataset:
index = index + 1
fn = row[0]
image = tf.gfile.FastGFile(image_dir + fn, 'rb').read()
image = tf.image.decode_jpeg(image)
image = tf.image.resize_images(image, [IMAGE_WIDTH, IMAGE_HEIGHT], tf.image.ResizeMethod.NEAREST_NEIGHBOR)
if index <= 50:
train_x.append(processImage(image))
train_y.append(row[1])
else:
eval_x.append(processImage(image))
eval_y.append(row[1])
return {
'train_x': np.array(train_x),
'train_y': np.array(train_y),
'eval_x': np.array(eval_x),
'eval_y': np.array(eval_y)
}
def processImage(image):
imageArray = np.array(image.eval()).flatten()
return np.divide(imageArray, np.full(imageArray.shape, 255, dtype='float16'))
main({}) | mit |
fengzhanglab/GUIDES | static/data/pre_processed/generate_GRCm38_genes_2.py | 2 | 1344 | # produce list of genes in GRCm38
import pandas as pd
import json
# open refgene
refGeneFilename = '../gtex/gtex_mouse/refGene_mouse.txt'
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
# open biomart
biomartFilename = 'mart_export_mus_2.txt'
biomart = pd.read_csv(biomartFilename, sep="\t")
seen = {}
results = []
total_len = len(refGene)
for index, row in refGene.iterrows():
ensembl_id = row['name']
if ensembl_id not in seen:
the_loc = biomart.loc[biomart['Gene ID'] == ensembl_id]
gene_name = list(the_loc['Associated Gene Name'])[0]
entrez = list(the_loc['EntrezGene ID'])[0]
if pd.isnull(entrez):
entrez = ''
print ensembl_id, gene_name, 'has no entrez'
else:
entrez = str(int(entrez))
if pd.isnull(gene_name):
gene_name = ''
print ensembl_id, 'has no gene_name'
results.append({
'name': gene_name,
'ensembl_id': ensembl_id,
'entrez_id': entrez,
'description': ""
})
seen[ensembl_id] = True
with open('genes_list_GRCm38_processed.txt', 'w') as output:
json.dump(results, output)
with open('genes_list_GRCm38.txt', 'w') as output:
json.dump(results, output)
| agpl-3.0 |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/matplotlib/tests/test_scale.py | 7 | 1102 | from __future__ import print_function
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import numpy as np
import io
@image_comparison(baseline_images=['log_scales'], remove_text=True)
def test_log_scales():
ax = plt.subplot(122, yscale='log', xscale='symlog')
ax.axvline(24.1)
ax.axhline(24.1)
@image_comparison(baseline_images=['logit_scales'], remove_text=True,
extensions=['png'])
def test_logit_scales():
ax = plt.subplot(111, xscale='logit')
# Typical extinction curve for logit
x = np.array([0.001, 0.003, 0.01, 0.03, 0.1, 0.2, 0.3, 0.4, 0.5,
0.6, 0.7, 0.8, 0.9, 0.97, 0.99, 0.997, 0.999])
y = 1.0 / x
ax.plot(x, y)
ax.grid(True)
@cleanup
def test_log_scatter():
"""Issue #1799"""
fig, ax = plt.subplots(1)
x = np.arange(10)
y = np.arange(10) - 1
ax.scatter(x, y)
buf = io.BytesIO()
fig.savefig(buf, format='pdf')
buf = io.BytesIO()
fig.savefig(buf, format='eps')
buf = io.BytesIO()
fig.savefig(buf, format='svg')
| mit |
ryfeus/lambda-packs | Skimage_numpy/source/scipy/integrate/quadrature.py | 33 | 28087 | from __future__ import division, print_function, absolute_import
import numpy as np
import math
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special.orthogonal import p_roots
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
class AccuracyWarning(Warning):
pass
def _cached_p_roots(n):
"""
Cache p_roots results to speed up calls of the fixed_quad function.
"""
if n in _cached_p_roots.cache:
return _cached_p_roots.cache[n]
_cached_p_roots.cache[n] = p_roots(n)
return _cached_p_roots.cache[n]
_cached_p_roots.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
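    Examples
    --------
    A minimal usage sketch; the order-5 rule integrates ``np.cos`` over
    ``[0, pi/2]`` to well within the default ``allclose`` tolerance:
    >>> from scipy.integrate import fixed_quad
    >>> import numpy as np
    >>> val, _ = fixed_quad(np.cos, 0.0, np.pi / 2, n=5)
    >>> np.allclose(val, 1.0)
    True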
"""
x, w = _cached_p_roots(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=0), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
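    Examples
    --------
    A minimal usage sketch with the default tolerances:
    >>> from scipy.integrate import quadrature
    >>> import numpy as np
    >>> val, err = quadrature(np.exp, 0.0, 1.0)
    >>> np.allclose(val, np.e - 1)
    True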
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : int, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, uses this value as the first value in the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
    if x is None:  # Evenly spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
    even : {'avg', 'first', 'last'}, optional
        'avg' : Average two results: 1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
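    Examples
    --------
    A small sketch: with an odd number of equally spaced samples the rule
    is exact for ``x**2``:
    >>> from scipy.integrate import simps
    >>> import numpy as np
    >>> x = np.linspace(0, 2, 5)
    >>> np.allclose(simps(x**2, x), 8 / 3.)
    True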
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
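    Examples
    --------
    A short sketch using ``2**4 + 1`` equally spaced samples of ``x**2``:
    >>> from scipy.integrate import romb
    >>> import numpy as np
    >>> x = np.linspace(0, 1, 17)
    >>> np.allclose(romb(x**2, dx=x[1] - x[0]), 1 / 3.)
    True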
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in xrange(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in xrange(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="")
for i in xrange(k+1):
for j in xrange(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <[email protected]>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <[email protected]>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <[email protected]>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in xrange(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in xrange(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in xrange(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in xrange(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# The error in these coefficients grows as N gets larger
# or as the samples get closer together.
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
+ B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`
where :math:`\\xi \\in [x_0,x_N]`
and :math:`\\Delta x = \\frac{x_N-x_0}{N}` is the average samples spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
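    Examples
    --------
    A quick sketch: the order-2 (Simpson) rule recovers the familiar
    1/3, 4/3, 1/3 weights:
    >>> from scipy.integrate import newton_cotes
    >>> import numpy as np
    >>> an, B = newton_cotes(2, 1)
    >>> np.allclose(an, [1 / 3., 4 / 3., 1 / 3.])
    True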
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
| mit |
kazemakase/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
aetilley/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity;
            # indexing with [:, np.newaxis] would not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
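A minimal usage sketch for the estimators defined in the file above (a hypothetical snippet, not part of the original source); it assumes scikit-learn is installed and exercises only the methods documented in the docstrings: fit, predict, predict_proba, apply and feature_importances_.

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

# Fit a shallow tree on the bundled iris data, as in the docstring example.
iris = load_iris()
clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(iris.data, iris.target)

print(clf.predict(iris.data[:5]))        # predicted class labels
print(clf.predict_proba(iris.data[:5]))  # class fractions in the reached leaves
print(clf.apply(iris.data[:5]))          # index of the leaf each sample lands in
print(clf.feature_importances_)          # normalized impurity reduction per feature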
PhdDone/AD3 | python/example_compression_budget.py | 2 | 4698 | import itertools
import numpy as np
import matplotlib.pyplot as plt
import pdb
import ad3.factor_graph as fg
length = 30
budget = 10
# Decide bigram_positions.
bigram_positions = []
for i in xrange(-1, length):
value = np.random.uniform()
if value < 0.4:
bigram_positions.append(i)
# Decide whether each position counts for budget.
counts_for_budget = []
for i in xrange(length):
value = np.random.uniform()
if value < 0.1:
counts_for_budget.append(False)
else:
counts_for_budget.append(True)
# 1) Build a factor graph using a SEQUENCE and a BUDGET factor.
factor_graph = fg.PFactorGraph()
multi_variables = []
for i in xrange(length):
multi_variable = factor_graph.create_multi_variable(2)
multi_variable.set_log_potential(0, 0.0)
value = np.random.normal()
multi_variable.set_log_potential(1, value)
multi_variables.append(multi_variable)
edge_log_potentials = []
for i in xrange(length+1):
if i == 0:
num_previous_states = 1
else:
num_previous_states = 2
if i == length:
num_current_states = 1
else:
num_current_states = 2
for k in xrange(num_previous_states):
for l in xrange(num_current_states):
if k == 1 and l == 1:
value = np.random.normal()
edge_log_potentials.append(value)
else:
edge_log_potentials.append(0.0)
# Create a sequential factor.
factors = []
variables = []
num_states = []
for i in xrange(length):
for state in xrange(2):
variables.append(multi_variables[i].get_state(state))
num_states.append(2)
num_factors = 0
factor = fg.PFactorSequence()
# Set True below to let the factor graph own the factor so that we
# don't need to delete it.
factor_graph.declare_factor(factor, variables, False)
factor.initialize(num_states)
factor.set_additional_log_potentials(edge_log_potentials)
factors.append(factor)
num_factors += 1
# Create a budget factor.
variables = []
for i in xrange(length):
if counts_for_budget[i]:
variables.append(multi_variables[i].get_state(1))
negated = [False] * len(variables)
factor_graph.create_factor_budget(variables, negated, budget)
num_factors += 1
# Run AD3.
#pdb.set_trace()
factor_graph.set_eta_ad3(.1)
factor_graph.adapt_eta_ad3(True)
factor_graph.set_max_iterations_ad3(1000)
value, posteriors, additional_posteriors, status = factor_graph.solve_lp_map_ad3()
# Print solution.
t = 0
best_states = []
for i in xrange(length):
local_posteriors = posteriors[t:(t+2)]
j = np.argmax(local_posteriors)
best_states.append(j)
t += num_states[i]
print best_states
#pdb.set_trace()
# 2) Build a factor graph using a COMPRESSION_BUDGET factor.
compression_factor_graph = fg.PFactorGraph()
variable_log_potentials = []
for i in xrange(length):
value = multi_variables[i].get_log_potential(1)
variable_log_potentials.append(value)
additional_log_potentials = []
index = 0
for i in xrange(length+1):
if i == 0:
num_previous_states = 1
else:
num_previous_states = 2
if i == length:
num_current_states = 1
else:
num_current_states = 2
for k in xrange(num_previous_states):
for l in xrange(num_current_states):
value = edge_log_potentials[index]
index += 1
if k == num_previous_states-1 and l == num_current_states-1 and i-1 in bigram_positions:
variable_log_potentials.append(value)
else:
additional_log_potentials.append(value)
binary_variables = []
factors = []
for i in xrange(len(variable_log_potentials)):
binary_variable = compression_factor_graph.create_binary_variable()
binary_variable.set_log_potential(variable_log_potentials[i])
binary_variables.append(binary_variable)
factor = fg.PFactorCompressionBudget()
variables = binary_variables
compression_factor_graph.declare_factor(factor, variables, True)
#bigram_positions = []
factor.initialize(length, budget, counts_for_budget, bigram_positions)
factor.set_additional_log_potentials(additional_log_potentials)
factors.append(factor)
# Run AD3.
compression_factor_graph.set_eta_ad3(.1)
compression_factor_graph.adapt_eta_ad3(True)
compression_factor_graph.set_max_iterations_ad3(1000)
print bigram_positions
#pdb.set_trace()
value, posteriors, additional_posteriors, status = compression_factor_graph.solve_lp_map_ad3()
# Print solution.
t = 0
best_states = []
for i in xrange(length):
if posteriors[i] > 0.5:
j = 1
else:
j = 0
best_states.append(j)
print best_states
#pdb.set_trace()
| lgpl-3.0 |
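A short follow-up sketch (plain NumPy, no extra AD3 calls) that checks the decoded sequence against the budget constraint enforced by the factor above; `best_states`, `counts_for_budget` and `budget` are the variables already defined in the script.

import numpy as np

selected = np.array(best_states)                    # 0/1 decisions decoded above
counted = np.array(counts_for_budget, dtype=bool)   # positions that count for the budget
used = int(selected[counted].sum())
assert used <= budget, "budget factor violated: %d > %d" % (used, budget)
print("budget used: %d of %d" % (used, budget))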
demiangomez/Parallel.GAMIT | com/TrajectoryFit.py | 1 | 18941 | """
Project: Parallel.Archive
Date: 02/16/2017
Author: Demian D. Gomez
"""
import argparse
import dbConnection
import Utils
import pyETM
from tqdm import tqdm
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import simplekml
from io import BytesIO
import base64
import os
from scipy.interpolate import griddata
def plot_station_param(NetworkCode, StationCode, parameter_name, unit, pn, pe):
fig = plt.figure(figsize=(5, 5))
fig.suptitle('Station %s for %s.%s' % (parameter_name, NetworkCode, StationCode))
plt.plot(0, 0, 'ok')
plt.xlim([-30, 30])
plt.ylim([-30, 30])
plt.quiver(0, 0, np.multiply(pe, 1000), np.multiply(pn, 1000), scale=1, scale_units='x', zorder=3)
plt.grid(True)
plt.xlabel('[' + unit + ']')
plt.ylabel('[' + unit + ']')
figfile = BytesIO()
fig.savefig(figfile, format='png')
# plt.show()
figfile.seek(0) # rewind to beginning of file
figdata_png = base64.b64encode(figfile.getvalue())
plt.close()
return figdata_png
def generate_kmz(kmz, stations, discarded):
tqdm.write(' >> Generating KML (see production directory)...')
kml = simplekml.Kml()
folder1 = kml.newfolder(name='velocity')
folder2 = kml.newfolder(name='discarded')
# define styles
styles_ok = simplekml.StyleMap()
styles_ok.normalstyle.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/placemark_square.png'
styles_ok.normalstyle.iconstyle.color = 'ff00ff00'
styles_ok.normalstyle.labelstyle.scale = 0
styles_ok.highlightstyle.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/placemark_square.png'
styles_ok.highlightstyle.iconstyle.color = 'ff00ff00'
styles_ok.highlightstyle.labelstyle.scale = 3
styles_nok = simplekml.StyleMap()
styles_nok.normalstyle.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/placemark_square.png'
styles_nok.normalstyle.iconstyle.color = 'ff0000ff'
styles_nok.normalstyle.labelstyle.scale = 0
styles_nok.highlightstyle.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/placemark_square.png'
styles_nok.highlightstyle.iconstyle.color = 'ff0000ff'
styles_nok.highlightstyle.labelstyle.scale = 3
for stn in tqdm(stations, ncols=160, disable=None, desc=' -- Included station list'):
plot = plot_station_param(stn['NetworkCode'], stn['StationCode'], 'velocity', 'mm/yr', stn['vn'], stn['ve'])
pt = folder1.newpoint(name=stn['NetworkCode'] + '.' + stn['StationCode'], coords=[(stn['lon'], stn['lat'])])
pt.stylemap = styles_ok
pt.description = """<strong>NE vel: %5.2f %5.2f [mm/yr]</strong><br><br>
<table width="880" cellpadding="0" cellspacing="0">
<tr>
<td align="center" valign="top">
<strong>Parameters:</strong><br>
<img src="data:image/png;base64, %s" alt="Observation information" height="300" width="300"/><br>
<strong>Trajectory model:</strong><br>
<img src="data:image/png;base64, %s" alt="Observation information" height="750" width="1100"/>
</p>
</tr>
</td>
</table>
""" % (stn['vn']*1000, stn['ve']*1000, plot, stn['etm'])
ls = folder1.newlinestring(name=stn['NetworkCode'] + '.' + stn['StationCode'])
ls.coords = [(stn['lon'], stn['lat']),
(stn['lon']+stn['ve']*1/0.025, stn['lat']+stn['vn']*1/0.025*np.cos(stn['lat']*np.pi/180))]
ls.style.linestyle.width = 3
ls.style.linestyle.color = 'ff0000ff'
for stn in tqdm(discarded, ncols=160, disable=None, desc=' -- Excluded station list'):
plot = plot_station_param(stn['NetworkCode'], stn['StationCode'], 'velocity', 'mm/yr', stn['vn'], stn['ve'])
pt = folder2.newpoint(name=stn['NetworkCode'] + '.' + stn['StationCode'], coords=[(stn['lon'], stn['lat'])])
pt.stylemap = styles_nok
pt.description = """<strong>NE vel: %5.2f %5.2f [mm/yr]</strong><br><br>
<table width="880" cellpadding="0" cellspacing="0">
<tr>
<td align="center" valign="top">
<strong>Parameters:</strong><br>
<img src="data:image/png;base64, %s" alt="Observation information" height="300" width="300"/><br>
<strong>Trajectory model:</strong><br>
<img src="data:image/png;base64, %s" alt="Observation information" height="750" width="1100"/>
</p>
</tr>
</td>
</table>
""" % (stn['vn']*1000, stn['ve']*1000, plot, stn['etm'])
ls = folder2.newlinestring(name=stn['NetworkCode'] + '.' + stn['StationCode'])
ls.coords = [(stn['lon'], stn['lat']),
(stn['lon']+stn['ve']*1/0.025, stn['lat']+stn['vn']*1/0.025*np.cos(stn['lat']*np.pi/180))]
ls.style.linestyle.width = 3
ls.style.linestyle.color = 'ff0000ff'
if not os.path.exists('production'):
os.makedirs('production')
tqdm.write(' >> Saving kmz...')
kml.savekmz(kmz)
def process_interseismic(cnn, stnlist, force_stnlist, stack, sigma_cutoff, vel_cutoff, lat_lim, filename, kmz):
# start by checking that the stations in the list have a linear start (no post-seismic)
# and more than 2 years of data until the first earthquake or non-linear behavior
tqdm.write(' >> Analyzing suitability of station list to participate in interseismic trajectory model...')
tqdm.write(' -- velocity cutoff: %.2f mm/yr; output filename: %s' % (vel_cutoff, filename))
use_station = []
discarded = []
velocities = []
min_lon = 9999
max_lon = -9999
min_lat = 9999
max_lat = -9999
for stn in tqdm(stnlist, ncols=160, disable=None):
try:
etm = pyETM.GamitETM(cnn, stn['NetworkCode'], stn['StationCode'], stack_name=stack)
use = True
# only check everything is station not included in the force list
if stn not in force_stnlist:
# check that station is within latitude range
if etm.gamit_soln.lat[0] < lat_lim[0] or etm.gamit_soln.lat[0] > lat_lim[1]:
tqdm.write(' -- %s.%s excluded because it is outside of the latitude limit'
% (stn['NetworkCode'], stn['StationCode']))
use = False
# check that station has at least 2 years of data
if etm.gamit_soln.date[-1].fyear - etm.gamit_soln.date[0].fyear < 2 and use:
                    tqdm.write(' -- %s.%s rejected due to having less than two years of observations %s -> %s'
% (stn['NetworkCode'], stn['StationCode'],
etm.gamit_soln.date[0].yyyyddd(),
etm.gamit_soln.date[-1].yyyyddd()))
use = False
# other checks
if etm.A is not None:
if len(etm.Jumps.table) > 0 and use:
eq_jumps = [j for j in etm.Jumps.table
if j.p.jump_type == pyETM.CO_SEISMIC_JUMP_DECAY and j.fit]
for j in eq_jumps:
if j.magnitude >= 7 and j.date.fyear < etm.gamit_soln.date[0].fyear + 1.5:
tqdm.write(' -- %s.%s has a Mw %.1f in %s and data starts in %s'
% (stn['NetworkCode'], stn['StationCode'],
j.magnitude, j.date.yyyyddd(), etm.gamit_soln.date[0].yyyyddd()))
use = False
break
eq_jumps = [j for j in etm.Jumps.table
if j.p.jump_type == pyETM.CO_SEISMIC_DECAY and j.fit]
if len(eq_jumps) > 0 and use:
tqdm.write(' -- %s.%s has one or more earthquakes before data started in %s'
% (stn['NetworkCode'], stn['StationCode'], etm.gamit_soln.date[0].yyyyddd()))
use = False
if (etm.factor[0]*1000 > sigma_cutoff or etm.factor[1]*1000 > sigma_cutoff) and use:
tqdm.write(' -- %s.%s rejected due to large wrms %5.2f %5.2f %5.2f'
% (stn['NetworkCode'], stn['StationCode'],
etm.factor[0] * 1000, etm.factor[1] * 1000, etm.factor[2] * 1000))
use = False
norm = np.sqrt(np.sum(np.square(etm.Linear.p.params[0:2, 1]*1000)))
if norm > vel_cutoff and use:
tqdm.write(' -- %s.%s rejected due to large NEU velocity: %5.2f %5.2f %5.2f NE norm %5.2f'
% (stn['NetworkCode'], stn['StationCode'],
etm.Linear.p.params[0, 1] * 1000,
etm.Linear.p.params[1, 1] * 1000,
etm.Linear.p.params[2, 1] * 1000,
norm))
use = False
elif use:
tqdm.write(' -- %s.%s too few solutions to calculate ETM'
% (stn['NetworkCode'], stn['StationCode']))
use = False
else:
tqdm.write(' -- %s.%s was forced to be included in the list'
% (stn['NetworkCode'], stn['StationCode']))
if use:
tqdm.write(' -- %s.%s added NEU wrms: %5.2f %5.2f %5.2f NEU vel: %5.2f %5.2f %5.2f'
% (stn['NetworkCode'], stn['StationCode'],
etm.factor[0]*1000, etm.factor[1]*1000, etm.factor[2]*1000,
etm.Linear.p.params[0, 1]*1000,
etm.Linear.p.params[1, 1]*1000,
etm.Linear.p.params[2, 1]*1000))
use_station.append(stn)
velocities.append({'NetworkCode': etm.NetworkCode,
'StationCode': etm.StationCode,
'lat': etm.gamit_soln.lat[0],
'lon': etm.gamit_soln.lon[0],
'vn': etm.Linear.p.params[0, 1],
've': etm.Linear.p.params[1, 1],
'etm': etm.plot(plot_missing=False, plot_outliers=False, fileio=BytesIO())})
if etm.gamit_soln.lon[0] < min_lon:
min_lon = etm.gamit_soln.lon
if etm.gamit_soln.lon[0] > max_lon:
max_lon = etm.gamit_soln.lon
if etm.gamit_soln.lat[0] < min_lat:
min_lat = etm.gamit_soln.lat
if etm.gamit_soln.lat[0] > max_lat:
max_lat = etm.gamit_soln.lat
elif not use and etm.A is not None:
discarded.append({'NetworkCode': etm.NetworkCode,
'StationCode': etm.StationCode,
'lat': etm.gamit_soln.lat[0],
'lon': etm.gamit_soln.lon[0],
'vn': etm.Linear.p.params[0, 1],
've': etm.Linear.p.params[1, 1],
'etm': etm.plot(plot_missing=False, plot_outliers=False, fileio=BytesIO())})
except pyETM.pyETMException as e:
tqdm.write(' -- %s.%s: %s' % (stn['NetworkCode'], stn['StationCode'], str(e)))
tqdm.write(' >> Total number of stations for linear model: %i' % len(use_station))
map = Basemap(llcrnrlon=min_lon-2, llcrnrlat=min_lat-2, urcrnrlon=max_lon+2, urcrnrlat=max_lat+2,
resolution='i', projection='merc',
lon_0=(max_lon-min_lon)/2+min_lon,
lat_0=(max_lat-min_lat)/2+min_lat)
plt.figure(figsize=(15, 10))
map.drawcoastlines()
map.drawcountries()
# map.drawstates()
# map.fillcontinents(color='#cc9966', lake_color='#99ffff')
# draw parallels and meridians.
# map.drawparallels(np.arange(np.floor(min_lat), np.ceil(max_lat), 2.))
# map.drawmeridians(np.arange(np.floor(min_lon), np.ceil(max_lon), 2.))
# map.drawmapboundary(fill_color='#99ffff')
map.quiver([l['lon'] for l in velocities], [l['lat'] for l in velocities],
[l['ve'] for l in velocities], [l['vn'] for l in velocities], scale=0.25,
latlon=True, color='blue', zorder=3)
plt.title("Transverse Mercator Projection")
plt.savefig('production/test.png')
plt.close()
outvar = np.array([[v['lon'], v['lat'], v['ve'], v['vn']] for v in velocities])
np.savetxt(filename, outvar)
if kmz:
generate_kmz(kmz, velocities, discarded)
def process_postseismic(cnn, stnlist, force_stnlist, stack, interseimic_filename, events, sigma_cutoff, lat_lim,
filename, kmz):
tqdm.write(' >> Analyzing suitability of station list to participate in interseismic trajectory model...')
tqdm.write(' -- output filename: %s' % filename)
use_station = []
discarded = []
velocities = []
min_lon = 9999
max_lon = -9999
min_lat = 9999
max_lat = -9999
# load the interseismic model
model = np.loadtxt(interseimic_filename)
model[:, 0] -= 360
for stn in tqdm(stnlist, ncols=160, disable=None):
try:
lla = cnn.query_float('SELECT lat,lon FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (stn['NetworkCode'], stn['StationCode']), as_dict=True)[0]
ve = griddata(model[:, 0:2], model[:, 2] / 1000, (lla['lon'], lla['lat']), method='cubic')
vn = griddata(model[:, 0:2], model[:, 3] / 1000, (lla['lon'], lla['lat']), method='cubic')
etm = pyETM.GamitETM(cnn, stn['NetworkCode'], stn['StationCode'], stack_name=stack,
interseismic=[vn, ve, 0.])
etm.plot('production/%s.%s_.png' % (stn['NetworkCode'], stn['StationCode']))
# only check everything is station not included in the force list
#if stn not in force_stnlist:
except pyETM.pyETMException as e:
tqdm.write(' -- %s.%s: %s' % (stn['NetworkCode'], stn['StationCode'], str(e)))
def main():
parser = argparse.ArgumentParser(description='Archive operations Main Program')
parser.add_argument('stack', type=str, nargs=1, metavar='{stack name}',
help="Name of the GAMIT stack to use for the trajectories")
parser.add_argument('-stn', '--stations', nargs='+', type=str, metavar='{station list}', default=[],
help="Specify the list of networks/stations given in [net].[stnm] format or just [stnm] "
"that will be filtered using the selected field specifications. If [stnm] is "
"not unique in the database, all stations with that name will be processed."
"Alternatively, a file with the station list can be provided.")
parser.add_argument('-force_stn', '--force_stations', nargs='+', type=str, metavar='{station list}', default=[],
help="Force stations to be included in the selected field. "
"Specify the list of networks/stations given in [net].[stnm] format or just [stnm]. "
"If [stnm] is not unique in the database, all stations with that name will be processed."
"Alternatively, a file with the station list can be provided.")
parser.add_argument('-lat_lim', '--latitude_limits', nargs=2, type=float, metavar='{min_lat max_lat}',
help="Latitude limits (decimal degrees). Discard stations outside of this limit.")
parser.add_argument('-sigma', '--sigma_cutoff', nargs=1, type=float, metavar='{mm}', default=[2.5],
help="Reject stations based on the ETM's wrms (in mm). This filter does not apply for forced "
"station list.")
parser.add_argument('-vel', '--velocity_cutoff', nargs=1, type=float, metavar='{mm/yr}', default=[50],
help="ETM velocity cutoff value to reject stations for velocity interpolation "
"(norm of NE in mm/yr).")
parser.add_argument('-interseismic', '--interseismic_process', nargs='*', type=str,
metavar='[velocity_cutoff] [output_filename] [kmz_filename]',
help="Process stations for interseismic velocity field computation. Reject stations with "
"interseismic velocity > {velocity_cutoff} (default 50 mm/yr). Filename to output the "
"selected stations (default filename interseismic.txt). Optionally, specify a kmz "
"filename to output the selected and rejected stations with their velocity components and "
"ETMs embedded in the kmz (default no kmz).")
parser.add_argument('-postseismic', '--postseismic_process', nargs='+', type=str,
metavar='{velocity_field_grid} {event_date} [secondary_relaxation] [output_filename]',
help="Process stations for postseismic field computation. Reject stations with "
"interseismic velocity > {velocity_cutoff} (default 50 mm/yr). Filename to output the "
"selected stations (default filename interseismic.txt)")
args = parser.parse_args()
cnn = dbConnection.Cnn("gnss_data.cfg")
# station list
stnlist = Utils.process_stnlist(cnn, args.stations)
force_stnlist = Utils.process_stnlist(cnn, args.force_stations, summary_title='Forced station list:')
# get the station list
if args.interseismic_process is not None:
if len(args.interseismic_process) == 0:
args.interseismic_process = ['50.', 'interseismic.txt', None]
elif len(args.interseismic_process) == 1:
args.interseismic_process += ['interseismic.txt', None]
elif len(args.interseismic_process) == 2:
args.interseismic_process += [None]
process_interseismic(cnn, stnlist, force_stnlist, args.stack[0],
args.sigma_cutoff[0], float(args.interseismic_process[0]), args.latitude_limits,
args.interseismic_process[1], args.interseismic_process[2])
if args.postseismic_process is not None:
process_postseismic(cnn, stnlist, force_stnlist, args.stack[0], args.postseismic_process[0], [],
[], [], 'out.txt', 'sss.kmz')
if __name__ == '__main__':
main()
| gpl-3.0 |
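A standalone sketch of the interpolation step used in `process_postseismic` above: it evaluates an interseismic velocity model at a station location with `scipy.interpolate.griddata`. The grid values and the station coordinates here are synthetic placeholders; the real script reads the model columns (lon, lat, ve, vn) from a file.

import numpy as np
from scipy.interpolate import griddata

# Synthetic model grid: columns are lon, lat, ve [mm/yr], vn [mm/yr].
lon, lat = np.meshgrid(np.linspace(-72, -65, 15), np.linspace(-35, -28, 15))
model = np.column_stack([lon.ravel(), lat.ravel(),
                         5.0 + 0.3 * lon.ravel(),    # arbitrary smooth east velocity field
                         -2.0 + 0.1 * lat.ravel()])  # arbitrary smooth north velocity field

station_lon, station_lat = -68.5, -31.2              # hypothetical station position
ve = griddata(model[:, 0:2], model[:, 2], (station_lon, station_lat), method='cubic')
vn = griddata(model[:, 0:2], model[:, 3], (station_lon, station_lat), method='cubic')
print(ve, vn)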
0x0all/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
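The classification rates printed by the example above are computed on the training data. A hedged companion sketch that estimates held-out accuracy for the same classifiers; note that `train_test_split` lives in `sklearn.cross_validation` in the release this example targets and in `sklearn.model_selection` in current ones, so the import may need adjusting.

from sklearn.cross_validation import train_test_split  # sklearn.model_selection in newer releases

# X, y and classifiers are the objects defined in the example above.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
for name, classifier in classifiers.items():
    classifier.fit(X_train, y_train)
    print("%s held-out accuracy: %.3f" % (name, classifier.score(X_test, y_test)))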
chenyyx/scikit-learn-doc-zh | examples/en/covariance/plot_robust_vs_empirical_covariance.py | 69 | 6473 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of American
Statistical Ass., 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(np.int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)],
color='green', ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| gpl-3.0 |
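A compact, self-contained sketch of the comparison carried out above on a single contaminated draw, using only estimator calls already present in the example (`MinCovDet`, `EmpiricalCovariance`, `error_norm`); the sample size, dimension and contamination level are arbitrary choices.

import numpy as np
from sklearn.covariance import EmpiricalCovariance, MinCovDet

rng = np.random.RandomState(0)
X = rng.randn(100, 5)        # inliers drawn from a unit-covariance Gaussian
X[:10] += 8.                 # contaminate 10% of the samples with a large offset

# Error of each covariance estimate against the true identity covariance.
mcd_err = MinCovDet(random_state=0).fit(X).error_norm(np.eye(5))
emp_err = EmpiricalCovariance().fit(X).error_norm(np.eye(5))
print("MCD error: %.3f   empirical error: %.3f" % (mcd_err, emp_err))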