repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
kevinpetersavage/BOUT-dev | examples/elm-pb/Python/plotmode.py | 7 | 1518 | from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
from numpy import *;
#from scipy.io import readsav;
import matplotlib.pyplot as plt;
# Dynamic matplotlib settings
from matplotlib import rcParams;
rcParams['font.size'] = 20;
rcParams['legend.fontsize'] = 'small';
rcParams['legend.labelspacing'] = 0.1;
rcParams['lines.linewidth'] = 2;
rcParams['savefig.bbox'] = 'tight';
# Create image directory if not exists
import os;
if not os.path.exists('image'):
os.makedirs('image');
#fphi = transpose(readsav('fphi.idl.dat')['fphi'])[:,:,:,];
fphi = load('fp.npy')
plt.figure();
for i in range(1, 9):
print("Growth rate for mode number", i)
print(gradient(log(abs(fphi[34, 32, i, :]))))
plt.semilogy(((abs(fphi[34, 32, i, :]))), label = 'n=' + str(i * 5));
plt.legend(loc=2);
plt.xlabel('Time');
plt.savefig('image/plotmode.png');
plt.savefig('image/plotmode.eps');
plt.show(block=False);
plt.figure();
for i in range(1, 9):
plt.plot(abs(fphi[:, 32, i, -1]), label = 'n=' + str(i * 5));
plt.legend();
plt.xlabel('X index');
plt.savefig('image/plotmodeamp.png');
plt.savefig('image/plotmodeamp.eps');
plt.show(block=False);
plt.figure();
for i in range(1, 9):
plt.plot(old_div(abs(fphi[:, 32, i, -1]),abs(fphi[:, 32, i, -1]).max()), label = 'n=' + str(i * 5));
plt.legend();
plt.xlabel('X index');
plt.savefig('image/plotmodenorm.png');
plt.savefig('image/plotmodenorm.eps');
plt.show();
| gpl-3.0 |
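Editorial aside (not part of the BOUT-dev file above): the growth-rate printout works because the gradient of log|phi| recovers the exponential growth rate of a mode. A minimal self-contained sketch with invented numbers:
import numpy as np

t = np.arange(200)
gamma = 0.03                               # assumed growth rate per time step
amplitude = 1e-6 * np.exp(gamma * t)       # exponentially growing mode amplitude
growth_estimate = np.gradient(np.log(np.abs(amplitude)))
print(growth_estimate[100])                # ~0.03, matching the assumed rate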
vortex-ape/scikit-learn | sklearn/neighbors/unsupervised.py | 7 | 4764 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=None, **kwargs):
super(NearestNeighbors, self).__init__(
n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
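A short usage sketch, added editorially rather than taken from the scikit-learn source above, showing the algorithm, metric and radius options the docstring describes; the sample points are invented:
import numpy as np
from sklearn.neighbors import NearestNeighbors

X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [5.0, 5.0]])
nn = NearestNeighbors(n_neighbors=2, radius=1.5,
                      algorithm='ball_tree', metric='euclidean')
nn.fit(X)
distances, indices = nn.kneighbors([[0.1, 0.1]])                      # two nearest training points
radius_hits = nn.radius_neighbors([[0.1, 0.1]], return_distance=False)  # everything within radius 1.5
print(indices[0], radius_hits[0])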
hugobowne/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
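An editorial sketch, not from the scikit-learn example above: the quantile passed to estimate_bandwidth controls how local the kernel is, and therefore how many clusters mean shift finds. Data and parameter values below are invented:
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=500, centers=[[0, 0], [4, 4]],
                  cluster_std=0.5, random_state=0)
for quantile in (0.1, 0.3, 0.5):
    bw = estimate_bandwidth(X, quantile=quantile, n_samples=200)
    labels = MeanShift(bandwidth=bw, bin_seeding=True).fit(X).labels_
    # larger quantile -> larger bandwidth -> fewer clusters
    print(quantile, round(bw, 2), len(np.unique(labels)))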
anne-urai/RT_RDK | old_code/HDDM_joblib.py | 2 | 4893 | #!/usr/bin/env python
# encoding: utf-8
"""
Anne Urai, 2016
adapted from JW de Gee
uses joblib instead of parallel python
"""
# ============================================ #
# HDDM cheat sheet
# ============================================ #
# v = drift rate
# a = boundary separation
# t = nondecision time
# z = starting point
# dc = drift criterion
# sv = inter-trial variability in drift-rate
# st = inter-trial variability in non-decision time
# sz = inter-trial variability in starting-point
# ============================================ #
# define the function that will do the work
# ============================================ #
def run_model(mypath, model_version, trace_id, nr_samples=50000):
if model_version == 1:
model_name = 'stimcoding'
elif model_version == 2:
model_name = 'prevresp_z'
elif model_version == 3:
model_name = 'prevresp_dc'
elif model_version == 4:
model_name = 'prevresp_prevrt_z'
elif model_version == 5:
model_name = 'prevresp_prevrt_dc'
elif model_version == 6:
model_name = 'prevresp_prevpupil_z'
elif model_version == 7:
model_name = 'prevresp_prevpupil_dc'
import os
model_filename = os.path.join(mypath, model_name, 'modelfit-md%d.model'%trace_id)
modelExists = os.path.isfile(model_filename)
if modelExists:
print "model already exists, skipping"
else:
import hddm
# get the csv
mydata = hddm.load_csv(os.path.join(mypath, '2ifc_data_hddm.csv'))
# specify the model
m = hddm.HDDMStimCoding(mydata, stim_col='stimulus', split_param='v',
drift_criterion=True, bias=True,
include=('sv'), group_only_nodes=['sv'],
depends_on={'t':['sessionnr'], 'v':['sessionnr'],
'a':['sessionnr'], 'dc':['sessionnr'], 'z':['sessionnr', 'prevresp']},
p_outlier=.05)
# ============================================ #
# do the actual sampling
# ============================================ #
m.sample(nr_samples, burn=nr_samples/4, thin=3, db='pickle',
dbname=os.path.join(mypath, model_name, 'modelfit-md%d.db'%trace_id))
m.save(model_filename) # save the model to disk
# ============================================ #
# save the output values
# ============================================ #
results = m.gen_stats() # this seems different from print_stats??
results.to_csv(os.path.join(mypath, model_name, 'results-md%d.csv'%trace_id))
# save the DIC for this model
text_file = open(os.path.join(mypath, model_name, 'DIC-md%d.txt'%trace_id), 'w')
text_file.write("Model {}: {}\n".format(trace_id, m.dic))
text_file.close()
# dont return model object, can't be pickled so Parallel will hang
return trace_id
# ============================================ #
# set up this model
# ============================================ #
model_name = 'prevresp_z_stimcoding'
nr_samples = 50000 # 50.000 for real results?
nr_traces = 3
# find path depending on local/klimag
import os
usr = os.environ.get('USER')
if usr in ['anne']:
mypath = '/Users/anne/Data/projects/0/neurodec/Data/MEG-PL/Data/HDDM'
if usr in ['aurai']:
mypath = '/home/aurai/Data/MEG-PL/Data/HDDM'
# make a folder for the outputs, combine name and time
thispath = os.path.join(mypath, model_name)
if not os.path.exists(thispath):
os.mkdir(thispath)
# ============================================ #
# run models in parallel
# ============================================ #
from joblib import Parallel, delayed
Parallel(n_jobs=nr_traces, verbose=10) \
(delayed(run_model)(mypath, model_name, trace_id, nr_samples) \
for trace_id in range(nr_traces))
# ============================================ #
# post-processing
# ============================================ #
import hddm
import matplotlib.pyplot as plt
print "HDDM imported, starting post-processing"
models = []
for trace_id in range(nr_traces): # run the models serially
thism = hddm.load(os.path.join(mypath, model_name, 'modelfit-md%d.model'%trace_id))
print os.path.join(mypath, model_name, 'modelfit-md%d.model'%trace_id)
# plot some output stuff in figures subfolder
figpath = os.path.join(mypath, model_name, 'figures-md%d'%trace_id)
if not os.path.exists(figpath):
os.mkdir(figpath)
thism.plot_posteriors(save=True, path=figpath, format='pdf')
plt.close('all') # this will leave figures open, make sure to close them all
models.append(thism)
# gelman rubic on the list of models
gr = hddm.analyze.gelman_rubin(models)
text_file = open(os.path.join(mypath, model_name, 'gelman_rubic.txt'), 'w')
for p in gr.items():
text_file.write("%s:%s\n" % p)
text_file.close()
| mit |
kamcpp/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 30 | 3738 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class NumpySourceTestCase(tf.test.TestCase):
def testNumpySource(self):
batch_size = 3
iterations = 1000
array = np.arange(32).reshape([16, 2])
numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
index_column = numpy_source().index
value_column = numpy_source().value
cache = {}
with tf.Graph().as_default():
value_tensor = value_column.build(cache)
index_tensor = index_column.build(cache)
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
expected_index = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_value = get_rows(array, expected_index)
actual_index, actual_value = sess.run([index_tensor, value_tensor])
np.testing.assert_array_equal(expected_index, actual_index)
np.testing.assert_array_equal(expected_value, actual_value)
coord.request_stop()
coord.join(threads)
class PandasSourceTestCase(tf.test.TestCase):
def testPandasFeeding(self):
if not HAS_PANDAS:
return
batch_size = 3
iterations = 1000
index = np.arange(100, 132)
a = np.arange(32)
b = np.arange(32, 64)
dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
pandas_source = in_memory_source.PandasSource(dataframe,
batch_size=batch_size)
pandas_columns = pandas_source()
cache = {}
with tf.Graph().as_default():
pandas_tensors = [col.build(cache) for col in pandas_columns]
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
indices = [j % dataframe.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))]
expected_df_indices = dataframe.index[indices]
expected_rows = dataframe.iloc[indices]
actual_value = sess.run(pandas_tensors)
np.testing.assert_array_equal(expected_df_indices, actual_value[0])
for col_num, col in enumerate(dataframe.columns):
np.testing.assert_array_equal(expected_rows[col].values,
actual_value[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
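Editorial note on the tests above: the expected indices wrap modulo the array length because the queue keeps cycling through the same in-memory data. A standalone check of that arithmetic, with an arbitrarily chosen batch number:
import numpy as np

array = np.arange(32).reshape([16, 2])
batch_size, i = 3, 6
expected_index = [j % array.shape[0]
                  for j in range(batch_size * i, batch_size * (i + 1))]
print(expected_index)   # [2, 3, 4] -- the feed wraps past row 15 back to row 0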
yunfeilu/scikit-learn | sklearn/externals/joblib/parallel.py | 79 | 35628 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Python 3.4+ use the 'forkserver' start method by default: this makes it
# possible to avoid crashing 3rd party libraries that manage an internal thread
# pool that does not tolerate forking
if hasattr(mp, 'get_start_method'):
method = os.environ.get('JOBLIB_START_METHOD')
if (method is None and mp.get_start_method() == 'fork'
and 'forkserver' in mp.get_all_start_methods()):
method = 'forkserver'
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update the estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = DEFAULT_MP_CONTEXT
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
# This lock is used to coordinate the main thread of this process with
# the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching are protected by the same
lock, so calling this function should be thread-safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
# Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
# while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print only 'verbose' times a message
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
# we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above for loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
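Editorial sketch of the auto batch-size rule implemented in dispatch_one_batch above: batches that finish faster than MIN_IDEAL_BATCH_DURATION are grown (doubled on top of the ideal size to damp oscillation), and batches slower than MAX_IDEAL_BATCH_DURATION are halved. The helper name below is invented for illustration and is not part of joblib's API:
MIN_IDEAL_BATCH_DURATION = 0.2
MAX_IDEAL_BATCH_DURATION = 2.0

def next_batch_size(old_batch_size, batch_duration):
    """Sketch of the tuning rule used in dispatch_one_batch."""
    if 0 < batch_duration < MIN_IDEAL_BATCH_DURATION:
        ideal = int(old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
        return max(2 * ideal, 1)       # grow fast batches
    if batch_duration > MAX_IDEAL_BATCH_DURATION and old_batch_size >= 2:
        return old_batch_size // 2     # shrink overly slow batches
    return old_batch_size

print(next_batch_size(1, 0.01))    # fast batches  -> 40
print(next_batch_size(64, 3.0))    # slow batches  -> 32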
sanjanalab/GUIDES | static/data/gtex/gtex_mouse/generate_exons_2.py | 2 | 4937 | '''
1. Generate CCDS -> ENSG mapping.
2. Get ENSG -> CCDS intervals mapping from CCDS_coords.
3. Go through CCDS_coords.csv and change all CCDS to ENSG.
4. Remove duplicates on name column.
5. Change ENSG exon positions to the intervals we found above.
6. Add 5 to either side.
7. Save
'''
import pandas as pd
import pickle
import time
t0 = time.time()
def union_intervals(intervals):
sorted_by_lower_bound = sorted(intervals, key=lambda tup: tup[0])
merged = []
for higher in sorted_by_lower_bound:
if not merged:
merged.append(higher)
else:
lower = merged[-1]
# test for intersection between lower and higher:
# we know via sorting that lower[0] <= higher[0]
if higher[0] <= lower[1]:
upper_bound = max(lower[1], higher[1])
merged[-1] = (lower[0], upper_bound) # replace by merged interval
else:
merged.append(higher)
return merged
# 1. Generate CCDS -> ENSG mapping.
ccds_ensg_map = {}
with open('ENSMUSG-CCDS.txt', 'r') as ensg_ccds:
for line in ensg_ccds:
comps = line.strip('\n').split('\t')
ensg = comps[0]
ccds = comps[1]
if len(ccds) > 0:
if ccds not in ccds_ensg_map:
ccds_ensg_map[ccds] = ensg
# 2. Get ENSG -> CCDS intervals mapping from CCDS_coords.
# set of starts/stops for an ensg
ensg_coords = {}
# CCDS -> range mapping (str |-> list)
with open('CCDS_coords.csv', 'r') as ccds_coords_file:
ccds_df = pd.read_csv(ccds_coords_file, sep="\t", header=0)
# go through each CCDS entry individually
print "going through each CCDS entry for ensg", time.time() - t0
for i, row in ccds_df.iterrows():
if i % (len(ccds_df) / 100) == 0: print i, '/', len(ccds_df), time.time() - t0
# find which ensembl gene it belongs to
ccds_name = row['name'].split('.')[0]
if ccds_name not in ccds_ensg_map:
print ccds_name, "not in ccds_ensg_map"
continue
ensg = ccds_ensg_map[ccds_name]
# Add the associated information to ensg_coords
starts = row['exonStarts']
stops = row['exonEnds']
starts_nums = [int(num) for num in starts.split(',')[:-1]]
stops_nums = [int(num) for num in stops.split(',')[:-1]]
new_intervals = [(int(a),int(b)) for a, b in zip(starts_nums, stops_nums)]
if ensg not in ensg_coords:
ensg_coords[ensg] = {
'df_data': row,
'intervals': union_intervals(new_intervals)
}
else:
all_intervals = ensg_coords[ensg]['intervals'] + new_intervals
ensg_coords[ensg]['intervals'] = union_intervals(all_intervals)
print "ready to go through results and move intervals into df", time.time() - t0
# go through results and move intervals into df
results_df = pd.DataFrame(columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames'])
exon_info_df = pd.DataFrame(columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames'])
for idx, ensg in enumerate(ensg_coords):
if idx % (len(ensg_coords) / 100) == 0: print idx, '/', len(ensg_coords), time.time() - t0
exonCount = len(ensg_coords[ensg]['intervals'])
if exonCount == 0:
continue
# expand intervals to include intronic sequences (5 each side)
starts_list = []
ends_list = []
for i in range(len(ensg_coords[ensg]['intervals'])):
starts_list.append(ensg_coords[ensg]['intervals'][i][0] - 5)
ends_list.append(ensg_coords[ensg]['intervals'][i][1] + 5)
# recombine into string
starts_list_str = (','.join([str(n) for n in starts_list])) + ','
ends_list_str = (','.join([str(n) for n in ends_list])) + ','
new_row = ensg_coords[ensg]['df_data']
ccdsStart = ensg_coords[ensg]['intervals'][0][0]
ccdsEnd = ensg_coords[ensg]['intervals'][-1][1]
new_row['name'] = ensg
new_row['txStart'] = ccdsStart
new_row['cdsStart'] = ccdsStart
new_row['txEnd'] = ccdsEnd
new_row['cdsEnd'] = ccdsEnd
new_row['exonCount'] = exonCount
new_row['exonStarts'] = starts_list_str
new_row['exonEnds'] = ends_list_str
new_row['chrom'] = new_row['chrom'].split('chr')[1]
results_df.loc[idx] = new_row
new_row['exonStarts'] = starts_list
new_row['exonEnds'] = ends_list
exon_info_df.loc[idx] = new_row
print "writing results", time.time() - t0
# Write results
exon_info = exon_info_df[["name", "chrom", "strand", "exonCount", "exonStarts", "exonEnds"]]
with open("exon_info.p", "wb") as f:
pickle.dump(exon_info, f)
results_df.to_csv('refGene_mouse.txt', sep="\t", index=False, header=False)
end_time = time.time()
hours, rem = divmod(end_time-t0, 3600)
minutes, seconds = divmod(rem, 60)
print "time elapsed"
print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
| bsd-3-clause |
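Editorial sketch: the interval-union step above merges overlapping CCDS exon intervals per gene. A condensed, self-contained version of the same logic, shown on toy intervals:
def merge_intervals(intervals):
    """Same behaviour as union_intervals above, on toy data."""
    merged = []
    for lo, hi in sorted(intervals):
        if merged and lo <= merged[-1][1]:
            merged[-1] = (merged[-1][0], max(merged[-1][1], hi))
        else:
            merged.append((lo, hi))
    return merged

print(merge_intervals([(10, 20), (15, 30), (40, 50)]))   # [(10, 30), (40, 50)]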
UC3MSocialRobots/novelty-detection-in-hri | helper files/helper.py | 2 | 12942 | import pandas as pd
import numpy as np
import scipy
import arff as arff # Downloaded from: http://code.google.com/p/arff/
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from pylab import *
from itertools import cycle
from sklearn import metrics
from sklearn.svm import OneClassSVM
from sklearn.cluster import KMeans
from sklearn.mixture import GMM, DPGMM
from sklearn.hmm import GaussianHMM
from sklearn.covariance import MinCovDet
from sklearn.covariance import EllipticEnvelope
from scipy import stats
import user_data_loader as udl
from IPython.display import HTML
from IPython.html import widgets
from IPython.display import display, clear_output
import lsanomaly
import print_ske as pp
red = 'STAND_POINTING_RIGHT'
blue = 'STAND_POINTING_LEFT'
green = 'STAND_POINTING_FORWARD'
global GMM_n, one_n, lsa_n, K_n, GMM_s, one_s, lsa_s #Novelty Scores for each algorithm, those ''_n are for noise score, ''_s are for strangeness score
global K_GMM_n, K_KMeans_n, K_GMM_s, K_KMeans_s #K_GMM_n, K_KMeans_n are the noise curiosity factors for each algorithm
#K_GMM_s, K_KMeans_s are the strangeness curiosity factors for each algorithm
#Ks is a list containing the 4 above mentioned parameters
'''
---------
FUNCTIONS TO RELOAD AND DISPLAY RESULTS
---------
'''
def compute_and_reload_figures(normal_users, queue, users_normal, users, Ks, name=''):
'''
Receives the list of normal_users and the queue. Also users_normal and users, with the form of [[number_user, pose]...]
Receives Ks, the list of curiosity factors for the algorithms
Can receive a name to save the figure displayed
Calls compute_scores to obtain the strangeness score and noise score for the last entry in the queue
Computes the colors of the bars depending on the values of the scores
Plots a bar graph with the scores and the names
'''
global GMM_n, one_n, lsa_n, K_n, GMM_s, one_s, lsa_s, K_s #Novelty scores for each algorithm: the ''_n lists hold noise scores, the ''_s lists hold strangeness scores
GMM_n = []
one_n = []
lsa_n = []
K_n = []
GMM_s = []
one_s = []
lsa_s = []
K_s =[]
compute_scores(normal_users, queue, Ks) # Calls the function to compute scores, that updates GMM_n, one_n, lsa_n, K_n, GMM_s, one_s, lsa_s
scores_n = np.array([K_n[0],lsa_n[0]+0.01,one_n[0]+0.01,GMM_n[0]]) #Create a numpy array with the noise scores to display in the graph
names_n = ('KM','LSA', 'SVM1C','GMM' ) #names to display in the noise score graph
scores_s = np.array([K_s[0],lsa_s[0]+0.01,one_s[0]+0.01,GMM_s[0]]) #Create a numpy array with the noise scores to display in the graph
names_s = ('KM','LSA', 'SVM1C','GMM') #names to display in the strangeness score graph
print scores_s
# If the entry is detected as not interesting by all algorithms, the strangeness score is not displayed
if GMM_n[0]>=1 and one_n[0]>=1 and lsa_n[0]>=1 and K_n[0]>=1:
scores_s = np.array([0,0,0,0])
# Compute colors corresponding to the score value
# noise = red, interesting = green
# known = red, strange = green
colors_n = []
colors_s = []
for n in scores_n.tolist():
if n >= 1:
colors_n.append('red')
else:
colors_n.append('green')
for n in scores_s.tolist():
if n >= 1:
colors_s.append('green')
else:
colors_s.append('red')
#Plot the figures
f= plt.figure(figsize=(15,5))
# Print normal users and the last entry introduced to the system in black
ax1 = f.add_subplot(1,4,3, projection='3d')
users_normal_new = list(users_normal)
users_normal_new.append(users[-1])
pp.print_users(users_normal_new, ax1)
# Print all users, with the last entry introduced to the system in black
ax2 = f.add_subplot(1,4,1, projection='3d')
pp.print_users(users, ax2)
# Display names and scores of the algorithms
ax3 = f.add_subplot(1,4,2)
ax4 = f.add_subplot(1,4,4)
y_pos = np.arange(len(names_n))
ax3.barh(y_pos, scores_n, align='center', alpha=0.4, color = colors_n)
ax4.barh(y_pos, scores_s, align='center', alpha=0.4, color = colors_s)
ax3.set_yticks(y_pos)
ax3.set_yticklabels(names_n)
ax4.set_yticks(y_pos)
ax4.set_yticklabels(names_s)
ax3.set_title('noise score')
ax4.set_title('strangeness score')
#f.savefig('/Users/_USER_/Images/'+name+'.pdf', format='pdf')
'''
---------
FUNCTIONS TO CALCULATE NOVELTY SCORES
---------
'''
def compute_scores(normal_users, queue, Ks=[]):
'''
Calculates the novelty scores (noise and strangeness) for the 4 algorithms
Receives the list of normal users and the queue (all users) and the list of curiosity factors Ks
Updates the global variables GMM_n, one_n, lsa_n, K_n, GMM_s, one_s, lsa_s with the results
'''
global GMM_n, one_n, lsa_n, K_n, GMM_s, one_s, lsa_s, K_s #Novelty Scores for each algorithm, those ''_n are for noise score, ''_s are for strangeness score
GMM_n = []
one_n = []
lsa_n = []
K_n = []
GMM_s = []
one_s = []
lsa_s = []
K_s = []
K_GMM_n, K_KMeans_n, K_GMM_s, K_KMeans_s = Ks #K_GMM_n, K_KMeans_n are the noise curiosity factors for each algorithm
#K_GMM_s, K_KMeans_s are the strangeness curiosity factors for each algorithm
#Ks is a list containing the 4 above mentioned parameters
'''
For One_class_SVM and LSA, when asked to predict the new entry, a label is directly returned
LSA: 'anomaly' or '0' (normal)
One One_class_SVM: -1 (anomaly) or 1 (normal)
GMM and K means predict a fitting score. The novelty score is obtained calculating the zscore of the entry compared with the scores of all other entries, calling
the function get_score_last_item
If the zscore returned >= 1 the new entry is anomalous
'''
'''
Noise scores are computed with the queue as the base of knowledge, fitting all the entries but the last to the algorithm
'''
B = GMM(covariance_type='full', n_components = 1)
B.fit(queue[0:-1])
x = [B.score([i]).mean() for i in queue]
GMM_n.append(get_score_last_item(x, K_GMM_n))
K = KMeans(n_clusters=1)
K.fit(queue[0:-1])
x = [K.score([i]) for i in queue]
K_n.append(get_score_last_item(x, K_KMeans_n))
oneClassSVM = OneClassSVM(nu=0.1)
oneClassSVM.fit(queue[0:-1])
x = oneClassSVM.predict(np.array([queue[-1]]))
if x == -1:
one_n.append(1)
if x == 1:
one_n.append(0)
X = np.array(queue[0:-1])
anomalymodel = lsanomaly.LSAnomaly()
anomalymodel.fit(X)
x = anomalymodel.predict(np.array([queue[-1]]))
if x == ['anomaly']:
lsa_n.append(1)
if x == [0]:
lsa_n.append(0)
'''
Strangeness scores are computed with the normal users as the base of knowledge, fitting normal users to the algorithm
'''
normal_and_new = normal_users + [queue[-1]] #List to be passed to get_score_last_item to calculate the zscore of the last item, the new entry
B = GMM(covariance_type='full', n_components = 1)
B.fit(normal_users)
x = [B.score([i]).mean() for i in normal_and_new]
GMM_s.append(get_score_last_item(x, K_GMM_s))
K = KMeans(n_clusters=1)
K.fit(normal_users)
x = [K.score([i]) for i in normal_and_new]
K_s.append(get_score_last_item(x, K_KMeans_s))
oneClassSVM = OneClassSVM(nu=0.1)
oneClassSVM.fit(normal_users)
x = oneClassSVM.predict(np.array([queue[-1]]))
if x == -1:
one_s.append(1)
if x == 1:
one_s.append(0)
anomalymodel = lsanomaly.LSAnomaly()
X = np.array(normal_users)
anomalymodel.fit(X)
x = anomalymodel.predict(np.array([queue[-1]]))
if x == ['anomaly']:
lsa_s.append(1)
if x == [0]:
lsa_s.append(0)
return GMM_n, one_n, lsa_n, K_n, GMM_s, one_s, lsa_s, K_s
def get_score_last_item(x, K_curiosity):
''' Obtains a normalized (z) score of the last item of a list, with respect to the other items'''
ser = pd.Series(x, dtype=float)
old = ser[0:-1]
new = ser[ser.size-1]
return abs((new-old.mean())/(old.std()*K_curiosity))
'''
-------------
FUNCTIONS TO START THE SYSTEM
-------------
'''
def start_users(number_users, pose, indexes=[]):
'''
Starts the system by creating the initial set of users.
users has the form [[number_user, pose], ...]
normal_users and queue are lists of users in data form
Uses number_users users, all posing in the same direction (pose)
The users are selected at random
It takes the median value of the users
'''
import random
if indexes == []:
indexes = [i for i in np.arange(30)]
indexes.pop(indexes.index(0)) # There is no 'user00' data, so we remove it
indexes.pop(indexes.index(6)) # There is no 'user06' data, so we remove it
indexes.pop(indexes.index(12)) # There is no 'user12' data, POINTING LEFT so we remove it
users = []
pose = pose
for i in xrange(1,number_users):
j = random.choice(indexes)
users.append([j, pose])
indexes.pop(indexes.index(j))
normal_users = get_median_users(users)
queue = normal_users
return users, normal_users, queue
def get_median_users(users):
'''
Returns a list of the median values for each user in users
users = [[number, pose],[number2, pose]...]
'''
list_users_median = []
for u in users:
n = divide_user_by_pose('data/exp03-user'+str(u[0]).zfill(2)+'.arff', u[1])
mean = np.median(n, axis=0)
list_users_median.append(mean)
return list_users_median
'''
--------------
FUNCTIONS TO LEARN POSES
--------------
'''
def add_user_median(l_users, new_user_index, pose):
'''
Adds the median of the new user to the passed list and returns it
'''
l_new = list(l_users)
new_user = divide_user_by_pose('data/exp03-user'+str(new_user_index).zfill(2)+'.arff', pose)
    median = np.median(new_user, axis=0)
l_new.append(median)
return l_new
'''
--------------
FUNCTIONS TO EXTRACT AND NORMALIZE DATA
--------------
'''
def divide_user_by_pose(file, pose):
'''
Returns the normalized data of a desired pose from a user file, in an numpy array
'''
uf = udl.load_user_file(file)
multiind_first, multiind_second = udl.make_multiindex(udl.joints, udl.attribs)
uf.columns = pd.MultiIndex.from_arrays([list(multiind_first), list(multiind_second)], names=['joint', 'attrib'])
orig_torso, df_normalized = udl.normalize_joints(uf, 'torso')
uf.update(df_normalized)
uf.torso = uf.torso - uf.torso
uf.columns = udl.index
drops = list(uf.columns[84:123])+['h_seqNum', 'h_stamp', 'user_id']
uf2 = uf.drop(drops,1).groupby('pose')
group_pose_with_label = uf2.get_group(pose)
group_pose = group_pose_with_label.drop('pose',1)
return group_pose.values
'''
-------------
OTHER FUNCTIONS NOT USED IN THE NOTEBOOK
-------------
'''
def compute_print_scores(normal_users, queue):
K_GMM_n, K_KMeans_n, K_GMM_s, K_KMeans_s = Ks
print 'novelty score GMM'
B = GMM(covariance_type='full', n_components = 1)
B.fit(queue)
x = [B.score([i]).mean() for i in queue]
print get_score_last_item(x, K_GMM_n)
print 'novelty score OneClassSVM'
x = anom_one_class(queue, [queue[-1]])
print x[-1]
print 'novelty score LSA'
anomalymodel = lsanomaly.LSAnomaly()
X = np.array(queue)
anomalymodel.fit(X)
print anomalymodel.predict(np.array([queue[-1]]))
print 'novelty score degree K_means'
K = KMeans(n_clusters=1)
K.fit(queue)
x = [K.score([i]) for i in queue]
print get_score_last_item(x, K_KMeans_n)
normal_and_new = normal_users + [queue[-1]]
print 'degree of belonging to known class GMM'
B = GMM(covariance_type='full', n_components = 1)
B.fit(normal_users)
x = [B.score([i]).mean() for i in normal_and_new]
print get_score_last_item(x, K_GMM_s)
print 'degree of belonging to known class OneClassSVM'
x = anom_one_class(normal_users, [queue[-1]])
print x[-1]
print 'degree of belonging to known class LSA'
anomalymodel = lsanomaly.LSAnomaly()
X = np.array(normal_users)
anomalymodel.fit(X)
print anomalymodel.predict(np.array([queue[-1]]))
print 'degree of belonging to known class K_means'
K = KMeans(n_clusters=1)
K.fit(normal_users)
x = [K.score([i]) for i in normal_and_new]
print get_score_last_item(x, K_KMeans_s) | gpl-3.0 |
ThisIsSoSteve/Project-Tensorflow-Cars | plot_course_data.py | 1 | 2616 | #import os
import pickle
import glob
#import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
#from data_control_no_images.read import Read
listing = glob.glob('F:/Project_Cars_Data/1lap-fullspeed/Watkins Glen International - Short Circuit' + '/*.pkl')
x = []
y = []
throttle = []
raw_throttle = []
brake = []
raw_brake = []
steering = []
raw_steering = []
xy = []
for filename in tqdm(listing):
with open(filename, 'rb') as file_data:
project_cars_state = pickle.load(file_data)
controller_state = pickle.load(file_data)
        # remove data that is not from a flying lap
if project_cars_state.mParticipantInfo[0].mCurrentLapDistance == 0.0:
continue
position = project_cars_state.mParticipantInfo[0].mWorldPosition
x.append(round(position[0]))
y.append(round(position[2]))
throttle.append(controller_state['right_trigger']/255)# 0 - 255
brake.append(controller_state['left_trigger']/255) #0 - 255
steering.append(controller_state['thumb_lx']/32767) #-32768 - 32767
#steering.append(project_cars_state.mSteering)
raw_steering.append(project_cars_state.mUnfilteredSteering)
raw_brake.append(project_cars_state.mUnfilteredBrake)
raw_throttle.append(project_cars_state.mUnfilteredThrottle)
xy.append([position[0], position[2]])
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=steering)
plt.colorbar()
plt.axis('equal')
plt.title('position and controller steering')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=raw_steering)
plt.colorbar()
plt.axis('equal')
plt.title('position and raw steering')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=throttle)
plt.colorbar()
plt.axis('equal')
plt.title('position and controller throttle')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=raw_throttle)
plt.colorbar()
plt.axis('equal')
plt.title('position and raw throttle')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=brake)
plt.colorbar()
plt.axis('equal')
plt.title('position and controller brake')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=raw_brake)
plt.colorbar()
plt.axis('equal')
plt.title('position and raw brake')
plt.show()
plt.close()
# get_data = Read(True)
# mean, std = get_data.load_mean_and_std('F:/Project_Cars_Data/Full_Speed_Training_none_image')
# print(mean)
# print(std)
# xy = (xy - mean) / std
# print(np.array(xy[:,0]).shape)
# plt.scatter(xy[:,0], xy[:,1])
# plt.axis('equal')
# plt.show()
| mit |
Clyde-fare/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative definition (from ESLII)
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
HeraclesHX/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
aminert/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 47 | 8095 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
datapythonista/pandas | pandas/tests/extension/base/printing.py | 4 | 1193 | import io
import pytest
import pandas as pd
from pandas.tests.extension.base.base import BaseExtensionTests
class BasePrintingTests(BaseExtensionTests):
"""Tests checking the formatting of your EA when printed."""
@pytest.mark.parametrize("size", ["big", "small"])
def test_array_repr(self, data, size):
if size == "small":
data = data[:5]
else:
data = type(data)._concat_same_type([data] * 5)
result = repr(data)
assert type(data).__name__ in result
assert f"Length: {len(data)}" in result
assert str(data.dtype) in result
if size == "big":
assert "..." in result
def test_array_repr_unicode(self, data):
result = str(data)
assert isinstance(result, str)
def test_series_repr(self, data):
ser = pd.Series(data)
assert data.dtype.name in repr(ser)
def test_dataframe_repr(self, data):
df = pd.DataFrame({"A": data})
repr(df)
def test_dtype_name_in_info(self, data):
buf = io.StringIO()
pd.DataFrame({"A": data}).info(buf=buf)
result = buf.getvalue()
assert data.dtype.name in result
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/feature_selection/tests/test_base.py | 98 | 3681 | import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
from sklearn.utils.testing import assert_raises, assert_equal
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
yaonepiece/AtariBreakoutAI | Double DQN/src/Atari_Breakout.py | 1 | 13065 | import sys
import time
import pygame
import numpy as np
from enum import Enum
from math import cos, sin, pi
from skimage.transform import resize
from skimage.color import rgb2gray
from sklearn.preprocessing import normalize as norm
class Colors(Enum):
Black = (0, 0, 0, 255)
Blue = (0, 100, 255, 255)
Orange = (255, 180, 70, 255)
Red = (255, 0, 0, 255)
White = (255, 255, 255, 255)
class AtariGame:
def __init__(self, _speed=1, _brickx=8, _bricky=5, _white=False, _banner=False, _screen=True):
# Hyperparameters
self.windowWidth = 400
self.windowHeight = 300
self.runspeed = _speed
self.fps = 50
self.fpsClock = pygame.time.Clock()
self.brickx, self.bricky = _brickx, _bricky
self.brick = np.ones((_bricky, _brickx), dtype=int)
self.brickRect = [[pygame.Rect(self.windowWidth * j // _brickx, 20 * i + 40,
(self.windowWidth * (j + 1) // _brickx) -
(self.windowWidth * j // _brickx), 20)
for j in range(_brickx)] for i in range(_bricky)]
self.ballPos = np.array([200.0, 200.0])
self.ballSpeed = 5
self.ballVector = norm([[-1, -1]]).ravel()
self.ballSize = 8
self.barLength = 50
self.barWidth = 10
self.barinitX, self.barinitY = 200.0, 295.0
self.barRect = pygame.Rect(self.barinitX - self.barLength // 2, self.barinitY - self.barWidth // 2,
self.barLength, self.barWidth)
self.seconds = 0
self.ticks = 0
self.barSpeed = 8
self.barMove = 0
self.start = True
self.end = False
self.white = _white
self.banner = _banner
self.screen = _screen
self.n_features = 5
self.actions = 3
self.ball_rand()
pygame.init()
self.surface = pygame.display.set_mode((self.windowWidth, self.windowHeight))
pygame.display.set_caption('Atari Breakout')
def get_state(self):
        a = np.empty(5, dtype=float)  # [ballPos_x, ballPos_y, ballVec_x, ballVec_y, bar_center_x]
a[0] = self.ballPos[0]
a[1] = self.ballPos[1]
a[2] = self.ballVector[0]
a[3] = self.ballVector[1]
a[4] = self.barRect.centerx
return a
def die(self):
self.draw()
fontsize = 50
font = pygame.font.SysFont(None, fontsize)
        if self.white:
            text = font.render('You lose!', True, Colors.Red.value)
        else:
            text = font.render('You lose!', True, Colors.White.value)
text_rect = text.get_rect() # Set the center of the text box
text_rect.centerx = self.surface.get_rect().centerx
text_rect.centery = self.surface.get_rect().centery
self.surface.blit(text, text_rect)
pygame.display.update()
def gameclear(self):
self.draw()
fontsize = 50
font = pygame.font.SysFont(None, fontsize)
        if self.white:
            text = font.render('You win!', True, Colors.Red.value)
        else:
            text = font.render('You win!', True, Colors.White.value)
text_rect = text.get_rect() # Set the center of the text box
text_rect.centerx = self.surface.get_rect().centerx
text_rect.centery = self.surface.get_rect().centery
self.surface.blit(text, text_rect)
pygame.display.update()
def timesup(self):
self.draw()
fontsize = 50
font = pygame.font.SysFont(None, fontsize)
        if self.white:
            text = font.render('Time\'s up!', True, Colors.Red.value)
        else:
            text = font.render('Time\'s up!', True, Colors.White.value)
text_rect = text.get_rect() # Set the center of the text box
text_rect.centerx = self.surface.get_rect().centerx
text_rect.centery = self.surface.get_rect().centery
self.surface.blit(text, text_rect)
pygame.display.update()
def restart(self):
self.brick = np.ones((self.bricky, self.brickx), dtype=int)
self.ballPos = np.array([200.0, 200.0])
self.ball_rand()
self.barRect = pygame.Rect(self.barinitX - self.barLength // 2, self.barinitY - self.barWidth // 2,
self.barLength, self.barWidth)
self.seconds = 0
self.ticks = 0
self.start = True
self.end = False
self.draw()
image_data = pygame.surfarray.array3d(pygame.display.get_surface())
image_data = rgb2gray(resize(image_data, (80, 80), mode='reflect'))
image_stack = np.stack([image_data for _ in range(4)])
return image_stack
def draw(self):
self.surface.fill(Colors.Black.value)
if self.white:
pygame.draw.circle(self.surface, Colors.White.value, tuple(self.ballPos.astype(int)), self.ballSize)
else:
pygame.draw.circle(self.surface, Colors.Red.value, tuple(self.ballPos.astype(int)), self.ballSize)
for i in range(self.bricky):
for j in range(self.brickx):
if self.brick[i][j] == 1:
if not self.white:
if (i + j) % 2 == 0:
rectcolor = Colors.Orange.value
else:
rectcolor = Colors.Blue.value
else:
rectcolor = Colors.White.value
pygame.draw.rect(self.surface, rectcolor, self.brickRect[i][j])
pygame.draw.rect(self.surface, Colors.White.value, self.barRect)
pygame.display.update()
def startpause(self):
fontsize = 50
font = pygame.font.SysFont(None, fontsize)
self.draw()
        if self.white:
            text = font.render('Ready?', True, Colors.Red.value)
        else:
            text = font.render('Ready?', True, Colors.White.value)
text_rect = text.get_rect() # Set the center of the text box
text_rect.centerx = self.surface.get_rect().centerx
text_rect.centery = self.surface.get_rect().centery
self.surface.blit(text, text_rect)
pygame.display.update()
time.sleep(0.3)
def ball_rand(self):
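        # Draw a launch angle uniformly from (1.27*pi, 1.77*pi) radians, so the ball
        # always starts moving upward (negative y) with a random sideways component.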
angle = (np.random.rand() * 0.5 + 1.27) * pi
self.ballVector = np.array([cos(angle), sin(angle)])
def ball_move(self):
tmp_reward = 0.01
# move
pre_pos = np.copy(self.ballPos)
self.ballPos += self.ballSpeed * self.ballVector
# touchwall
if self.ballPos[0] <= self.ballSize:
self.ballPos[0] = float(self.ballSize)
self.ballVector[0] *= -1
elif self.ballPos[0] >= (self.windowWidth - self.ballSize):
self.ballPos[0] = self.windowWidth - self.ballSize
self.ballVector[0] *= -1
if self.ballPos[1] <= self.ballSize:
self.ballPos[1] = float(self.ballSize)
self.ballVector[1] *= -1
elif self.ballPos[1] >= (self.windowHeight - self.ballSize):
return True, -5
# breakbrick
for i in range(self.bricky):
for j in range(self.brickx):
if self.brick[i][j] == 1:
# touch from right
if (self.brickRect[i][j].right - self.ballSize) < self.ballPos[0] <= (
self.brickRect[i][j].right + self.ballSize) and pre_pos[0] > self.ballPos[0] and \
self.brickRect[i][j].top <= self.ballPos[1] < self.brickRect[i][j].bottom:
self.ballVector[0] *= -1
self.brick[i][j] = 0
# touch from left
if (self.brickRect[i][j].right + self.ballSize) > self.ballPos[0] >= (
self.brickRect[i][j].left - self.ballSize) \
and pre_pos[0] < self.ballPos[0] and self.brickRect[i][j].bottom > self.ballPos[1] >= \
self.brickRect[i][j].top:
self.ballVector[0] *= -1
self.brick[i][j] = 0
# touch from bottom
if (self.brickRect[i][j].bottom - self.ballSize) < self.ballPos[1] <= (
self.brickRect[i][j].bottom + self.ballSize) \
and pre_pos[1] > self.ballPos[1] and self.brickRect[i][j].right > self.ballPos[0] >= \
self.brickRect[i][j].left:
self.ballVector[1] *= -1
self.brick[i][j] = 0
# touch from top
if (self.brickRect[i][j].top + self.ballSize) > self.ballPos[1] >= (
self.brickRect[i][j].top - self.ballSize) \
and pre_pos[1] < self.ballPos[1] and self.brickRect[i][j].right > self.ballPos[0] >= \
self.brickRect[i][j].left:
self.ballVector[1] *= -1
self.brick[i][j] = 0
tmp_vector = np.copy(self.ballVector)
# touchpad
ball_rect = pygame.Rect(
self.ballPos[0] - self.ballSize,
self.ballPos[1] - self.ballSize,
self.ballSize * 2,
self.ballSize * 2
)
if pygame.Rect.colliderect(ball_rect, self.barRect):
tmp_vector = norm([self.ballPos - np.array(self.barRect.center)]).ravel()
if tmp_vector[1] >= 0:
tmp_vector = norm([[tmp_vector[0], 1]]).ravel()
elif tmp_vector[1] > -0.5:
if tmp_vector[0] > 0:
tmp_vector += norm(np.array([[0.86, -0.5]])).ravel() - tmp_vector
else:
tmp_vector += norm(np.array([[-0.86, -0.5]])).ravel() - tmp_vector
tmp_reward = 1
self.ballVector = tmp_vector
if np.sum(self.brick) > 0:
return False, tmp_reward
else:
return True, 100
def move_bar(self):
self.barRect.left += self.barSpeed * self.barMove
if self.barRect.left >= 400 - self.barLength:
self.barRect.left = int(400.0 - self.barLength)
if self.barRect.left <= 0:
self.barRect.left = 0
def render(self, _move=0):
# check valid
if self.end:
return
terminal = False
# startPause
if self.start:
if self.banner:
self.startpause()
self.start = False
# draw things
self.draw()
# update
if _move == 0:
self.barMove = 0
elif _move == 1:
self.barMove = -1
else:
self.barMove = 1
self.move_bar()
preb = np.sum(self.brick)
self.end, tmp_reward = self.ball_move()
aftb = np.sum(self.brick)
if not self.end and aftb < preb and aftb < self.brickx * self.bricky - 1 and preb < self.brickx * self.bricky:
tmp_reward = 5 * (preb - aftb)
# time step
self.ticks += 1
if self.ticks == self.fps:
self.seconds, self.ticks = self.seconds + 1, 0
if self.seconds == 180:
self.end = True
if self.banner:
self.timesup()
terminal = True
# check gameover
elif self.end:
if self.banner:
if np.sum(self.brick) > 0:
self.die()
else:
self.gameclear()
terminal = True
pygame.event.clear()
pygame.display.update()
        image_data = pygame.surfarray.array3d(pygame.display.get_surface())  # RGB array, consistent with restart()
image_data = rgb2gray(resize(image_data, (80, 80), mode='reflect'))
return image_data, tmp_reward, terminal
if __name__ == '__main__':
# Game start
atari = AtariGame(_speed=1, _brickx=16, _bricky=5, _white=False, _banner=True)
end = False
gamepoint = 0
while True:
# getkey
pressedKeys = pygame.key.get_pressed()
if pressedKeys[ord('q')]:
pygame.quit()
sys.exit()
elif pressedKeys[ord('a')] and not pressedKeys[ord('d')]:
move = 1
elif pressedKeys[ord('d')] and not pressedKeys[ord('a')]:
move = 2
else:
move = 0
screen, reward, end = atari.render(_move=move)
if reward > 1:
gamepoint += reward
print('Point:', gamepoint)
elif reward < 0:
gamepoint = 0
if end:
time.sleep(0.5)
atari.restart()
else:
atari.fpsClock.tick(atari.fps * atari.runspeed)
| apache-2.0 |
harshnisar/EvoML | evoml/evaluators.py | 1 | 11238 | import numpy as np
from sklearn.metrics import mean_squared_error
from util import centroid_df
from util import distance
def evalOneMax_KNN(individual, df_test, base_estimator):
"""
This will have the code for testing the model on unseen data.
Parameters
----------
individual : list, required
List of dataframes with each dataframes last column the
dependent column.
df_test : DataFrame, required
The test dataframe against which you are evaluating your
model in the fitness function.
model : Estimator, required
The estimator to use in the model.
    Side note: we could even start from a single subset of the dataset and keep
    improving its CV score through the fitness function, to search for an optimal
    subset. The risk is that this converges to a subset of nearly identical rows,
    which trivially minimises CV error, so performance on unseen data has to be
    part of the fitness as well.
"""
total_mse = []
ensembles = []
centroids = []
for df_ in individual:
# clf = LinearRegression()
clf = base_estimator()
clf = clf.fit(df_.iloc[:, 0:-1], df_.iloc[:,-1])
ensembles.append(clf)
centroids.append(centroid_df(df_.iloc[:,0:-1]))
## for sum of mse return uncomment these
#y_pred = clf.predict(df_test[x_columns])
#mse = mean_squared_error(y_pred, df_test[y_column])
#total_mse.append(mse)
#print total_mse
y_preds_ensemble = []
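    # Route each test row to the child model whose training-data centroid is nearest,
    # then evaluate the combined predictions with RMSE (the fitness of the individual).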
for row in df_test.iloc[:, 0:-1].values:
distances = [distance(row, centroid) for centroid in centroids]
model_index = np.argmin(distances)
        # todo: optionally average the predictions of the 2 models with the smallest distances.
        # todo: expose this choice as a parameter.
y_pred = ensembles[model_index].predict(row)[0]
y_preds_ensemble.append(y_pred)
rmse_ensemble = np.sqrt(mean_squared_error(y_preds_ensemble, df_test.iloc[:,-1]))
return (rmse_ensemble),
def evalOneMax_KNN_EG(individual, df_test, base_estimator, n_votes):
"""
This will have the code for testing the model on unseen data.
Parameters
----------
individual : list, required
List of dataframes with each dataframes last column the
dependent column.
df_test : DataFrame, required
The test dataframe against which you are evaluating your
model in the fitness function.
model : Estimator, required
The estimator to use in the model.
    Side note: we could even start from a single subset of the dataset and keep
    improving its CV score through the fitness function, to search for an optimal
    subset. The risk is that this converges to a subset of nearly identical rows,
    which trivially minimises CV error, so performance on unseen data has to be
    part of the fitness as well.
"""
total_mse = []
ensembles = []
centroids = []
for eg_ in individual:
# clf = LinearRegression()
clf = eg_.estimator
ensembles.append(clf)
centroids.append(centroid_df(eg_.X))
## for sum of mse return uncomment these
#y_pred = clf.predict(df_test[x_columns])
#mse = mean_squared_error(y_pred, df_test[y_column])
#total_mse.append(mse)
#print total_mse
y_preds_ensemble = []
ensembles = np.array(ensembles)
# df_test = df_test.sample(, replace = False)
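    # For each test row, pick the n_votes models with the nearest training centroids
    # and combine their predictions into the ensemble prediction.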
for row in df_test.iloc[:, 0:-1].values:
distances = np.array([distance(row, centroid) for centroid in centroids])
# model_index = np.argmin(distances)
        # todo: optionally average the predictions of the 2 models with the smallest distances.
        # todo: expose this choice as a parameter.
model_ixs = distances.argsort()[:n_votes]
models = ensembles[model_ixs]
        # median (NaN-safe) of the selected models' predictions.
y_pred = np.nanmedian([mdl.predict(row)[0] for mdl in models])
# y_pred = ensembles[model_index].predict(row)[0]
y_preds_ensemble.append(y_pred)
# rmse_ensemble = np.sqrt(mean_squared_error(y_preds_ensemble, df_test.iloc[:,-1]))
rmse_ensemble = np.sqrt(mean_squared_error(y_preds_ensemble, df_test.iloc[:,-1]))
return (rmse_ensemble),
def eval_ensemble_oob_KNN_EG(individual, df, base_estimator, n_votes):
"""
Fitness is based on entire ensembles performance on Out of Bag samples
calculated using the union of the all samples used while training each child model.
Used in FEGO.
Parameters
----------
individual : list, required
List of dataframes with each dataframes last column the
dependent column.
df: DataFrame, required
The entire dataframe given to model to train - X and y combined.
model : Estimator, required
The estimator to use in the model.
#todo: not really needed. double check.
    Side note: we could even start from a single subset of the dataset and keep
    improving its CV score through the fitness function, to search for an optimal
    subset. The risk is that this converges to a subset of nearly identical rows,
    which trivially minimises CV error, so performance on unseen data has to be
    part of the fitness as well.
"""
total_mse = []
ensembles = []
centroids = []
# All the indices used in each child model.
bag_idx = []
for eg_ in individual:
# clf = LinearRegression()
clf = eg_.estimator
ensembles.append(clf)
idx = eg_.X.index.tolist()
# print len(idx)
bag_idx.append(idx)
centroids.append(centroid_df(eg_.X))
## for sum of mse return uncomment these
#y_pred = clf.predict(df_test[x_columns])
#mse = mean_squared_error(y_pred, df_test[y_column])
#total_mse.append(mse)
#print total_mse
# flattening and converting to set.
# print bag_idx
bag_idx = set(sum(bag_idx, []))
# print len(bag_idx)
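    # The out-of-bag set contains every training index not used by any child model;
    # the whole ensemble is evaluated on these unseen rows.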
out_bag_idx = list(set(df.index.tolist()) - bag_idx)
print 'Size of OOB', len(out_bag_idx)
df_test = df.loc[out_bag_idx]
y_preds_ensemble = []
ensembles = np.array(ensembles)
for row in df_test.iloc[:, 0:-1].values:
distances = np.array([distance(row, centroid) for centroid in centroids])
# model_index = np.argmin(distances)
        # todo: optionally average the predictions of the 2 models with the smallest distances.
        # todo: expose this choice as a parameter.
model_ixs = distances.argsort()[:n_votes]
models = ensembles[model_ixs]
# mean of all predictions.
y_pred = np.mean([mdl.predict(row)[0] for mdl in models])
# y_pred = ensembles[model_index].predict(row)[0]
y_preds_ensemble.append(y_pred)
rmse_ensemble = np.sqrt(mean_squared_error(y_preds_ensemble, df_test.iloc[:,-1]))
return (rmse_ensemble),
def eval_each_model_oob_KNN_EG(individual, df, base_estimator, n_votes):
"""
Fitness is based on average of each constituent model's RMSE over its own
private oob.
Used in FEMPO.
Replicates how a random forest works.
Does not consider voting in the fitness. (How will it figure out to make better distinguished
segments?)
Parameters
----------
individual : list, required
List of dataframes with each dataframes last column the
dependent column.
df: DataFrame, required
The entire dataframe given to model to train - X and y combined.
model : Estimator, required
The estimator to use in the model.
#todo: not really needed. double check.
    Side note: we could even start from a single subset of the dataset and keep
    improving its CV score through the fitness function, to search for an optimal
    subset. The risk is that this converges to a subset of nearly identical rows,
    which trivially minimises CV error, so performance on unseen data has to be
    part of the fitness as well.
"""
total_rmses = []
ensembles = []
centroids = []
# All the indices used in each child model.
bag_idx = []
for eg_ in individual:
# Generate Private OOB.
bag_idx = set(eg_.X.index.tolist())
out_bag_idx = list(set(df.index.tolist()) - bag_idx)
df_test = df.loc[out_bag_idx]
clf = eg_.estimator
ensembles.append(clf)
bag_idx = set(list(eg_.X.index.tolist()))
p_out_bag_idx = list(set(df.index.tolist()) - bag_idx)
p_out_bag = df.loc[p_out_bag_idx]
# print len(p_out_bag.columns)
p_out_bag_X = p_out_bag.iloc[:,:-1]
p_out_bag_y = p_out_bag.iloc[:,-1]
# will test on p_out_bag
if p_out_bag_y.shape[0] == 0:
print 'OOB ran-out'
#Should we then use rmse on itself?
continue
preds = clf.predict(p_out_bag_X)
rmse = np.sqrt(mean_squared_error(p_out_bag_y, preds))
# rmse = mean_squared_error(p_out_bag_y, preds)
total_rmses.append(rmse)
## for sum of mse return uncomment these
#y_pred = clf.predict(df_test[x_columns])
#mse = mean_squared_error(y_pred, df_test[y_column])
#total_mse.append(mse)
#print total_mse
# flattening and converting to set.
return (np.nanmean(total_rmses)),
def eval_each_model_PT_KNN_EG(individual, df, base_estimator, n_votes):
"""
Fitness is based on average of each constituent model's RMSE over its own
private test_set
Used in FEMPT.
Replicates how a random forest works.
Does not consider voting in the fitness. (How will it figure out to make better distinguished
segments?)
- Makes each segment robust.
Parameters
----------
individual : list, required
List of dataframes with each dataframes last column the
dependent column.
df: DataFrame, required
The entire dataframe given to model to train - X and y combined.
model : Estimator, required
The estimator to use in the model.
#todo: not really needed. double check.
    Side note: we could even start from a single subset of the dataset and keep
    improving its CV score through the fitness function, to search for an optimal
    subset. The risk is that this converges to a subset of nearly identical rows,
    which trivially minimises CV error, so performance on unseen data has to be
    part of the fitness as well.
"""
total_rmses = []
ensembles = []
centroids = []
# All the indices used in each child model.
bag_idx = []
for eg_ in individual:
# Generate Private OOB.
clf = eg_.estimator
ensembles.append(clf)
# rmse is precalculated in EG if private_test = True
rmse = eg_.rmse
total_rmses.append(rmse)
## for sum of mse return uncomment these
#y_pred = clf.predict(df_test[x_columns])
#mse = mean_squared_error(y_pred, df_test[y_column])
#total_mse.append(mse)
#print total_mse
# flattening and converting to set.
return (np.nanmean(total_rmses)),
| gpl-3.0 |
fabianp/scikit-learn | sklearn/tests/test_naive_bayes.py | 142 | 17496 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
JackThorp/ComputationalNeurodynamics | Exercise_2/Run2L.py | 3 | 2347 | """
Computational Neurodynamics
Exercise 2
Simulates two layers of Izhikevich neurons. Layer 0 is stimulated
with a constant base current and layer 1 receives synaptic input
from layer 0.
(C) Murray Shanahan et al, 2015
"""
from Connect2L import Connect2L
import numpy as np
import matplotlib.pyplot as plt
N1 = 4
N2 = 4
T = 500 # Simulation time
Ib = 5 # Base current
net = Connect2L(N1, N2)
## Initialise layers
for lr in xrange(len(net.layer)):
net.layer[lr].v = -65 * np.ones(net.layer[lr].N)
net.layer[lr].u = net.layer[lr].b * net.layer[lr].v
net.layer[lr].firings = np.array([])
v1 = np.zeros([T, N1])
v2 = np.zeros([T, N2])
u1 = np.zeros([T, N1])
u2 = np.zeros([T, N2])
## SIMULATE
for t in xrange(T):
# Deliver a constant base current to layer 1
net.layer[0].I = Ib * np.ones(N1)
net.layer[1].I = np.zeros(N2)
net.Update(t)
v1[t] = net.layer[0].v
v2[t] = net.layer[1].v
u1[t] = net.layer[0].u
u2[t] = net.layer[1].u
## Retrieve firings and add Dirac pulses for presentation
firings1 = net.layer[0].firings
firings2 = net.layer[1].firings
if firings1.size != 0:
v1[firings1[:, 0], firings1[:, 1]] = 30
if firings2.size != 0:
v2[firings2[:, 0], firings2[:, 1]] = 30
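# (In the Izhikevich model v is reset right after a spike, so the recorded
# traces never reach the spike peak; clamping v to 30 mV at the stored firing
# indices is purely for presentation in the plots below.)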
## Plot membrane potentials
plt.figure(1)
plt.subplot(211)
plt.plot(range(T), v1)
plt.title('Population 1 membrane potentials')
plt.ylabel('Voltage (mV)')
plt.ylim([-90, 40])
plt.subplot(212)
plt.plot(range(T), v2)
plt.title('Population 2 membrane potentials')
plt.ylabel('Voltage (mV)')
plt.ylim([-90, 40])
plt.xlabel('Time (ms)')
## Plot recovery variable
plt.figure(2)
plt.subplot(211)
plt.plot(range(T), u1)
plt.title('Population 1 recovery variables')
plt.ylabel('Voltage (mV)')
plt.subplot(212)
plt.plot(range(T), u2)
plt.title('Population 2 recovery variables')
plt.ylabel('Voltage (mV)')
plt.xlabel('Time (ms)')
## Raster plots of firings
if firings1.size != 0:
plt.figure(3)
plt.subplot(211)
plt.scatter(firings1[:, 0], firings1[:, 1] + 1, marker='.')
plt.xlim(0, T)
plt.ylabel('Neuron number')
plt.ylim(0, N1+1)
plt.title('Population 1 firings')
if firings2.size != 0:
plt.subplot(212)
plt.scatter(firings2[:, 0], firings2[:, 1] + 1, marker='.')
plt.xlim(0, T)
plt.ylabel('Neuron number')
plt.ylim(0, N2+1)
plt.xlabel('Time (ms)')
plt.title('Population 2 firings')
plt.show()
| gpl-3.0 |
CVML/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)  # reuse the statistics fitted on X_train
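# A hedged sanity check (assumes RobustScaler's public ``center_`` and
# ``scale_`` attributes): robust scaling centers each feature on its median
# and divides by its interquartile range, so the single -1000 outlier barely
# moves either statistic, unlike the mean/std used by StandardScaler.
robust_center = robust_scaler.center_  # per-feature median of X_train
robust_spread = robust_scaler.scale_   # per-feature IQR of X_train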
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
dpshelio/sunpy | sunpy/instr/rhessi.py | 1 | 11794 | # -*- coding: utf-8 -*-
"""
Provides programs to process and analyze RHESSI data.
.. warning:: This module is in development.
"""
import re
import csv
import numpy as np
import astropy.units as u
from astropy.time import TimeDelta
from astropy.time import Time
import sunpy.io
from sunpy.time import TimeRange, parse_time
from sunpy.coordinates import sun
__all__ = ['parse_observing_summary_hdulist', 'backprojection',
           'parse_observing_summary_dbase_file']
# Measured fixed grid parameters
grid_pitch = (4.52467, 7.85160, 13.5751, 23.5542, 40.7241, 70.5309, 122.164,
211.609, 366.646)
grid_orientation = (3.53547, 2.75007, 3.53569, 2.74962, 3.92596, 2.35647,
0.786083, 0.00140674, 1.57147)
lc_linecolors = ('black', 'pink', 'green', 'blue', 'brown', 'red',
'navy', 'orange', 'green')
def parse_observing_summary_dbase_file(filename):
"""
Parse the RHESSI observing summary database file. This file lists the
names of the observing summary files for specific time ranges, along with
other information.
Parameters
----------
filename : `str`
The filename of the obssumm dbase file.
Returns
-------
out : `dict`
Return a `dict` containing the parsed data in the dbase file.
Examples
--------
>>> import sunpy.instr.rhessi as rhessi
>>> rhessi.parse_observing_summary_dbase_file(fname) # doctest: +SKIP
References
----------
| https://hesperia.gsfc.nasa.gov/ssw/hessi/doc/guides/hessi_data_access.htm#Observing%20Summary%20Data
.. note::
This API is currently limited to providing data from whole days only.
"""
# An example dbase file can be found at:
# https://hesperia.gsfc.nasa.gov/hessidata/dbase/hsi_obssumm_filedb_200311.txt
with open(filename) as fd:
reader = csv.reader(fd, delimiter=' ', skipinitialspace=True)
_ = next(reader) # skip 'HESSI Filedb File:' row
_ = next(reader) # skip 'Created: ...' row
_ = next(reader) # skip 'Number of Files: ...' row
column_names = next(reader) # ['Filename', 'Orb_st', 'Orb_end',...]
obssumm_filename = []
orbit_start = []
orbit_end = []
start_time = []
end_time = []
status_flag = []
number_of_packets = []
for row in reader:
obssumm_filename.append(row[0])
orbit_start.append(int(row[1]))
orbit_end.append(int(row[2]))
start_time.append(Time.strptime(row[3], '%d-%b-%y')) # skip time
end_time.append(Time.strptime(row[5], '%d-%b-%y')) # skip time
status_flag.append(int(row[7]))
number_of_packets.append(int(row[8]))
return {
column_names[0].lower(): obssumm_filename,
column_names[1].lower(): orbit_start,
column_names[2].lower(): orbit_end,
column_names[3].lower(): start_time,
column_names[4].lower(): end_time,
column_names[5].lower(): status_flag,
column_names[6].lower(): number_of_packets
}
def parse_observing_summary_hdulist(hdulist):
"""
Parse a RHESSI observation summary file.
Parameters
----------
hdulist : list
The HDU list from the fits file.
Returns
-------
out : `dict`
Returns a dictionary.
"""
header = hdulist[0].header
reference_time_ut = parse_time(hdulist[5].data.field('UT_REF')[0],
format='utime')
time_interval_sec = hdulist[5].data.field('TIME_INTV')[0]
# label_unit = fits[5].data.field('DIM1_UNIT')[0]
# labels = fits[5].data.field('DIM1_IDS')
labels = ['3 - 6 keV', '6 - 12 keV', '12 - 25 keV', '25 - 50 keV',
'50 - 100 keV', '100 - 300 keV', '300 - 800 keV',
'800 - 7000 keV', '7000 - 20000 keV']
# The data stored in the fits file are "compressed" countrates stored as
# one byte
compressed_countrate = np.array(hdulist[6].data.field('countrate'))
countrate = uncompress_countrate(compressed_countrate)
dim = np.array(countrate[:, 0]).size
time_array = parse_time(reference_time_ut) + \
TimeDelta(time_interval_sec * np.arange(dim) * u.second)
# TODO generate the labels for the dict automatically from labels
data = {'time': time_array, 'data': countrate, 'labels': labels}
return header, data
def uncompress_countrate(compressed_countrate):
"""Convert the compressed count rate inside of observing summary file from
a compressed byte to a true count rate
Parameters
----------
compressed_countrate : byte array
A compressed count rate returned from an observing summary file.
References
----------
Hsi_obs_summ_decompress.pro `<https://hesperia.gsfc.nasa.gov/ssw/hessi/idl/qlook_archive/hsi_obs_summ_decompress.pro>`_
"""
# Ensure the compressed counts are valid byte values (0-255)
if (compressed_countrate.min() < 0) or (compressed_countrate.max() > 255):
raise ValueError(
'Expected compressed counts {} to be in range 0-255'.format(compressed_countrate))
# TODO Must be a better way than creating entire lookup table on each call
ll = np.arange(0, 16, 1)
lkup = np.zeros(256, dtype='int')
_sum = 0
for i in range(0, 16):
lkup[16 * i:16 * (i + 1)] = ll * 2 ** i + _sum
if i < 15:
_sum = lkup[16 * (i + 1) - 1] + 2 ** i
return lkup[compressed_countrate]
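# Hedged illustration of the decompression table built above: byte values
# 0-16 map to themselves, after which the spacing doubles every 16 values,
# e.g. uncompress_countrate(np.array([0, 16, 17, 255]))
#      -> array([0, 16, 18, 1015792])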
def hsi_linecolors():
"""Define discrete colors to use for RHESSI plots
Parameters
----------
None
Returns
-------
tuple : matplotlib color list
References
----------
hsi_linecolors.pro `<https://hesperia.gsfc.nasa.gov/ssw/hessi/idl/gen/hsi_linecolors.pro>`_
"""
return ('black', 'magenta', 'lime', 'cyan', 'y', 'red', 'blue', 'orange',
'olive')
def _backproject(calibrated_event_list, detector=8, pixel_size=(1., 1.),
image_dim=(64, 64)):
"""
Given a stacked calibrated event list fits file create a back
projection image for an individual detector. This function is used by
backprojection.
Parameters
----------
calibrated_event_list : str
filename of a RHESSI calibrated event list
detector : int
the detector number
pixel_size : 2-tuple
the size of the pixels in arcseconds. Default is (1,1).
image_dim : 2-tuple
the size of the output image in number of pixels
Returns
-------
out : ndarray
Return a backprojection image.
Examples
--------
>>> import sunpy.instr.rhessi as rhessi
"""
# info_parameters = fits[2]
# detector_efficiency = info_parameters.data.field('cbe_det_eff$$REL')
afits = sunpy.io.read_file(calibrated_event_list)
fits_detector_index = detector + 2
detector_index = detector - 1
grid_angle = np.pi/2. - grid_orientation[detector_index]
harm_ang_pitch = grid_pitch[detector_index]/1
phase_map_center = afits[fits_detector_index].data.field('phase_map_ctr')
this_roll_angle = afits[fits_detector_index].data.field('roll_angle')
modamp = afits[fits_detector_index].data.field('modamp')
grid_transmission = afits[fits_detector_index].data.field('gridtran')
count = afits[fits_detector_index].data.field('count')
tempa = (np.arange(image_dim[0] * image_dim[1]) % image_dim[0]) - (image_dim[0]-1)/2.
tempb = tempa.reshape(image_dim[0], image_dim[1]).transpose().reshape(image_dim[0]*image_dim[1])
pixel = np.array(list(zip(tempa, tempb)))*pixel_size[0]
phase_pixel = (2 * np.pi/harm_ang_pitch) *\
(np.outer(pixel[:, 0], np.cos(this_roll_angle - grid_angle)) -
np.outer(pixel[:, 1], np.sin(this_roll_angle - grid_angle))) + phase_map_center
phase_modulation = np.cos(phase_pixel)
gridmod = modamp * grid_transmission
probability_of_transmission = gridmod * phase_modulation + grid_transmission
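# i.e. probability_of_transmission = grid_transmission * (1 + modamp * cos(phase)),
# the usual rotating-modulation-collimator profile for a single detector/grid pair.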
bproj_image = np.inner(probability_of_transmission, count).reshape(image_dim)
return bproj_image
@u.quantity_input
def backprojection(calibrated_event_list, pixel_size: u.arcsec=(1., 1.) * u.arcsec,
image_dim: u.pix=(64, 64) * u.pix):
"""
Given a stacked calibrated event list fits file create a back
projection image.
.. warning:: The image is not in the right orientation!
Parameters
----------
calibrated_event_list : str
filename of a RHESSI calibrated event list
pixel_size : `~astropy.units.Quantity` instance
the size of the pixels in arcseconds. Default is (1,1).
image_dim : `~astropy.units.Quantity` instance
the size of the output image in number of pixels
Returns
-------
out : RHESSImap
Return a backprojection map.
Examples
--------
This example is broken.
>>> import sunpy.data
>>> import sunpy.data.sample # doctest: +REMOTE_DATA
>>> import sunpy.instr.rhessi as rhessi
>>> map = rhessi.backprojection(sunpy.data.sample.RHESSI_EVENT_LIST) # doctest: +SKIP
>>> map.peek() # doctest: +SKIP
"""
# import sunpy.map in here so that net and timeseries don't end up importing map
import sunpy.map
pixel_size = pixel_size.to(u.arcsec)
image_dim = np.array(image_dim.to(u.pix).value, dtype=int)
afits = sunpy.io.read_file(calibrated_event_list)
info_parameters = afits[2]
xyoffset = info_parameters.data.field('USED_XYOFFSET')[0]
time_range = TimeRange(info_parameters.data.field('ABSOLUTE_TIME_RANGE')[0], format='utime')
image = np.zeros(image_dim)
# find out what detectors were used
det_index_mask = afits[1].data.field('det_index_mask')[0]
detector_list = (np.arange(9)+1) * np.array(det_index_mask)
for detector in detector_list:
if detector > 0:
image = image + _backproject(calibrated_event_list, detector=detector,
pixel_size=pixel_size.value, image_dim=image_dim)
dict_header = {
"DATE-OBS": time_range.center.strftime("%Y-%m-%d %H:%M:%S"),
"CDELT1": pixel_size[0],
"NAXIS1": image_dim[0],
"CRVAL1": xyoffset[0],
"CRPIX1": image_dim[0]/2 + 0.5,
"CUNIT1": "arcsec",
"CTYPE1": "HPLN-TAN",
"CDELT2": pixel_size[1],
"NAXIS2": image_dim[1],
"CRVAL2": xyoffset[1],
"CRPIX2": image_dim[0]/2 + 0.5,
"CUNIT2": "arcsec",
"CTYPE2": "HPLT-TAN",
"HGLT_OBS": 0,
"HGLN_OBS": 0,
"RSUN_OBS": sun.angular_radius(time_range.center).value,
"RSUN_REF": sunpy.sun.constants.radius.value,
"DSUN_OBS": sun.earth_distance(time_range.center).value * sunpy.sun.constants.au.value
}
result_map = sunpy.map.Map(image, dict_header)
return result_map
def _build_energy_bands(label, bands):
"""
Parameters
----------
label: `str`
bands: `list` of `str`
Returns
-------
bands_with_units: `list` of `str`
Each `str` item is an energy band and its unit
Example
-------
>>> from sunpy.instr.rhessi import _build_energy_bands
>>> _build_energy_bands('Energy bands (keV)', ['3 - 6', '6 - 12', '12 - 25'])
['3 - 6 keV', '6 - 12 keV', '12 - 25 keV']
"""
unit_pattern = re.compile(r'^.+\((?P<UNIT>\w+)\)$')
matched = unit_pattern.match(label)
if matched is None:
raise ValueError("Unable to find energy unit in '{0}' "
"using REGEX '{1}'".format(label, unit_pattern.pattern))
unit = matched.group('UNIT').strip()
return ['{energy_band} {unit}'.format(energy_band=band, unit=unit) for band in bands]
| bsd-2-clause |
jblackburne/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 13 | 26241 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean scikit-learn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
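# (euclidean_distances expands ||x - y||**2 = ||x||**2 - 2 * x.dot(y) + ||y||**2,
# so passing the precomputed squared norms should give identical results.)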
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
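# The loop below re-derives both kernels by hand:
# additive chi2: k_add(x, y) = -sum_i (x_i - y_i)**2 / (x_i + y_i)
# exponential chi2: k(x, y) = exp(gamma * k_add(x, y))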
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
jakevdp/multiband_LS | figures/fig06_multiband_models.py | 1 | 2434 | """
Here we plot multiband periodograms for several choices of model complexity,
varying the number of shared base terms and per-band terms in the multiband
Lomb-Scargle model.
"""
import numpy as np
import matplotlib.pyplot as plt
# Use seaborn settings for plot styles
import seaborn; seaborn.set()
from gatspy.datasets import RRLyraeGenerated
from gatspy.periodic import (LombScargleAstroML, LombScargleMultiband,
NaiveMultiband)
# Choose a Sesar 2010 object to base our fits on
lcid = 1019544
rrlyrae = RRLyraeGenerated(lcid, random_state=0)
print("Extinction A_r = {0:.4f}".format(rrlyrae.obsmeta['rExt']))
# Generate data in a 6-month observing season
Nobs = 60
rng = np.random.RandomState(0)
nights = np.arange(180)
rng.shuffle(nights)
nights = nights[:Nobs]
# Find a subset of the simulated data. This is the same procedure as in
# fig_multiband_sim
t = 57000 + nights + 0.05 * rng.randn(Nobs)
dy = 0.06 + 0.01 * rng.randn(Nobs)
mags = np.array([rrlyrae.generated(band, t, err=dy, corrected=False)
for band in 'ugriz'])
filts = np.array([f for f in 'ugriz'])
# Here's our subset
filts = np.take(list('ugriz'), np.arange(Nobs), mode='wrap')
mags = mags[np.arange(Nobs) % 5, np.arange(Nobs)]
masks = [(filts == band) for band in 'ugriz']
fig, ax = plt.subplots(5, sharex=True, sharey=True)
fig.subplots_adjust(left=0.1, right=0.93, hspace=0.1)
periods = np.linspace(0.2, 1.4, 1000)
combos = [(1, 0), (0, 1), (2, 0), (2, 1), (2, 2)]
for axi, (Nbase, Nband) in zip(ax, combos):
LS_multi = LombScargleMultiband(Nterms_base=Nbase,
Nterms_band=Nband)
LS_multi.fit(t, mags, dy, filts)
P_multi = LS_multi.periodogram(periods)
axi.plot(periods, P_multi, lw=1)
text = ('$N_{{base}}={0},\ N_{{band}}={1}\ \ (M^{{eff}}={2})$'
''.format(Nbase, Nband, (2 * max(0, Nbase - Nband)
+ 5 * (2 * Nband + 1))))
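# (Hedged reading of the M_eff expression above: the model has 2*Nbase + 1
# shared base parameters plus 2*Nband + 1 parameters in each of the 5 bands,
# but base terms up to order Nband are degenerate with the per-band terms,
# leaving 2*max(0, Nbase - Nband) extra degrees of freedom on top of
# 5*(2*Nband + 1).)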
if (Nbase, Nband) == (1, 0):
text += ' "shared-phase model"'
elif (Nbase, Nband) == (0, 1):
text += ' "multi-phase model"'
axi.text(0.21, 0.98, text, fontsize=10, ha='left', va='top')
axi.yaxis.set_major_locator(plt.MultipleLocator(0.5))
axi.yaxis.set_major_formatter(plt.NullFormatter())
ax[0].set_title('Periodograms for Multiterm Models')
ax[-1].set_xlabel('Period (days)')
ax[2].set_ylabel('multiterm model power')
fig.savefig('fig06.pdf')
plt.show()
| bsd-2-clause |
dilawar/moogli | scripts/moogli_purkinje_simulation_with_rm_and_graph.py | 4 | 8730 | """
This script demonstrates how to use moogli to carry out a simulation and
simultaneously update the visualizer. Also another viewer shows the compartment
rm values. The visualizers remain active while the simulation is running.
Once the simulation finishes, a graph comes up, showing the soma vm values.
"""
import moogli
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import sys
import os
import random
import numpy as np
import math
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
# The QApplication class manages the GUI application's
# control flow and main settings
app = QtGui.QApplication(sys.argv)
# Load model from the neuroml file into moose
filename = os.path.join( os.path.split(os.path.realpath(__file__))[0]
, "../neuroml/PurkinjeCellPassivePulseInput/PurkinjePassive.net.xml"
)
popdict, projdict = moose.neuroml.loadNeuroML_L123(filename)
# setting up hsolve object for each neuron
for popinfo in popdict.values():
for cell in popinfo[1].values():
solver = moose.HSolve(cell.path + "/hsolve")
solver.target = cell.path
# create a table to store vm values
table = moose.Table("/soma_table")
moose.connect( table
, 'requestOut'
, moose.element("/cells[0]/BigCellCML_0[0]/Seg0_soma_0[0]")
, "getVm"
)
# reinit moose to bring to a reliable initial state.
moose.reinit()
SIMULATION_DELTA = 0.001
SIMULATION_TIME = 0.03
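# With these settings the callback below advances the model in 1 ms steps
# until 30 ms of simulated time have elapsed, i.e. roughly
# SIMULATION_TIME / SIMULATION_DELTA = 30 callback iterations.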
ALL_COMPARTMENTS = map( lambda x : x.path
, moose.wildcardFind("/cells[0]/##[ISA=CompartmentBase]")
)
BASE_RM_VALUE = min( map( lambda x : moose.element(x).Rm
, ALL_COMPARTMENTS
)
)
PEAK_RM_VALUE = max( map( lambda x : moose.element(x).Rm
, ALL_COMPARTMENTS
)
)
BASE_RM_COLOR = [0.0, 1.0, 0.0, 0.1]
PEAK_RM_COLOR = [1.0, 0.0, 0.0, 1.0]
BASE_VM_VALUE = -0.065
PEAK_VM_VALUE = -0.060
BASE_VM_COLOR = [1.0, 0.0, 0.0, 0.1]
PEAK_VM_COLOR = [0.0, 0.0, 1.0, 1.0]
"""Create a class for plotting vm"""
class PlotWidget(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
# We want the axes cleared every time plot() is called
self.axes.hold(False)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def create_vm_visualizer():
# Moogli requires a morphology object. Create a morphology object
# by reading the geometry details from all objects of type CompartmentBase
# inside /cells[0]
vm_morphology = moogli.read_morphology_from_moose(name = "", path = "/cells[0]")
# Create a named group of compartments called 'group-all'
# which will contain all the compartments of the model.
# Each group has a strict upper and lower limit for the
# variable which is being visualized.
# Both limits map to colors provided to the api.
# The value of the variable is linearly mapped to a color value
# lying between the upper and lower color values.
vm_morphology.create_group( "group-all" # group name
, ALL_COMPARTMENTS # sequence of compartments belonging to this group
, BASE_VM_VALUE # base value of variable
, PEAK_VM_VALUE # peak value of variable
, BASE_VM_COLOR # color corresponding to base value
, PEAK_VM_COLOR # color corresponding to peak value
)
# set initial color of all compartments in accordance with their vm
vm_morphology.set_color( "group-all"
, map( lambda x : moose.element(x).Vm
, ALL_COMPARTMENTS
)
)
# instantiate the visualizer with the morphology object created earlier
vm_viewer = moogli.DynamicMorphologyViewerWidget(vm_morphology)
# Callback function will be called by the visualizer at regular intervals.
# The callback can modify both the morphology and viewer object's properties
# since they are passed as arguments.
def callback(morphology, viewer):
# run simulation for 1 ms
moose.start(SIMULATION_DELTA)
# change color of all the compartments according to their vm values.
# a value higher than peak value will be clamped to peak value
# a value lower than base value will be clamped to base value.
morphology.set_color( "group-all"
, map( lambda x : moose.element(x).Vm
, ALL_COMPARTMENTS
)
)
# if the callback returns true, it will be called again.
# if it returns false it will not be called again.
# the condition below ensures that simulation runs for 1 sec
if moose.element("/clock").currentTime < SIMULATION_TIME : return True
else:
plot_widget = PlotWidget()
xs = np.linspace(0, moose.element("/clock").runTime, len(table.vector))
ys = table.vector.copy()
plot_widget.axes.plot(xs, ys)
plot_widget.draw()
plot_widget.show()
plot_widget.setWindowTitle("Soma Vm")
return False
# set the callback function to be called after every idletime milliseconds
vm_viewer.set_callback(callback, idletime = 0)
# make sure that entire model is visible
vm_viewer.pitch(math.pi / 2)
vm_viewer.zoom(0.25)
vm_viewer.setWindowTitle("Vm Visualization")
return vm_viewer
def create_rm_visualizer():
# Moogli requires a morphology object. Create a morphology object
# by reading the geometry details from all objects of type CompartmentBase
# inside /cells[0]
morphology = moogli.read_morphology_from_moose(name = "", path = "/cells[0]")
# Create a named group of compartments called 'group-all'
# which will contain all the compartments of the model.
# Each group has a strict upper and lower limit for the
# variable which is being visualized.
# Both limits map to colors provided to the api.
# The value of the variable is linearly mapped to a color value
# lying between the upper and lower color values.
morphology.create_group( "group-all" # group name
, ALL_COMPARTMENTS # sequence of compartments belonging to this group
, BASE_RM_VALUE # base value of variable
, PEAK_RM_VALUE # peak value of variable
, BASE_RM_COLOR # color corresponding to base value
, PEAK_RM_COLOR # color corresponding to peak value
)
# set initial color of all compartments in accordance with their rm
morphology.set_color( "group-all"
, map( lambda x : moose.element(x).Rm
, ALL_COMPARTMENTS
)
)
# instantiate the visualizer with the morphology object created earlier
rm_viewer = moogli.MorphologyViewerWidget(morphology)
# make sure that entire model is visible
rm_viewer.pitch(math.pi / 2)
rm_viewer.zoom(0.25)
rm_viewer.setWindowTitle("Rm")
return rm_viewer
vm_visualizer = create_vm_visualizer()
rm_visualizer = create_rm_visualizer()
vm_visualizer.show()
rm_visualizer.show()
# http://stackoverflow.com/questions/15861839/error-upon-app-shutdown-qglcontextmakecurrent-cannot-make-invalid-context-cu
def delete_gl_widget():
global vm_visualizer
global rm_visualizer
vm_visualizer.setParent(None)
del vm_visualizer
rm_visualizer.setParent(None)
del rm_visualizer
QApplication.instance().aboutToQuit.connect( delete_gl_widget )
# Enter the main event loop and wait until exit() is called.
# It is necessary to call this function to start event handling.
# The main event loop receives events from the window system and
# dispatches these to the application widgets.
app.exec_()
| gpl-2.0 |
phdowling/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
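# For these 20 points the effective relation becomes
# y = (3 - 20) * x + 2 + noise = -17 * x + 2 + noise, so the corruption is
# only in the y direction (the x values keep their original distribution).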
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
tomlof/scikit-learn | sklearn/tests/test_base.py | 11 | 14443 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
import sklearn
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
from sklearn.utils import deprecated
from sklearn.base import TransformerMixin
from sklearn.utils.mocking import MockDataFrame
import pickle
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class ModifyInitParams(BaseEstimator):
"""Deprecated behavior.
Equal parameters but with a type cast.
Doesn't fulfill the ``a is a`` identity check used by ``clone``.
"""
def __init__(self, a=np.array([0])):
self.a = a.copy()
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""scikit-learn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_clone_copy_init_params():
# test for deprecation warning when copying or casting an init parameter
est = ModifyInitParams()
message = ("Estimator ModifyInitParams modifies parameters in __init__. "
"This behavior is deprecated as of 0.18 and support "
"for this behavior will be removed in 0.20.")
assert_warns_message(DeprecationWarning, message, clone, est)
def test_clone_sparse_matrices():
sparse_matrix_classes = [
getattr(sp, name)
for name in dir(sp) if name.endswith('_matrix')]
for cls in sparse_matrix_classes:
sparse_matrix = cls(np.eye(5))
clf = MyEstimator(empty=sparse_matrix)
clf_cloned = clone(clf)
assert_true(clf.empty.__class__ is clf_cloned.empty.__class__)
assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray())
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline(
[('svc_cv', GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
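# Illustrative sketch (helper name is ours): the double-underscore syntax used
# by test_get_params/test_set_params reaches into nested estimators as
# '<component>__<parameter>', both when reading and when setting parameters.
def _example_nested_param_syntax():
    pipe = Pipeline([("svc", SVC())])
    assert 'svc__C' in pipe.get_params(deep=True)
    pipe.set_params(svc__C=10.0)
    assert pipe.get_params()['svc__C'] == 10.0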
def test_score_sample_weight():
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
def test_clone_pandas_dataframe():
class DummyEstimator(BaseEstimator, TransformerMixin):
"""This is a dummy class for generating numerical features
This feature extractor extracts numerical features from pandas data
frame.
Parameters
----------
df: pandas data frame
The pandas data frame parameter.
Notes
-----
"""
def __init__(self, df=None, scalar_param=1):
self.df = df
self.scalar_param = scalar_param
def fit(self, X, y=None):
pass
def transform(self, X, y=None):
pass
# build and clone estimator
d = np.arange(10)
df = MockDataFrame(d)
e = DummyEstimator(df, scalar_param=1)
cloned_e = clone(e)
# the test
assert_true((e.df == cloned_e.df).values.all())
assert_equal(e.scalar_param, cloned_e.scalar_param)
def test_pickle_version_warning_is_not_raised_with_matching_version():
iris = datasets.load_iris()
tree = DecisionTreeClassifier().fit(iris.data, iris.target)
tree_pickle = pickle.dumps(tree)
assert_true(b"version" in tree_pickle)
tree_restored = assert_no_warnings(pickle.loads, tree_pickle)
# test that we can predict with the restored decision tree classifier
score_of_original = tree.score(iris.data, iris.target)
score_of_restored = tree_restored.score(iris.data, iris.target)
assert_equal(score_of_original, score_of_restored)
class TreeBadVersion(DecisionTreeClassifier):
def __getstate__(self):
return dict(self.__dict__.items(), _sklearn_version="something")
pickle_error_message = (
"Trying to unpickle estimator {estimator} from "
"version {old_version} when using version "
"{current_version}. This might "
"lead to breaking code or invalid results. "
"Use at your own risk.")
def test_pickle_version_warning_is_issued_upon_different_version():
iris = datasets.load_iris()
tree = TreeBadVersion().fit(iris.data, iris.target)
tree_pickle_other = pickle.dumps(tree)
message = pickle_error_message.format(estimator="TreeBadVersion",
old_version="something",
current_version=sklearn.__version__)
assert_warns_message(UserWarning, message, pickle.loads, tree_pickle_other)
class TreeNoVersion(DecisionTreeClassifier):
def __getstate__(self):
return self.__dict__
def test_pickle_version_warning_is_issued_when_no_version_info_in_pickle():
iris = datasets.load_iris()
# TreeNoVersion has no getstate, like pre-0.18
tree = TreeNoVersion().fit(iris.data, iris.target)
tree_pickle_noversion = pickle.dumps(tree)
assert_false(b"version" in tree_pickle_noversion)
message = pickle_error_message.format(estimator="TreeNoVersion",
old_version="pre-0.18",
current_version=sklearn.__version__)
# check we got the warning about using pre-0.18 pickle
assert_warns_message(UserWarning, message, pickle.loads,
tree_pickle_noversion)
def test_pickle_version_no_warning_is_issued_with_non_sklearn_estimator():
iris = datasets.load_iris()
tree = TreeNoVersion().fit(iris.data, iris.target)
tree_pickle_noversion = pickle.dumps(tree)
try:
module_backup = TreeNoVersion.__module__
TreeNoVersion.__module__ = "notsklearn"
assert_no_warnings(pickle.loads, tree_pickle_noversion)
finally:
TreeNoVersion.__module__ = module_backup
class DontPickleAttributeMixin(object):
def __getstate__(self):
data = self.__dict__.copy()
data["_attribute_not_pickled"] = None
return data
def __setstate__(self, state):
state["_restored"] = True
self.__dict__.update(state)
class MultiInheritanceEstimator(BaseEstimator, DontPickleAttributeMixin):
def __init__(self, attribute_pickled=5):
self.attribute_pickled = attribute_pickled
self._attribute_not_pickled = None
def test_pickling_when_getstate_is_overwritten_by_mixin():
estimator = MultiInheritanceEstimator()
estimator._attribute_not_pickled = "this attribute should not be pickled"
serialized = pickle.dumps(estimator)
estimator_restored = pickle.loads(serialized)
assert_equal(estimator_restored.attribute_pickled, 5)
assert_equal(estimator_restored._attribute_not_pickled, None)
assert_true(estimator_restored._restored)
def test_pickling_when_getstate_is_overwritten_by_mixin_outside_of_sklearn():
try:
estimator = MultiInheritanceEstimator()
text = "this attribute should not be pickled"
estimator._attribute_not_pickled = text
old_mod = type(estimator).__module__
type(estimator).__module__ = "notsklearn"
serialized = estimator.__getstate__()
assert_dict_equal(serialized, {'_attribute_not_pickled': None,
'attribute_pickled': 5})
serialized['attribute_pickled'] = 4
estimator.__setstate__(serialized)
assert_equal(estimator.attribute_pickled, 4)
assert_true(estimator._restored)
finally:
type(estimator).__module__ = old_mod
class SingleInheritanceEstimator(BaseEstimator):
def __init__(self, attribute_pickled=5):
self.attribute_pickled = attribute_pickled
self._attribute_not_pickled = None
def __getstate__(self):
data = self.__dict__.copy()
data["_attribute_not_pickled"] = None
return data
def test_pickling_works_when_getstate_is_overwritten_in_the_child_class():
estimator = SingleInheritanceEstimator()
estimator._attribute_not_pickled = "this attribute should not be pickled"
serialized = pickle.dumps(estimator)
estimator_restored = pickle.loads(serialized)
assert_equal(estimator_restored.attribute_pickled, 5)
assert_equal(estimator_restored._attribute_not_pickled, None)
| bsd-3-clause |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/pandas/tseries/tests/test_offsets.py | 2 | 143864 | import os
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from pandas.compat import range, iteritems
from pandas import compat
import nose
from nose.tools import assert_raises
import numpy as np
from pandas.core.datetools import (
bday, BDay, CDay, BQuarterEnd, BMonthEnd,
CBMonthEnd, CBMonthBegin,
BYearEnd, MonthEnd, MonthBegin, BYearBegin, CustomBusinessDay,
QuarterBegin, BQuarterBegin, BMonthBegin, DateOffset, Week,
YearBegin, YearEnd, Hour, Minute, Second, Day, Micro, Milli, Nano, Easter,
WeekOfMonth, format, ole2datetime, QuarterEnd, to_datetime, normalize_date,
get_offset, get_offset_name, get_standard_freq)
from pandas import Series
from pandas.tseries.frequencies import _offset_map
from pandas.tseries.index import _to_m8, DatetimeIndex, _daterange_cache, date_range
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
from pandas.io.pickle import read_pickle
from pandas.tslib import NaT, Timestamp, Timedelta
import pandas.tslib as tslib
from pandas.util.testing import assertRaisesRegexp
import pandas.util.testing as tm
from pandas.tseries.offsets import BusinessMonthEnd, CacheableOffset, \
LastWeekOfMonth, FY5253, FY5253Quarter, WeekDay
from pandas.tseries.holiday import USFederalHolidayCalendar
_multiprocess_can_split_ = True
def test_monthrange():
import calendar
for y in range(2000, 2013):
for m in range(1, 13):
assert tslib.monthrange(y, m) == calendar.monthrange(y, m)
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(ValueError, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
def test_to_m8():
valb = datetime(2007, 10, 1)
valu = _to_m8(valb)
tm.assert_isinstance(valu, np.datetime64)
# assert valu == np.datetime64(datetime(2007,10,1))
# def test_datetime64_box():
# valu = np.datetime64(datetime(2007,10,1))
# valb = _dt_box(valu)
# assert type(valb) == datetime
# assert valb == datetime(2007,10,1)
#####
### DateOffset Tests
#####
class Base(tm.TestCase):
_offset = None
_offset_types = [getattr(offsets, o) for o in offsets.__all__]
timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']
@property
def offset_types(self):
return self._offset_types
def _get_offset(self, klass, value=1, normalize=False):
# create instance from offset class
if klass is FY5253 or klass is FY5253Quarter:
klass = klass(n=value, startingMonth=1, weekday=1,
qtr_with_extra_week=1, variation='last',
normalize=normalize)
elif klass is LastWeekOfMonth:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is WeekOfMonth:
klass = klass(n=value, week=1, weekday=5, normalize=normalize)
elif klass is Week:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is DateOffset:
klass = klass(days=value, normalize=normalize)
else:
try:
klass = klass(value, normalize=normalize)
except:
klass = klass(normalize=normalize)
return klass
def test_apply_out_of_range(self):
if self._offset is None:
return
# try to create an out-of-bounds result timestamp; if we can't create the offset
# skip
try:
offset = self._get_offset(self._offset, value=10000)
result = Timestamp('20080101') + offset
self.assertIsInstance(result, datetime)
self.assertIsNone(result.tzinfo)
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
# Check tz is preserved
for tz in self.timezones:
t = Timestamp('20080101', tz=tz)
result = t + offset
self.assertIsInstance(result, datetime)
self.assertEqual(t.tzinfo, result.tzinfo)
except (tslib.OutOfBoundsDatetime):
raise
except (ValueError, KeyError) as e:
raise nose.SkipTest("cannot create out_of_range offset: {0} {1}".format(str(self).split('.')[-1],e))
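# Illustrative sketch (function name is ours, mirroring test_apply_out_of_range
# above): when the result no longer fits in the datetime64[ns] range, applying
# an offset is expected to hand back a plain datetime rather than raise.
def _example_apply_out_of_range():
    result = Timestamp('20080101') + MonthEnd(10000)  # lands centuries past 2262
    # still a datetime, though it may no longer be a Timestamp
    assert isinstance(result, datetime)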
class TestCommon(Base):
def setUp(self):
        # expected values when the offsets created by Base._get_offset
        # are applied to 2011/01/01 09:00 (Saturday);
        # used for .apply and .rollforward
self.expecteds = {'Day': Timestamp('2011-01-02 09:00:00'),
'DateOffset': Timestamp('2011-01-02 09:00:00'),
'BusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'CustomBusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthBegin': Timestamp('2011-02-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthEnd': Timestamp('2011-01-31 09:00:00'),
'BusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'YearBegin': Timestamp('2012-01-01 09:00:00'),
'BYearBegin': Timestamp('2011-01-03 09:00:00'),
'YearEnd': Timestamp('2011-12-31 09:00:00'),
'BYearEnd': Timestamp('2011-12-30 09:00:00'),
'QuarterBegin': Timestamp('2011-03-01 09:00:00'),
'BQuarterBegin': Timestamp('2011-03-01 09:00:00'),
'QuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BQuarterEnd': Timestamp('2011-03-31 09:00:00'),
'WeekOfMonth': Timestamp('2011-01-08 09:00:00'),
'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'),
'FY5253Quarter': Timestamp('2011-01-25 09:00:00'),
'FY5253': Timestamp('2011-01-25 09:00:00'),
'Week': Timestamp('2011-01-08 09:00:00'),
'Easter': Timestamp('2011-04-24 09:00:00'),
'Hour': Timestamp('2011-01-01 10:00:00'),
'Minute': Timestamp('2011-01-01 09:01:00'),
'Second': Timestamp('2011-01-01 09:00:01'),
'Milli': Timestamp('2011-01-01 09:00:00.001000'),
'Micro': Timestamp('2011-01-01 09:00:00.000001'),
'Nano': Timestamp(np.datetime64('2011-01-01T09:00:00.000000001Z'))}
def test_return_type(self):
for offset in self.offset_types:
offset = self._get_offset(offset)
# make sure that we are returning a Timestamp
result = Timestamp('20080101') + offset
self.assertIsInstance(result, Timestamp)
# make sure that we are returning NaT
self.assertTrue(NaT + offset is NaT)
self.assertTrue(offset + NaT is NaT)
self.assertTrue(NaT - offset is NaT)
self.assertTrue((-offset).apply(NaT) is NaT)
def _check_offsetfunc_works(self, offset, funcname, dt, expected,
normalize=False):
offset_s = self._get_offset(offset, normalize=normalize)
func = getattr(offset_s, funcname)
result = func(dt)
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected)
result = func(Timestamp(dt))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected)
# test nano second is preserved
result = func(Timestamp(dt) + Nano(5))
self.assertTrue(isinstance(result, Timestamp))
if normalize is False:
self.assertEqual(result, expected + Nano(5))
else:
self.assertEqual(result, expected)
if isinstance(dt, np.datetime64):
# test tz when input is datetime or Timestamp
return
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
tz_obj = tslib.maybe_get_tz(tz)
dt_tz = tslib._localize_pydatetime(dt, tz_obj)
result = func(dt_tz)
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected_localize)
result = func(Timestamp(dt, tz=tz))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected_localize)
# test nano second is preserved
result = func(Timestamp(dt, tz=tz) + Nano(5))
self.assertTrue(isinstance(result, Timestamp))
if normalize is False:
self.assertEqual(result, expected_localize + Nano(5))
else:
self.assertEqual(result, expected_localize)
def test_apply(self):
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np.datetime64('2011-01-01 09:00Z')
for offset in self.offset_types:
for dt in [sdt, ndt]:
expected = self.expecteds[offset.__name__]
self._check_offsetfunc_works(offset, 'apply', dt, expected)
expected = Timestamp(expected.date())
self._check_offsetfunc_works(offset, 'apply', dt, expected,
normalize=True)
def test_rollforward(self):
expecteds = self.expecteds.copy()
# result will not be changed if the target is on the offset
no_changes = ['Day', 'MonthBegin', 'YearBegin', 'Week', 'Hour', 'Minute',
'Second', 'Milli', 'Micro', 'Nano', 'DateOffset']
for n in no_changes:
expecteds[n] = Timestamp('2011/01/01 09:00')
        # but they are changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2011-01-02 00:00:00'),
'DateOffset': Timestamp('2011-01-02 00:00:00'),
'MonthBegin': Timestamp('2011-02-01 00:00:00'),
'YearBegin': Timestamp('2012-01-01 00:00:00'),
'Week': Timestamp('2011-01-08 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np.datetime64('2011-01-01 09:00Z')
for offset in self.offset_types:
for dt in [sdt, ndt]:
expected = expecteds[offset.__name__]
self._check_offsetfunc_works(offset, 'rollforward', dt, expected)
expected = norm_expected[offset.__name__]
self._check_offsetfunc_works(offset, 'rollforward', dt, expected,
normalize=True)
def test_rollback(self):
expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthEnd': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthBegin': Timestamp('2010-12-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2010-12-01 09:00:00'),
'MonthEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BYearBegin': Timestamp('2010-01-01 09:00:00'),
'YearEnd': Timestamp('2010-12-31 09:00:00'),
'BYearEnd': Timestamp('2010-12-31 09:00:00'),
'QuarterBegin': Timestamp('2010-12-01 09:00:00'),
'BQuarterBegin': Timestamp('2010-12-01 09:00:00'),
'QuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BQuarterEnd': Timestamp('2010-12-31 09:00:00'),
'WeekOfMonth': Timestamp('2010-12-11 09:00:00'),
'LastWeekOfMonth': Timestamp('2010-12-25 09:00:00'),
'FY5253Quarter': Timestamp('2010-10-26 09:00:00'),
'FY5253': Timestamp('2010-01-26 09:00:00'),
'Easter': Timestamp('2010-04-04 09:00:00')}
# result will not be changed if the target is on the offset
for n in ['Day', 'MonthBegin', 'YearBegin', 'Week', 'Hour', 'Minute',
'Second', 'Milli', 'Micro', 'Nano', 'DateOffset']:
expecteds[n] = Timestamp('2011/01/01 09:00')
        # but they are changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2010-12-31 00:00:00'),
'DateOffset': Timestamp('2010-12-31 00:00:00'),
'MonthBegin': Timestamp('2010-12-01 00:00:00'),
'YearBegin': Timestamp('2010-01-01 00:00:00'),
'Week': Timestamp('2010-12-25 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np.datetime64('2011-01-01 09:00Z')
for offset in self.offset_types:
for dt in [sdt, ndt]:
expected = expecteds[offset.__name__]
self._check_offsetfunc_works(offset, 'rollback', dt, expected)
expected = norm_expected[offset.__name__]
self._check_offsetfunc_works(offset, 'rollback',
dt, expected, normalize=True)
def test_onOffset(self):
for offset in self.offset_types:
dt = self.expecteds[offset.__name__]
offset_s = self._get_offset(offset)
self.assertTrue(offset_s.onOffset(dt))
# when normalize=True, onOffset checks time is 00:00:00
offset_n = self._get_offset(offset, normalize=True)
self.assertFalse(offset_n.onOffset(dt))
date = datetime(dt.year, dt.month, dt.day)
self.assertTrue(offset_n.onOffset(date))
def test_add(self):
dt = datetime(2011, 1, 1, 9, 0)
for offset in self.offset_types:
offset_s = self._get_offset(offset)
expected = self.expecteds[offset.__name__]
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected)
tm._skip_if_no_pytz()
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected_localize)
# normalize=True
offset_s = self._get_offset(offset, normalize=True)
expected = Timestamp(expected.date())
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected)
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, expected_localize)
def test_pickle_v0_15_2(self):
offsets = {'DateOffset': DateOffset(years=1),
'MonthBegin': MonthBegin(1),
'Day': Day(1),
'YearBegin': YearBegin(1),
'Week': Week(1)}
pickle_path = os.path.join(tm.get_data_path(),
'dateoffset_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
# with open(pickle_path, 'wb') as f: pickle.dump(offsets, f)
#
tm.assert_dict_equal(offsets, read_pickle(pickle_path))
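# Illustrative sketch (not collected as a test): the vocabulary TestCommon
# exercises above -- rollforward/rollback move a date to the nearest on-offset
# point, onOffset reports membership, and normalize=True snaps the result to
# midnight.
def _example_roll_and_onoffset():
    d = datetime(2011, 1, 1, 9, 0)                                  # a Saturday
    assert not BDay().onOffset(d)
    assert BDay().onOffset(datetime(2011, 1, 3))                    # a Monday
    assert BDay().rollforward(d) == Timestamp('2011-01-03 09:00')   # next bday
    assert BDay().rollback(d) == Timestamp('2010-12-31 09:00')      # prev bday
    assert BDay(normalize=True).rollforward(d) == Timestamp('2011-01-03')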
class TestDateOffset(Base):
_multiprocess_can_split_ = True
def setUp(self):
self.d = Timestamp(datetime(2008, 1, 2))
_offset_map.clear()
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
def test_eq(self):
offset1 = DateOffset(days=1)
offset2 = DateOffset(days=365)
self.assertNotEqual(offset1, offset2)
class TestBusinessDay(Base):
_multiprocess_can_split_ = True
_offset = BDay
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
# equivalent in this special case
offset = BDay()
offset2 = BDay()
offset2.normalize = True
self.assertEqual(offset, offset2)
def test_repr(self):
self.assertEqual(repr(self.offset), '<BusinessDay>')
assert repr(self.offset2) == '<2 * BusinessDays>'
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10 * self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5 * BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(
BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(
BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = BDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 14))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 17))
offset = offsets.Day()
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 15))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 15))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2 * bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2 * bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + BDay(10)
self.assertEqual(result, datetime(2012, 11, 6))
result = dt + BDay(100) - BDay(100)
self.assertEqual(result, dt)
off = BDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
self.assertEqual(rs, xp)
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
self.assertEqual(rs, xp)
off = BDay() * 10
rs = datetime(2014, 1, 5) + off # see #5890
xp = datetime(2014, 1, 17)
self.assertEqual(rs, xp)
def test_apply_corner(self):
self.assertRaises(TypeError, BDay().apply, BMonthEnd())
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BDay()
offset2 = BDay()
self.assertFalse(offset1 != offset2)
class TestCustomBusinessDay(Base):
_multiprocess_can_split_ = True
_offset = CDay
def setUp(self):
self.d = datetime(2008, 1, 1)
self.nd = np.datetime64('2008-01-01 00:00:00Z')
tm._skip_if_no_cday()
self.offset = CDay()
self.offset2 = CDay(2)
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CDay()
offset2 = CDay()
offset2.normalize = True
self.assertEqual(offset, offset2)
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessDay>'
assert repr(self.offset2) == '<2 * CustomBusinessDays>'
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
self.assertEqual(self.offset2(self.nd), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + CDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10 * self.offset, self.d + CDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5 * CDay(-10)),
self.d + CDay(50))
def testRollback1(self):
self.assertEqual(CDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(
CDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(CDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(
CDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = CDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 14))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 17))
offset = offsets.Day()
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 15))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 15))
def test_onOffset(self):
tests = [(CDay(), datetime(2008, 1, 1), True),
(CDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
from pandas.core.datetools import cday
tests = []
tests.append((cday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2 * cday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-cday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2 * cday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((CDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CDay(10)
self.assertEqual(result, datetime(2012, 11, 6))
result = dt + CDay(100) - CDay(100)
self.assertEqual(result, dt)
off = CDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
self.assertEqual(rs, xp)
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
self.assertEqual(rs, xp)
def test_apply_corner(self):
self.assertRaises(Exception, CDay().apply, BMonthEnd())
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = CDay()
offset2 = CDay()
self.assertFalse(offset1 != offset2)
def test_holidays(self):
        # Define a custom business-day offset that skips the listed holidays
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
tday = CDay(holidays=holidays)
for year in range(2012, 2015):
dt = datetime(year, 4, 30)
xp = datetime(year, 5, 2)
rs = dt + tday
self.assertEqual(rs, xp)
def test_weekmask(self):
weekmask_saudi = 'Sat Sun Mon Tue Wed' # Thu-Fri Weekend
weekmask_uae = '1111001' # Fri-Sat Weekend
weekmask_egypt = [1,1,1,1,0,0,1] # Fri-Sat Weekend
bday_saudi = CDay(weekmask=weekmask_saudi)
bday_uae = CDay(weekmask=weekmask_uae)
bday_egypt = CDay(weekmask=weekmask_egypt)
dt = datetime(2013, 5, 1)
xp_saudi = datetime(2013, 5, 4)
xp_uae = datetime(2013, 5, 2)
xp_egypt = datetime(2013, 5, 2)
self.assertEqual(xp_saudi, dt + bday_saudi)
self.assertEqual(xp_uae, dt + bday_uae)
self.assertEqual(xp_egypt, dt + bday_egypt)
xp2 = datetime(2013, 5, 5)
self.assertEqual(xp2, dt + 2 * bday_saudi)
self.assertEqual(xp2, dt + 2 * bday_uae)
self.assertEqual(xp2, dt + 2 * bday_egypt)
def test_weekmask_and_holidays(self):
weekmask_egypt = 'Sun Mon Tue Wed Thu' # Fri-Sat Weekend
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime(2013, 4, 30)
xp_egypt = datetime(2013, 5, 5)
self.assertEqual(xp_egypt, dt + 2 * bday_egypt)
def test_calendar(self):
calendar = USFederalHolidayCalendar()
dt = datetime(2014, 1, 17)
assertEq(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = self.round_trip_pickle(obj)
self.assertEqual(unpickled, obj)
_check_roundtrip(self.offset)
_check_roundtrip(self.offset2)
_check_roundtrip(self.offset*2)
def test_pickle_compat_0_14_1(self):
hdays = [datetime(2013,1,1) for ele in range(4)]
pth = tm.get_data_path()
cday0_14_1 = read_pickle(os.path.join(pth, 'cday-0.14.1.pickle'))
cday = CDay(holidays=hdays)
self.assertEqual(cday, cday0_14_1)
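# Illustrative sketch (names are ours): the CustomBusinessDay knobs covered
# above -- a busdaycalendar-style weekmask picks the working days, and holidays
# removes individual dates from the schedule.
def _example_custom_business_day():
    bday_egypt = CDay(weekmask='Sun Mon Tue Wed Thu',   # Fri-Sat weekend
                      holidays=['2012-05-01'])
    # 2012-04-30 is a Monday; one business day later skips the May 1 holiday.
    assert datetime(2012, 4, 30) + bday_egypt == Timestamp('2012-05-02')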
class CustomBusinessMonthBase(object):
_multiprocess_can_split_ = True
def setUp(self):
self.d = datetime(2008, 1, 1)
tm._skip_if_no_cday()
self.offset = self._object()
self.offset2 = self._object(2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2,
self.d + self._object(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10 * self.offset,
self.d + self._object(10))
def testMult2(self):
self.assertEqual(self.d + (-5 * self._object(-10)),
self.d + self._object(50))
def test_offsets_compare_equal(self):
offset1 = self._object()
offset2 = self._object()
self.assertFalse(offset1 != offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = self.round_trip_pickle(obj)
self.assertEqual(unpickled, obj)
_check_roundtrip(self._object())
_check_roundtrip(self._object(2))
_check_roundtrip(self._object()*2)
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_object = CBMonthEnd
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CBMonthEnd()
offset2 = CBMonthEnd()
offset2.normalize = True
self.assertEqual(offset, offset2)
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthEnd>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthEnds>'
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 2, 29))
def testRollback1(self):
self.assertEqual(
CDay(10).rollback(datetime(2007, 12, 31)), datetime(2007, 12, 31))
def testRollback2(self):
self.assertEqual(CBMonthEnd(10).rollback(self.d),
datetime(2007,12,31))
def testRollforward1(self):
self.assertEqual(CBMonthEnd(10).rollforward(self.d), datetime(2008,1,31))
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 8, 31))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 28))
offset = offsets.Day()
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 15))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 15))
def test_onOffset(self):
tests = [(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
cbm = CBMonthEnd()
tests = []
tests.append((cbm,
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
tests.append((2 * cbm,
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31)}))
tests.append((-cbm,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31)}))
tests.append((-2 * cbm,
{datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31)}))
tests.append((CBMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
self.assertEqual(result, datetime(2013, 7, 31))
result = dt + CDay(100) - CDay(100)
self.assertEqual(result, dt)
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
self.assertEqual(rs, xp)
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
self.assertEqual(rs, xp)
def test_holidays(self):
        # Define a custom business month-end offset that skips the listed holidays
holidays = ['2012-01-31', datetime(2012, 2, 28),
np.datetime64('2012-02-29')]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012,1,1)
self.assertEqual(dt + bm_offset,datetime(2012,1,30))
self.assertEqual(dt + 2*bm_offset,datetime(2012,2,27))
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
self.assertEqual(DatetimeIndex(start='20120101',end='20130101',
freq=freq).tolist()[0],
datetime(2012,1,31))
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_object = CBMonthBegin
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CBMonthBegin()
offset2 = CBMonthBegin()
offset2.normalize = True
self.assertEqual(offset, offset2)
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthBegin>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthBegins>'
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 3, 3))
def testRollback1(self):
self.assertEqual(
CDay(10).rollback(datetime(2007, 12, 31)), datetime(2007, 12, 31))
def testRollback2(self):
self.assertEqual(CBMonthBegin(10).rollback(self.d),
datetime(2008,1,1))
def testRollforward1(self):
self.assertEqual(CBMonthBegin(10).rollforward(self.d), datetime(2008,1,1))
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 3))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 10, 1))
offset = offsets.Day()
result = offset.rollback(dt)
self.assertEqual(result, datetime(2012, 9, 15))
result = offset.rollforward(dt)
self.assertEqual(result, datetime(2012, 9, 15))
def test_onOffset(self):
tests = [(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
cbm = CBMonthBegin()
tests = []
tests.append((cbm,
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3)}))
tests.append((2 * cbm,
{datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1)}))
tests.append((-cbm,
{datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1)}))
tests.append((-2 * cbm,
{datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1)}))
tests.append((CBMonthBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
self.assertEqual(result, datetime(2013, 8, 1))
result = dt + CDay(100) - CDay(100)
self.assertEqual(result, dt)
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
self.assertEqual(rs, xp)
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
self.assertEqual(rs, xp)
def test_holidays(self):
        # Define a custom business month-begin offset that skips the listed holidays
holidays = ['2012-02-01', datetime(2012, 2, 2),
np.datetime64('2012-03-01')]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012,1,1)
self.assertEqual(dt + bm_offset,datetime(2012,1,2))
self.assertEqual(dt + 2*bm_offset,datetime(2012,2,3))
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
self.assertEqual(DatetimeIndex(start='20120101', end='20130101',
freq=cbmb).tolist()[0],
datetime(2012,1,3))
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected, ("\nExpected: %s\nActual: %s\nFor Offset: %s)"
"\nAt Date: %s" %
(expected, actual, offset, date))
class TestWeek(Base):
_offset = Week
def test_repr(self):
self.assertEqual(repr(Week(weekday=0)), "<Week: weekday=0>")
self.assertEqual(repr(Week(n=-1, weekday=0)), "<-1 * Week: weekday=0>")
self.assertEqual(repr(Week(n=-2, weekday=0)), "<-2 * Weeks: weekday=0>")
def test_corner(self):
self.assertRaises(ValueError, Week, weekday=7)
assertRaisesRegexp(ValueError, "Day must be", Week, weekday=-1)
def test_isAnchored(self):
self.assertTrue(Week(weekday=0).isAnchored())
self.assertFalse(Week().isAnchored())
self.assertFalse(Week(2, weekday=2).isAnchored())
self.assertFalse(Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
        tests.append((Week(-2, weekday=1),  # n=-2 -> go back two Tuesdays
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
for weekday in range(7):
offset = Week(weekday=weekday)
for day in range(1, 8):
date = datetime(2008, 1, day)
if day % 7 == weekday:
expected = True
else:
expected = False
assertOnOffset(offset, date, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = Week()
offset2 = Week()
self.assertFalse(offset1 != offset2)
class TestWeekOfMonth(Base):
_offset = WeekOfMonth
def test_constructor(self):
assertRaisesRegexp(ValueError, "^N cannot be 0", WeekOfMonth, n=0, week=1, weekday=1)
assertRaisesRegexp(ValueError, "^Week", WeekOfMonth, n=1, week=4, weekday=0)
assertRaisesRegexp(ValueError, "^Week", WeekOfMonth, n=1, week=-1, weekday=0)
assertRaisesRegexp(ValueError, "^Day", WeekOfMonth, n=1, week=0, weekday=-1)
assertRaisesRegexp(ValueError, "^Day", WeekOfMonth, n=1, week=0, weekday=7)
def test_repr(self):
self.assertEqual(repr(WeekOfMonth(weekday=1,week=2)), "<WeekOfMonth: week=2, weekday=1>")
def test_offset(self):
date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month
date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month
date4 = datetime(2011, 1, 25) # 4th Tuesday of Month
# see for loop for structure
test_cases = [
(-2, 2, 1, date1, datetime(2010, 11, 16)),
(-2, 2, 1, date2, datetime(2010, 11, 16)),
(-2, 2, 1, date3, datetime(2010, 11, 16)),
(-2, 2, 1, date4, datetime(2010, 12, 21)),
(-1, 2, 1, date1, datetime(2010, 12, 21)),
(-1, 2, 1, date2, datetime(2010, 12, 21)),
(-1, 2, 1, date3, datetime(2010, 12, 21)),
(-1, 2, 1, date4, datetime(2011, 1, 18)),
(1, 0, 0, date1, datetime(2011, 2, 7)),
(1, 0, 0, date2, datetime(2011, 2, 7)),
(1, 0, 0, date3, datetime(2011, 2, 7)),
(1, 0, 0, date4, datetime(2011, 2, 7)),
(1, 0, 1, date1, datetime(2011, 2, 1)),
(1, 0, 1, date2, datetime(2011, 2, 1)),
(1, 0, 1, date3, datetime(2011, 2, 1)),
(1, 0, 1, date4, datetime(2011, 2, 1)),
(1, 0, 2, date1, datetime(2011, 1, 5)),
(1, 0, 2, date2, datetime(2011, 2, 2)),
(1, 0, 2, date3, datetime(2011, 2, 2)),
(1, 0, 2, date4, datetime(2011, 2, 2)),
(1, 2, 1, date1, datetime(2011, 1, 18)),
(1, 2, 1, date2, datetime(2011, 1, 18)),
(1, 2, 1, date3, datetime(2011, 2, 15)),
(1, 2, 1, date4, datetime(2011, 2, 15)),
(2, 2, 1, date1, datetime(2011, 2, 15)),
(2, 2, 1, date2, datetime(2011, 2, 15)),
(2, 2, 1, date3, datetime(2011, 3, 15)),
(2, 2, 1, date4, datetime(2011, 3, 15)),
]
for n, week, weekday, date, expected in test_cases:
offset = WeekOfMonth(n, week=week, weekday=weekday)
assertEq(offset, date, expected)
# try subtracting
result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
self.assertEqual(result, datetime(2011, 1, 12))
result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
self.assertEqual(result, datetime(2011, 2, 2))
def test_onOffset(self):
test_cases = [
(0, 0, datetime(2011, 2, 7), True),
(0, 0, datetime(2011, 2, 6), False),
(0, 0, datetime(2011, 2, 14), False),
(1, 0, datetime(2011, 2, 14), True),
(0, 1, datetime(2011, 2, 1), True),
(0, 1, datetime(2011, 2, 8), False),
]
for week, weekday, date, expected in test_cases:
offset = WeekOfMonth(week=week, weekday=weekday)
self.assertEqual(offset.onOffset(date), expected)
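# Illustrative sketch (function name is ours): WeekOfMonth takes a zero-based
# week within the month and a dateutil weekday (0=Mon .. 6=Sun), so week=2,
# weekday=1 anchors on the third Tuesday of each month.
def _example_week_of_month():
    offset = WeekOfMonth(week=2, weekday=1)
    assert offset.onOffset(datetime(2011, 1, 18))    # third Tuesday of Jan 2011
    assert offset.rollforward(datetime(2011, 1, 1)) == Timestamp('2011-01-18')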
class TestLastWeekOfMonth(Base):
_offset = LastWeekOfMonth
def test_constructor(self):
        assertRaisesRegexp(ValueError, "^N cannot be 0",
                           LastWeekOfMonth, n=0, weekday=1)
assertRaisesRegexp(ValueError, "^Day", LastWeekOfMonth, n=1, weekday=-1)
assertRaisesRegexp(ValueError, "^Day", LastWeekOfMonth, n=1, weekday=7)
def test_offset(self):
#### Saturday
last_sat = datetime(2013,8,31)
next_sat = datetime(2013,9,28)
offset_sat = LastWeekOfMonth(n=1, weekday=5)
one_day_before = (last_sat + timedelta(days=-1))
self.assertEqual(one_day_before + offset_sat, last_sat)
one_day_after = (last_sat + timedelta(days=+1))
self.assertEqual(one_day_after + offset_sat, next_sat)
        # Test on that day
self.assertEqual(last_sat + offset_sat, next_sat)
#### Thursday
offset_thur = LastWeekOfMonth(n=1, weekday=3)
last_thurs = datetime(2013,1,31)
next_thurs = datetime(2013,2,28)
one_day_before = last_thurs + timedelta(days=-1)
self.assertEqual(one_day_before + offset_thur, last_thurs)
one_day_after = last_thurs + timedelta(days=+1)
self.assertEqual(one_day_after + offset_thur, next_thurs)
# Test on that day
self.assertEqual(last_thurs + offset_thur, next_thurs)
three_before = last_thurs + timedelta(days=-3)
self.assertEqual(three_before + offset_thur, last_thurs)
two_after = last_thurs + timedelta(days=+2)
self.assertEqual(two_after + offset_thur, next_thurs)
offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
self.assertEqual(datetime(2013,7,31) + offset_sunday, datetime(2013,8,25))
def test_onOffset(self):
test_cases = [
(WeekDay.SUN, datetime(2013, 1, 27), True),
(WeekDay.SAT, datetime(2013, 3, 30), True),
            (WeekDay.MON, datetime(2013, 2, 18), False),  # not the last Mon
            (WeekDay.SUN, datetime(2013, 2, 25), False),  # not a SUN
(WeekDay.MON, datetime(2013, 2, 25), True),
(WeekDay.SAT, datetime(2013, 11, 30), True),
(WeekDay.SAT, datetime(2006, 8, 26), True),
(WeekDay.SAT, datetime(2007, 8, 25), True),
(WeekDay.SAT, datetime(2008, 8, 30), True),
(WeekDay.SAT, datetime(2009, 8, 29), True),
(WeekDay.SAT, datetime(2010, 8, 28), True),
(WeekDay.SAT, datetime(2011, 8, 27), True),
(WeekDay.SAT, datetime(2019, 8, 31), True),
]
for weekday, date, expected in test_cases:
offset = LastWeekOfMonth(weekday=weekday)
self.assertEqual(offset.onOffset(date), expected, msg=date)
class TestBMonthBegin(Base):
_offset = BMonthBegin
def test_offset(self):
tests = []
tests.append((BMonthBegin(),
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 1): datetime(2006, 10, 2),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1)}))
tests.append((BMonthBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 10, 2): datetime(2006, 10, 2),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 15): datetime(2006, 10, 2)}))
tests.append((BMonthBegin(2),
{datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 1, 15): datetime(2008, 3, 3),
datetime(2006, 12, 29): datetime(2007, 2, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1)}))
tests.append((BMonthBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 2),
datetime(2008, 6, 1): datetime(2008, 5, 1),
datetime(2008, 3, 10): datetime(2008, 3, 3),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 30): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 1)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [(BMonthBegin(), datetime(2007, 12, 31), False),
(BMonthBegin(), datetime(2008, 1, 1), True),
(BMonthBegin(), datetime(2001, 4, 2), True),
(BMonthBegin(), datetime(2008, 3, 3), True)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthBegin()
offset2 = BMonthBegin()
self.assertFalse(offset1 != offset2)
class TestBMonthEnd(Base):
_offset = BMonthEnd
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + BMonthEnd(normalize=True)
expected = dt.replace(hour=0) + BMonthEnd()
self.assertEqual(result, expected)
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthEnd()
offset2 = BMonthEnd()
self.assertFalse(offset1 != offset2)
class TestMonthBegin(Base):
_offset = MonthBegin
def test_offset(self):
tests = []
# NOTE: I'm not entirely happy with the logic here for Begin -ss
# see thread 'offset conventions' on the ML
tests.append((MonthBegin(),
{datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 2, 1): datetime(2008, 3, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2007, 1, 31): datetime(2007, 2, 1)}))
tests.append((MonthBegin(0),
{datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 12, 3): datetime(2007, 1, 1),
datetime(2007, 1, 31): datetime(2007, 2, 1)}))
tests.append((MonthBegin(2),
{datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 1, 31): datetime(2008, 3, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 12, 28): datetime(2008, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1)}))
tests.append((MonthBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 5, 31): datetime(2008, 5, 1),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 1, 2): datetime(2006, 1, 1)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
class TestMonthEnd(Base):
_offset = MonthEnd
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# def test_day_of_month(self):
# dt = datetime(2007, 1, 1)
# offset = MonthEnd(day=20)
# result = dt + offset
# self.assertEqual(result, datetime(2007, 1, 20))
# result = result + offset
# self.assertEqual(result, datetime(2007, 2, 20))
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + MonthEnd(normalize=True)
expected = dt.replace(hour=0) + MonthEnd()
self.assertEqual(result, expected)
def test_onOffset(self):
tests = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
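# Illustrative sketch (mirrors test_normalize above): with normalize=True the
# time-of-day is dropped, so the result is the month end at midnight rather
# than at the input's hour.
def _example_month_end_normalize():
    assert datetime(2007, 1, 1, 3) + MonthEnd() == Timestamp('2007-01-31 03:00')
    assert datetime(2007, 1, 1, 3) + MonthEnd(normalize=True) == Timestamp('2007-01-31')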
class TestBQuarterBegin(Base):
_offset = BQuarterBegin
def test_repr(self):
self.assertEqual(repr(BQuarterBegin()),"<BusinessQuarterBegin: startingMonth=3>")
self.assertEqual(repr(BQuarterBegin(startingMonth=3)), "<BusinessQuarterBegin: startingMonth=3>")
self.assertEqual(repr(BQuarterBegin(startingMonth=1)), "<BusinessQuarterBegin: startingMonth=1>")
def test_isAnchored(self):
self.assertTrue(BQuarterBegin(startingMonth=1).isAnchored())
self.assertTrue(BQuarterBegin().isAnchored())
self.assertFalse(BQuarterBegin(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((BQuarterBegin(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 1, 31): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2007, 3, 15): datetime(2007, 4, 2),
datetime(2007, 2, 28): datetime(2007, 4, 2),
datetime(2007, 1, 1): datetime(2007, 4, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 7, 2),
datetime(2008, 4, 30): datetime(2008, 7, 1), }))
tests.append((BQuarterBegin(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 8, 15): datetime(2008, 11, 3),
datetime(2008, 9, 15): datetime(2008, 11, 3),
datetime(2008, 11, 1): datetime(2008, 11, 3),
datetime(2008, 4, 30): datetime(2008, 5, 1), }))
tests.append((BQuarterBegin(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2007, 12, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 1, 15): datetime(2008, 4, 1),
datetime(2008, 2, 27): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 4, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 2): datetime(2007, 7, 2), }))
tests.append((BQuarterBegin(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2007, 7, 3): datetime(2007, 7, 2),
datetime(2007, 4, 3): datetime(2007, 4, 2),
datetime(2007, 7, 2): datetime(2007, 4, 2),
datetime(2008, 4, 1): datetime(2008, 1, 1), }))
tests.append((BQuarterBegin(startingMonth=1, n=2),
{datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 1, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2007, 3, 31): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 10, 1),
datetime(2008, 4, 30): datetime(2008, 10, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = BQuarterBegin(n=-1, startingMonth=1)
self.assertEqual(datetime(2007, 4, 3) + offset, datetime(2007, 4, 2))
class TestBQuarterEnd(Base):
_offset = BQuarterEnd
def test_repr(self):
self.assertEqual(repr(BQuarterEnd()),"<BusinessQuarterEnd: startingMonth=3>")
self.assertEqual(repr(BQuarterEnd(startingMonth=3)), "<BusinessQuarterEnd: startingMonth=3>")
self.assertEqual(repr(BQuarterEnd(startingMonth=1)), "<BusinessQuarterEnd: startingMonth=1>")
def test_isAnchored(self):
self.assertTrue(BQuarterEnd(startingMonth=1).isAnchored())
self.assertTrue(BQuarterEnd().isAnchored())
self.assertFalse(BQuarterEnd(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((BQuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31), }))
tests.append((BQuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30), }))
tests.append((BQuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30), }))
tests.append((BQuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31), }))
tests.append((BQuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 1, 31) + offset, datetime(2010, 1, 29))
def test_onOffset(self):
tests = [
(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def makeFY5253LastOfMonthQuarter(*args, **kwds):
return FY5253Quarter(*args, variation="last", **kwds)
def makeFY5253NearestEndMonthQuarter(*args, **kwds):
return FY5253Quarter(*args, variation="nearest", **kwds)
def makeFY5253NearestEndMonth(*args, **kwds):
return FY5253(*args, variation="nearest", **kwds)
def makeFY5253LastOfMonth(*args, **kwds):
return FY5253(*args, variation="last", **kwds)
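# The factory helpers above pin down the two FY5253 ("52-53 week", a.k.a. 4-4-5
# calendar) variations exercised by the tests below.  Roughly:
#   variation="last"    -> the fiscal year ends on the last <weekday> that still
#                          falls inside startingMonth (e.g. the last Saturday of
#                          August, see TestFY5253LastOfMonth).
#   variation="nearest" -> the fiscal year ends on the <weekday> nearest to the
#                          last day of startingMonth, which may spill a few days
#                          into the next month (e.g. 2011-09-03 for an
#                          August/Saturday calendar, see TestFY5253NearestEndMonth).
# FY5253Quarter splits such a year into four 13-week quarters and assigns the
# occasional 53rd week to the quarter given by qtr_with_extra_week.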
class TestFY5253LastOfMonth(Base):
def test_onOffset(self):
offset_lom_sat_aug = makeFY5253LastOfMonth(1, startingMonth=8, weekday=WeekDay.SAT)
offset_lom_sat_sep = makeFY5253LastOfMonth(1, startingMonth=9, weekday=WeekDay.SAT)
tests = [
#From Wikipedia (see: http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end)
(offset_lom_sat_aug, datetime(2006, 8, 26), True),
(offset_lom_sat_aug, datetime(2007, 8, 25), True),
(offset_lom_sat_aug, datetime(2008, 8, 30), True),
(offset_lom_sat_aug, datetime(2009, 8, 29), True),
(offset_lom_sat_aug, datetime(2010, 8, 28), True),
(offset_lom_sat_aug, datetime(2011, 8, 27), True),
(offset_lom_sat_aug, datetime(2012, 8, 25), True),
(offset_lom_sat_aug, datetime(2013, 8, 31), True),
(offset_lom_sat_aug, datetime(2014, 8, 30), True),
(offset_lom_sat_aug, datetime(2015, 8, 29), True),
(offset_lom_sat_aug, datetime(2016, 8, 27), True),
(offset_lom_sat_aug, datetime(2017, 8, 26), True),
(offset_lom_sat_aug, datetime(2018, 8, 25), True),
(offset_lom_sat_aug, datetime(2019, 8, 31), True),
(offset_lom_sat_aug, datetime(2006, 8, 27), False),
(offset_lom_sat_aug, datetime(2007, 8, 28), False),
(offset_lom_sat_aug, datetime(2008, 8, 31), False),
(offset_lom_sat_aug, datetime(2009, 8, 30), False),
(offset_lom_sat_aug, datetime(2010, 8, 29), False),
(offset_lom_sat_aug, datetime(2011, 8, 28), False),
(offset_lom_sat_aug, datetime(2006, 8, 25), False),
(offset_lom_sat_aug, datetime(2007, 8, 24), False),
(offset_lom_sat_aug, datetime(2008, 8, 29), False),
(offset_lom_sat_aug, datetime(2009, 8, 28), False),
(offset_lom_sat_aug, datetime(2010, 8, 27), False),
(offset_lom_sat_aug, datetime(2011, 8, 26), False),
(offset_lom_sat_aug, datetime(2019, 8, 30), False),
#From GMCR (see for example: http://yahoo.brand.edgar-online.com/Default.aspx?companyid=3184&formtypeID=7)
(offset_lom_sat_sep, datetime(2010, 9, 25), True),
(offset_lom_sat_sep, datetime(2011, 9, 24), True),
(offset_lom_sat_sep, datetime(2012, 9, 29), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
offset_lom_aug_sat = makeFY5253LastOfMonth(startingMonth=8, weekday=WeekDay.SAT)
offset_lom_aug_sat_1 = makeFY5253LastOfMonth(n=1, startingMonth=8, weekday=WeekDay.SAT)
date_seq_lom_aug_sat = [datetime(2006, 8, 26), datetime(2007, 8, 25),
datetime(2008, 8, 30), datetime(2009, 8, 29),
datetime(2010, 8, 28), datetime(2011, 8, 27),
datetime(2012, 8, 25), datetime(2013, 8, 31),
datetime(2014, 8, 30), datetime(2015, 8, 29),
datetime(2016, 8, 27)]
tests = [
(offset_lom_aug_sat, date_seq_lom_aug_sat),
(offset_lom_aug_sat_1, date_seq_lom_aug_sat),
(offset_lom_aug_sat, [datetime(2006, 8, 25)] + date_seq_lom_aug_sat),
(offset_lom_aug_sat_1, [datetime(2006, 8, 27)] + date_seq_lom_aug_sat[1:]),
(makeFY5253LastOfMonth(n=-1, startingMonth=8, weekday=WeekDay.SAT), list(reversed(date_seq_lom_aug_sat))),
]
for test in tests:
offset, data = test
current = data[0]
for datum in data[1:]:
current = current + offset
self.assertEqual(current, datum)
class TestFY5253NearestEndMonth(Base):
def test_get_target_month_end(self):
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT).get_target_month_end(datetime(2013,1,1)), datetime(2013,8,31))
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=12, weekday=WeekDay.SAT).get_target_month_end(datetime(2013,1,1)), datetime(2013,12,31))
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=2, weekday=WeekDay.SAT).get_target_month_end(datetime(2013,1,1)), datetime(2013,2,28))
def test_get_year_end(self):
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT).get_year_end(datetime(2013,1,1)), datetime(2013,8,31))
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SUN).get_year_end(datetime(2013,1,1)), datetime(2013,9,1))
self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.FRI).get_year_end(datetime(2013,1,1)), datetime(2013,8,30))
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest")
self.assertEqual(offset_n.get_year_end(datetime(2012,1,1)), datetime(2013,1,1))
self.assertEqual(offset_n.get_year_end(datetime(2012,1,10)), datetime(2013,1,1))
self.assertEqual(offset_n.get_year_end(datetime(2013,1,1)), datetime(2013,12,31))
self.assertEqual(offset_n.get_year_end(datetime(2013,1,2)), datetime(2013,12,31))
self.assertEqual(offset_n.get_year_end(datetime(2013,1,3)), datetime(2013,12,31))
self.assertEqual(offset_n.get_year_end(datetime(2013,1,10)), datetime(2013,12,31))
JNJ = FY5253(n=1, startingMonth=12, weekday=6, variation="nearest")
self.assertEqual(JNJ.get_year_end(datetime(2006, 1, 1)), datetime(2006, 12, 31))
def test_onOffset(self):
offset_lom_aug_sat = makeFY5253NearestEndMonth(1, startingMonth=8, weekday=WeekDay.SAT)
offset_lom_aug_thu = makeFY5253NearestEndMonth(1, startingMonth=8, weekday=WeekDay.THU)
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest")
tests = [
# From Wikipedia (see: http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Saturday_nearest_the_end_of_month)
# 2006-09-02 2006 September 2
# 2007-09-01 2007 September 1
# 2008-08-30 2008 August 30 (leap year)
# 2009-08-29 2009 August 29
# 2010-08-28 2010 August 28
# 2011-09-03 2011 September 3
# 2012-09-01 2012 September 1 (leap year)
# 2013-08-31 2013 August 31
# 2014-08-30 2014 August 30
# 2015-08-29 2015 August 29
# 2016-09-03 2016 September 3 (leap year)
# 2017-09-02 2017 September 2
# 2018-09-01 2018 September 1
# 2019-08-31 2019 August 31
(offset_lom_aug_sat, datetime(2006, 9, 2), True),
(offset_lom_aug_sat, datetime(2007, 9, 1), True),
(offset_lom_aug_sat, datetime(2008, 8, 30), True),
(offset_lom_aug_sat, datetime(2009, 8, 29), True),
(offset_lom_aug_sat, datetime(2010, 8, 28), True),
(offset_lom_aug_sat, datetime(2011, 9, 3), True),
(offset_lom_aug_sat, datetime(2016, 9, 3), True),
(offset_lom_aug_sat, datetime(2017, 9, 2), True),
(offset_lom_aug_sat, datetime(2018, 9, 1), True),
(offset_lom_aug_sat, datetime(2019, 8, 31), True),
(offset_lom_aug_sat, datetime(2006, 8, 27), False),
(offset_lom_aug_sat, datetime(2007, 8, 28), False),
(offset_lom_aug_sat, datetime(2008, 8, 31), False),
(offset_lom_aug_sat, datetime(2009, 8, 30), False),
(offset_lom_aug_sat, datetime(2010, 8, 29), False),
(offset_lom_aug_sat, datetime(2011, 8, 28), False),
(offset_lom_aug_sat, datetime(2006, 8, 25), False),
(offset_lom_aug_sat, datetime(2007, 8, 24), False),
(offset_lom_aug_sat, datetime(2008, 8, 29), False),
(offset_lom_aug_sat, datetime(2009, 8, 28), False),
(offset_lom_aug_sat, datetime(2010, 8, 27), False),
(offset_lom_aug_sat, datetime(2011, 8, 26), False),
(offset_lom_aug_sat, datetime(2019, 8, 30), False),
#From Micron, see: http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
(offset_lom_aug_thu, datetime(2012, 8, 30), True),
(offset_lom_aug_thu, datetime(2011, 9, 1), True),
(offset_n, datetime(2012, 12, 31), False),
(offset_n, datetime(2013, 1, 1), True),
(offset_n, datetime(2013, 1, 2), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
date_seq_nem_8_sat = [datetime(2006, 9, 2), datetime(2007, 9, 1),
datetime(2008, 8, 30), datetime(2009, 8, 29),
datetime(2010, 8, 28), datetime(2011, 9, 3)]
JNJ = [datetime(2005, 1, 2), datetime(2006, 1, 1),
datetime(2006, 12, 31), datetime(2007, 12, 30),
datetime(2008, 12, 28), datetime(2010, 1, 3),
datetime(2011, 1, 2), datetime(2012, 1, 1),
datetime(2012, 12, 30)]
DEC_SAT = FY5253(n=-1, startingMonth=12, weekday=5, variation="nearest")
tests = [
(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT), date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT), date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT), [datetime(2006, 9, 1)] + date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT), [datetime(2006, 9, 3)] + date_seq_nem_8_sat[1:]),
(makeFY5253NearestEndMonth(n=-1, startingMonth=8, weekday=WeekDay.SAT), list(reversed(date_seq_nem_8_sat))),
(makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN), JNJ),
(makeFY5253NearestEndMonth(n=-1, startingMonth=12, weekday=WeekDay.SUN), list(reversed(JNJ))),
(makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN), [datetime(2005,1,2), datetime(2006, 1, 1)]),
(makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN), [datetime(2006,1,2), datetime(2006, 12, 31)]),
(DEC_SAT, [datetime(2013,1,15), datetime(2012,12,29)])
]
for test in tests:
offset, data = test
current = data[0]
for datum in data[1:]:
current = current + offset
self.assertEqual(current, datum)
class TestFY5253LastOfMonthQuarter(Base):
def test_isAnchored(self):
self.assertTrue(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4).isAnchored())
self.assertTrue(makeFY5253LastOfMonthQuarter(weekday=WeekDay.SAT, startingMonth=3, qtr_with_extra_week=4).isAnchored())
self.assertFalse(makeFY5253LastOfMonthQuarter(2, startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4).isAnchored())
def test_equality(self):
self.assertEqual(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4), makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4))
self.assertNotEqual(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4), makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SUN, qtr_with_extra_week=4))
self.assertNotEqual(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4), makeFY5253LastOfMonthQuarter(startingMonth=2, weekday=WeekDay.SAT, qtr_with_extra_week=4))
def test_offset(self):
offset = makeFY5253LastOfMonthQuarter(1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
offset2 = makeFY5253LastOfMonthQuarter(2, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
offset4 = makeFY5253LastOfMonthQuarter(4, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
offset_neg1 = makeFY5253LastOfMonthQuarter(-1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
offset_neg2 = makeFY5253LastOfMonthQuarter(-2, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
GMCR = [datetime(2010, 3, 27),
datetime(2010, 6, 26),
datetime(2010, 9, 25),
datetime(2010, 12, 25),
datetime(2011, 3, 26),
datetime(2011, 6, 25),
datetime(2011, 9, 24),
datetime(2011, 12, 24),
datetime(2012, 3, 24),
datetime(2012, 6, 23),
datetime(2012, 9, 29),
datetime(2012, 12, 29),
datetime(2013, 3, 30),
datetime(2013, 6, 29)]
assertEq(offset, base=GMCR[0], expected=GMCR[1])
assertEq(offset, base=GMCR[0] + relativedelta(days=-1), expected=GMCR[0])
assertEq(offset, base=GMCR[1], expected=GMCR[2])
assertEq(offset2, base=GMCR[0], expected=GMCR[2])
assertEq(offset4, base=GMCR[0], expected=GMCR[4])
assertEq(offset_neg1, base=GMCR[-1], expected=GMCR[-2])
assertEq(offset_neg1, base=GMCR[-1] + relativedelta(days=+1), expected=GMCR[-1])
assertEq(offset_neg2, base=GMCR[-1], expected=GMCR[-3])
date = GMCR[0] + relativedelta(days=-1)
for expected in GMCR:
assertEq(offset, date, expected)
date = date + offset
date = GMCR[-1] + relativedelta(days=+1)
for expected in reversed(GMCR):
assertEq(offset_neg1, date, expected)
date = date + offset_neg1
def test_onOffset(self):
lomq_aug_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=8, weekday=WeekDay.SAT, qtr_with_extra_week=4)
lomq_sep_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4)
tests = [
#From Wikipedia
(lomq_aug_sat_4, datetime(2006, 8, 26), True),
(lomq_aug_sat_4, datetime(2007, 8, 25), True),
(lomq_aug_sat_4, datetime(2008, 8, 30), True),
(lomq_aug_sat_4, datetime(2009, 8, 29), True),
(lomq_aug_sat_4, datetime(2010, 8, 28), True),
(lomq_aug_sat_4, datetime(2011, 8, 27), True),
(lomq_aug_sat_4, datetime(2019, 8, 31), True),
(lomq_aug_sat_4, datetime(2006, 8, 27), False),
(lomq_aug_sat_4, datetime(2007, 8, 28), False),
(lomq_aug_sat_4, datetime(2008, 8, 31), False),
(lomq_aug_sat_4, datetime(2009, 8, 30), False),
(lomq_aug_sat_4, datetime(2010, 8, 29), False),
(lomq_aug_sat_4, datetime(2011, 8, 28), False),
(lomq_aug_sat_4, datetime(2006, 8, 25), False),
(lomq_aug_sat_4, datetime(2007, 8, 24), False),
(lomq_aug_sat_4, datetime(2008, 8, 29), False),
(lomq_aug_sat_4, datetime(2009, 8, 28), False),
(lomq_aug_sat_4, datetime(2010, 8, 27), False),
(lomq_aug_sat_4, datetime(2011, 8, 26), False),
(lomq_aug_sat_4, datetime(2019, 8, 30), False),
#From GMCR
(lomq_sep_sat_4, datetime(2010, 9, 25), True),
(lomq_sep_sat_4, datetime(2011, 9, 24), True),
(lomq_sep_sat_4, datetime(2012, 9, 29), True),
(lomq_sep_sat_4, datetime(2013, 6, 29), True),
(lomq_sep_sat_4, datetime(2012, 6, 23), True),
(lomq_sep_sat_4, datetime(2012, 6, 30), False),
(lomq_sep_sat_4, datetime(2013, 3, 30), True),
(lomq_sep_sat_4, datetime(2012, 3, 24), True),
(lomq_sep_sat_4, datetime(2012, 12, 29), True),
(lomq_sep_sat_4, datetime(2011, 12, 24), True),
#INTC (extra week in Q1)
#See: http://www.intc.com/releasedetail.cfm?ReleaseID=542844
(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2011, 4, 2), True),
#see: http://google.brand.edgar-online.com/?sym=INTC&formtypeID=7
(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2012, 12, 29), True),
(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2011, 12, 31), True),
(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2010, 12, 25), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_year_has_extra_week(self):
#End of long Q1
self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2011, 4, 2)))
#Start of long Q1
self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 26)))
#End of year before year with long Q1
self.assertFalse(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 25)))
for year in [x for x in range(1994, 2011+1) if x not in [2011, 2005, 2000, 1994]]:
self.assertFalse(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(year, 4, 2)))
#Other long years
self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2005, 4, 2)))
self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2000, 4, 2)))
self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(1994, 4, 2)))
def test_get_weeks(self):
sat_dec_1 = makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1)
sat_dec_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=4)
self.assertEqual(sat_dec_1.get_weeks(datetime(2011, 4, 2)), [14, 13, 13, 13])
self.assertEqual(sat_dec_4.get_weeks(datetime(2011, 4, 2)), [13, 13, 13, 14])
self.assertEqual(sat_dec_1.get_weeks(datetime(2010, 12, 25)), [13, 13, 13, 13])
class TestFY5253NearestEndMonthQuarter(Base):
def test_onOffset(self):
offset_nem_sat_aug_4 = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, weekday=WeekDay.SAT, qtr_with_extra_week=4)
offset_nem_thu_aug_4 = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, weekday=WeekDay.THU, qtr_with_extra_week=4)
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest", qtr_with_extra_week=4)
tests = [
#From Wikipedia
(offset_nem_sat_aug_4, datetime(2006, 9, 2), True),
(offset_nem_sat_aug_4, datetime(2007, 9, 1), True),
(offset_nem_sat_aug_4, datetime(2008, 8, 30), True),
(offset_nem_sat_aug_4, datetime(2009, 8, 29), True),
(offset_nem_sat_aug_4, datetime(2010, 8, 28), True),
(offset_nem_sat_aug_4, datetime(2011, 9, 3), True),
(offset_nem_sat_aug_4, datetime(2016, 9, 3), True),
(offset_nem_sat_aug_4, datetime(2017, 9, 2), True),
(offset_nem_sat_aug_4, datetime(2018, 9, 1), True),
(offset_nem_sat_aug_4, datetime(2019, 8, 31), True),
(offset_nem_sat_aug_4, datetime(2006, 8, 27), False),
(offset_nem_sat_aug_4, datetime(2007, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2008, 8, 31), False),
(offset_nem_sat_aug_4, datetime(2009, 8, 30), False),
(offset_nem_sat_aug_4, datetime(2010, 8, 29), False),
(offset_nem_sat_aug_4, datetime(2011, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2006, 8, 25), False),
(offset_nem_sat_aug_4, datetime(2007, 8, 24), False),
(offset_nem_sat_aug_4, datetime(2008, 8, 29), False),
(offset_nem_sat_aug_4, datetime(2009, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2010, 8, 27), False),
(offset_nem_sat_aug_4, datetime(2011, 8, 26), False),
(offset_nem_sat_aug_4, datetime(2019, 8, 30), False),
#From Micron, see: http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
(offset_nem_thu_aug_4, datetime(2012, 8, 30), True),
(offset_nem_thu_aug_4, datetime(2011, 9, 1), True),
#See: http://google.brand.edgar-online.com/?sym=MU&formtypeID=13
(offset_nem_thu_aug_4, datetime(2013, 5, 30), True),
(offset_nem_thu_aug_4, datetime(2013, 2, 28), True),
(offset_nem_thu_aug_4, datetime(2012, 11, 29), True),
(offset_nem_thu_aug_4, datetime(2012, 5, 31), True),
(offset_nem_thu_aug_4, datetime(2007, 3, 1), True),
(offset_nem_thu_aug_4, datetime(1994, 3, 3), True),
(offset_n, datetime(2012, 12, 31), False),
(offset_n, datetime(2013, 1, 1), True),
(offset_n, datetime(2013, 1, 2), False)
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_offset(self):
offset = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, weekday=WeekDay.THU, qtr_with_extra_week=4)
MU = [datetime(2012, 5, 31), datetime(2012, 8, 30), datetime(2012, 11, 29), datetime(2013, 2, 28), datetime(2013, 5, 30)]
date = MU[0] + relativedelta(days=-1)
for expected in MU:
assertEq(offset, date, expected)
date = date + offset
assertEq(offset, datetime(2012, 5, 31), datetime(2012, 8, 30))
assertEq(offset, datetime(2012, 5, 30), datetime(2012, 5, 31))
offset2 = FY5253Quarter(weekday=5, startingMonth=12,
variation="last", qtr_with_extra_week=4)
assertEq(offset2, datetime(2013,1,15), datetime(2013, 3, 30))
class TestQuarterBegin(Base):
def test_repr(self):
self.assertEqual(repr(QuarterBegin()), "<QuarterBegin: startingMonth=3>")
self.assertEqual(repr(QuarterBegin(startingMonth=3)), "<QuarterBegin: startingMonth=3>")
self.assertEqual(repr(QuarterBegin(startingMonth=1)),"<QuarterBegin: startingMonth=1>")
def test_isAnchored(self):
self.assertTrue(QuarterBegin(startingMonth=1).isAnchored())
self.assertTrue(QuarterBegin().isAnchored())
self.assertFalse(QuarterBegin(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((QuarterBegin(startingMonth=1),
{datetime(2007, 12, 1): datetime(2008, 1, 1),
datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 1): datetime(2008, 7, 1), }))
tests.append((QuarterBegin(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 4, 30): datetime(2008, 5, 1), }))
tests.append((QuarterBegin(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 12, 1): datetime(2009, 1, 1),
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2008, 4, 30): datetime(2008, 4, 1), }))
tests.append((QuarterBegin(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2008, 4, 30): datetime(2008, 4, 1),
datetime(2008, 7, 1): datetime(2008, 4, 1)}))
tests.append((QuarterBegin(startingMonth=1, n=2),
{datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 2, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2008, 3, 31): datetime(2008, 7, 1),
datetime(2008, 4, 15): datetime(2008, 10, 1),
datetime(2008, 4, 1): datetime(2008, 10, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = QuarterBegin(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 2, 1) + offset, datetime(2010, 1, 1))
class TestQuarterEnd(Base):
_offset = QuarterEnd
def test_repr(self):
self.assertEqual(repr(QuarterEnd()), "<QuarterEnd: startingMonth=3>")
self.assertEqual(repr(QuarterEnd(startingMonth=3)), "<QuarterEnd: startingMonth=3>")
self.assertEqual(repr(QuarterEnd(startingMonth=1)), "<QuarterEnd: startingMonth=1>")
def test_isAnchored(self):
self.assertTrue(QuarterEnd(startingMonth=1).isAnchored())
self.assertTrue(QuarterEnd().isAnchored())
self.assertFalse(QuarterEnd(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((QuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31), }))
tests.append((QuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 5, 31),
datetime(2008, 3, 31): datetime(2008, 5, 31),
datetime(2008, 4, 15): datetime(2008, 5, 31),
datetime(2008, 4, 30): datetime(2008, 5, 31), }))
tests.append((QuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30), }))
tests.append((QuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),
datetime(2008, 7, 1): datetime(2008, 4, 30)}))
tests.append((QuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = QuarterEnd(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 2, 1) + offset, datetime(2010, 1, 31))
def test_onOffset(self):
        tests = [
            (QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
            (QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
            (QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
            (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
            (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
            (QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
            (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
            (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 31), False),
            (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
            (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
            (QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
            (QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
            (QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
            (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
            (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
            (QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
            (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False),
            (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True),
            (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
            (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
            (QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
            (QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
            (QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
            (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
            (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
            (QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
            (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
            (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 31), False),
            (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), False),
            (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), True),
        ]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearBegin(Base):
_offset = BYearBegin
def test_misspecified(self):
self.assertRaises(ValueError, BYearBegin, month=13)
self.assertRaises(ValueError, BYearEnd, month=13)
def test_offset(self):
tests = []
tests.append((BYearBegin(),
{datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2011, 1, 1): datetime(2011, 1, 3),
datetime(2011, 1, 3): datetime(2012, 1, 2),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2)
}
))
tests.append((BYearBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2), }))
tests.append((BYearBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 1, 2),
datetime(2009, 1, 4): datetime(2009, 1, 1),
datetime(2009, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 2),
datetime(2006, 12, 30): datetime(2006, 1, 2),
datetime(2006, 1, 1): datetime(2005, 1, 3), }))
tests.append((BYearBegin(-2),
{datetime(2007, 1, 1): datetime(2005, 1, 3),
datetime(2007, 6, 30): datetime(2006, 1, 2),
datetime(2008, 12, 31): datetime(2007, 1, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
class TestYearBegin(Base):
_offset = YearBegin
def test_misspecified(self):
self.assertRaises(ValueError, YearBegin, month=13)
def test_offset(self):
tests = []
tests.append((YearBegin(),
{datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1), }))
tests.append((YearBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1), }))
tests.append((YearBegin(3),
{datetime(2008, 1, 1): datetime(2011, 1, 1),
datetime(2008, 6, 30): datetime(2011, 1, 1),
datetime(2008, 12, 31): datetime(2011, 1, 1),
datetime(2005, 12, 30): datetime(2008, 1, 1),
datetime(2005, 12, 31): datetime(2008, 1, 1), }))
tests.append((YearBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 1, 1),
datetime(2007, 1, 15): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 1),
datetime(2006, 12, 30): datetime(2006, 1, 1),
datetime(2007, 1, 1): datetime(2006, 1, 1), }))
tests.append((YearBegin(-2),
{datetime(2007, 1, 1): datetime(2005, 1, 1),
datetime(2008, 6, 30): datetime(2007, 1, 1),
datetime(2008, 12, 31): datetime(2007, 1, 1), }))
tests.append((YearBegin(month=4),
{datetime(2007, 4, 1): datetime(2008, 4, 1),
datetime(2007, 4, 15): datetime(2008, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1), }))
tests.append((YearBegin(0, month=4),
{datetime(2007, 4, 1): datetime(2007, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1), }))
tests.append((YearBegin(4, month=4),
{datetime(2007, 4, 1): datetime(2011, 4, 1),
datetime(2007, 4, 15): datetime(2011, 4, 1),
datetime(2007, 3, 1): datetime(2010, 4, 1),
datetime(2007, 12, 15): datetime(2011, 4, 1),
datetime(2012, 1, 31): datetime(2015, 4, 1), }))
tests.append((YearBegin(-1, month=4),
{datetime(2007, 4, 1): datetime(2006, 4, 1),
datetime(2007, 3, 1): datetime(2006, 4, 1),
datetime(2007, 12, 15): datetime(2007, 4, 1),
datetime(2012, 1, 31): datetime(2011, 4, 1), }))
tests.append((YearBegin(-3, month=4),
{datetime(2007, 4, 1): datetime(2004, 4, 1),
datetime(2007, 3, 1): datetime(2004, 4, 1),
datetime(2007, 12, 15): datetime(2005, 4, 1),
datetime(2012, 1, 31): datetime(2009, 4, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(YearBegin(), datetime(2007, 1, 3), False),
(YearBegin(), datetime(2008, 1, 1), True),
(YearBegin(), datetime(2006, 12, 31), False),
(YearBegin(), datetime(2006, 1, 2), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearEndLagged(Base):
def test_bad_month_fail(self):
self.assertRaises(Exception, BYearEnd, month=13)
self.assertRaises(Exception, BYearEnd, month=0)
def test_offset(self):
tests = []
tests.append((BYearEnd(month=6),
{datetime(2008, 1, 1): datetime(2008, 6, 30),
datetime(2007, 6, 30): datetime(2008, 6, 30)},
))
tests.append((BYearEnd(n=-1, month=6),
{datetime(2008, 1, 1): datetime(2007, 6, 29),
datetime(2007, 6, 30): datetime(2007, 6, 29)},
))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
self.assertEqual(base + offset, expected)
def test_roll(self):
offset = BYearEnd(month=6)
date = datetime(2009, 11, 30)
self.assertEqual(offset.rollforward(date), datetime(2010, 6, 30))
self.assertEqual(offset.rollback(date), datetime(2009, 6, 30))
def test_onOffset(self):
tests = [
(BYearEnd(month=2), datetime(2007, 2, 28), True),
(BYearEnd(month=6), datetime(2007, 6, 30), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearEnd(Base):
_offset = BYearEnd
def test_offset(self):
tests = []
tests.append((BYearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2006, 12, 29),
datetime(2005, 12, 31): datetime(2006, 12, 29), }))
tests.append((BYearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 29), }))
tests.append((BYearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29), }))
tests.append((BYearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 30),
datetime(2008, 6, 30): datetime(2006, 12, 29),
datetime(2008, 12, 31): datetime(2006, 12, 29), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(BYearEnd(), datetime(2007, 12, 31), True),
(BYearEnd(), datetime(2008, 1, 1), False),
(BYearEnd(), datetime(2006, 12, 31), False),
(BYearEnd(), datetime(2006, 12, 29), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestYearEnd(Base):
_offset = YearEnd
def test_misspecified(self):
self.assertRaises(ValueError, YearEnd, month=13)
def test_offset(self):
tests = []
tests.append((YearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 31), }))
tests.append((YearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31), }))
tests.append((YearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 31),
datetime(2006, 12, 30): datetime(2005, 12, 31),
datetime(2007, 1, 1): datetime(2006, 12, 31), }))
tests.append((YearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 31),
datetime(2008, 6, 30): datetime(2006, 12, 31),
datetime(2008, 12, 31): datetime(2006, 12, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(YearEnd(), datetime(2007, 12, 31), True),
(YearEnd(), datetime(2008, 1, 1), False),
(YearEnd(), datetime(2006, 12, 31), True),
(YearEnd(), datetime(2006, 12, 29), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestYearEndDiffMonth(Base):
def test_offset(self):
tests = []
tests.append((YearEnd(month=3),
{datetime(2008, 1, 1): datetime(2008, 3, 31),
datetime(2008, 2, 15): datetime(2008, 3, 31),
datetime(2008, 3, 31): datetime(2009, 3, 31),
datetime(2008, 3, 30): datetime(2008, 3, 31),
datetime(2005, 3, 31): datetime(2006, 3, 31),
datetime(2006, 7, 30): datetime(2007, 3, 31)}))
tests.append((YearEnd(0, month=3),
{datetime(2008, 1, 1): datetime(2008, 3, 31),
datetime(2008, 2, 28): datetime(2008, 3, 31),
datetime(2008, 3, 31): datetime(2008, 3, 31),
datetime(2005, 3, 30): datetime(2005, 3, 31), }))
tests.append((YearEnd(-1, month=3),
{datetime(2007, 1, 1): datetime(2006, 3, 31),
datetime(2008, 2, 28): datetime(2007, 3, 31),
datetime(2008, 3, 31): datetime(2007, 3, 31),
datetime(2006, 3, 29): datetime(2005, 3, 31),
datetime(2006, 3, 30): datetime(2005, 3, 31),
datetime(2007, 3, 1): datetime(2006, 3, 31), }))
tests.append((YearEnd(-2, month=3),
{datetime(2007, 1, 1): datetime(2005, 3, 31),
datetime(2008, 6, 30): datetime(2007, 3, 31),
datetime(2008, 3, 31): datetime(2006, 3, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(YearEnd(month=3), datetime(2007, 3, 31), True),
(YearEnd(month=3), datetime(2008, 1, 1), False),
(YearEnd(month=3), datetime(2006, 3, 31), True),
(YearEnd(month=3), datetime(2006, 3, 29), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def assertEq(offset, base, expected):
actual = offset + base
actual_swapped = base + offset
actual_apply = offset.apply(base)
try:
assert actual == expected
assert actual_swapped == expected
assert actual_apply == expected
except AssertionError:
        raise AssertionError("\nExpected: %s\nActual: %s\nFor Offset: %s"
"\nAt Date: %s" %
(expected, actual, offset, base))
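# assertEq is the workhorse for the case tables above: it checks that
# `offset + base`, `base + offset` and `offset.apply(base)` all agree with the
# expected value, so every mapping implicitly exercises both addition orders and
# the explicit apply() path.  For example (values taken from the MonthEnd cases):
#
#   assertEq(MonthEnd(), datetime(2008, 1, 1), datetime(2008, 1, 31))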
def test_Easter():
assertEq(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))
assertEq(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))
assertEq(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24))
assertEq(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24))
assertEq(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8))
assertEq(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4))
assertEq(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4))
assertEq(-Easter(2), datetime(2011, 1, 1), datetime(2009, 4, 12))
assertEq(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12))
assertEq(-Easter(2), datetime(2010, 4, 4), datetime(2008, 3, 23))
class TestTicks(tm.TestCase):
ticks = [Hour, Minute, Second, Milli, Micro, Nano]
def test_ticks(self):
offsets = [(Hour, Timedelta(hours=5)),
(Minute, Timedelta(hours=2, minutes=3)),
(Second, Timedelta(hours=2, seconds=3)),
(Milli, Timedelta(hours=2, milliseconds=3)),
(Micro, Timedelta(hours=2, microseconds=3)),
(Nano, Timedelta(hours=2, nanoseconds=3))]
for kls, expected in offsets:
offset = kls(3)
result = offset + Timedelta(hours=2)
self.assertTrue(isinstance(result, Timedelta))
self.assertEqual(result, expected)
def test_Hour(self):
assertEq(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1))
assertEq(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assertEq(2 * Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 2))
assertEq(-1 * Hour(), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
self.assertEqual(Hour(3) + Hour(2), Hour(5))
self.assertEqual(Hour(3) - Hour(2), Hour())
self.assertNotEqual(Hour(4), Hour(1))
def test_Minute(self):
assertEq(Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 1))
assertEq(Minute(-1), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 2))
assertEq(-1 * Minute(), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
self.assertEqual(Minute(3) + Minute(2), Minute(5))
self.assertEqual(Minute(3) - Minute(2), Minute())
self.assertNotEqual(Minute(5), Minute())
def test_Second(self):
assertEq(Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 1))
assertEq(Second(-1), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 2))
        assertEq(-1 * Second(), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1))
self.assertEqual(Second(3) + Second(2), Second(5))
self.assertEqual(Second(3) - Second(2), Second())
def test_Millisecond(self):
assertEq(Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1000))
assertEq(Milli(-1), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1))
assertEq(Milli(2), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000))
assertEq(2 * Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000))
assertEq(-1 * Milli(), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1))
self.assertEqual(Milli(3) + Milli(2), Milli(5))
self.assertEqual(Milli(3) - Milli(2), Milli())
def test_MillisecondTimestampArithmetic(self):
assertEq(Milli(), Timestamp('2010-01-01'), Timestamp('2010-01-01 00:00:00.001'))
assertEq(Milli(-1), Timestamp('2010-01-01 00:00:00.001'), Timestamp('2010-01-01'))
def test_Microsecond(self):
assertEq(Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1))
assertEq(Micro(-1), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2))
assertEq(-1 * Micro(), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1))
self.assertEqual(Micro(3) + Micro(2), Micro(5))
self.assertEqual(Micro(3) - Micro(2), Micro())
def test_NanosecondGeneric(self):
timestamp = Timestamp(datetime(2010, 1, 1))
self.assertEqual(timestamp.nanosecond, 0)
result = timestamp + Nano(10)
self.assertEqual(result.nanosecond, 10)
reverse_result = Nano(10) + timestamp
self.assertEqual(reverse_result.nanosecond, 10)
def test_Nanosecond(self):
timestamp = Timestamp(datetime(2010, 1, 1))
assertEq(Nano(), timestamp, timestamp + np.timedelta64(1, 'ns'))
assertEq(Nano(-1), timestamp + np.timedelta64(1, 'ns'), timestamp)
assertEq(2 * Nano(), timestamp, timestamp + np.timedelta64(2, 'ns'))
assertEq(-1 * Nano(), timestamp + np.timedelta64(1, 'ns'), timestamp)
self.assertEqual(Nano(3) + Nano(2), Nano(5))
self.assertEqual(Nano(3) - Nano(2), Nano())
# GH9284
self.assertEqual(Nano(1) + Nano(10), Nano(11))
self.assertEqual(Nano(5) + Micro(1), Nano(1005))
self.assertEqual(Micro(5) + Nano(1), Nano(5001))
def test_tick_zero(self):
for t1 in self.ticks:
for t2 in self.ticks:
self.assertEqual(t1(0), t2(0))
self.assertEqual(t1(0) + t2(0), t1(0))
if t1 is not Nano:
self.assertEqual(t1(2) + t2(0), t1(2))
if t1 is Nano:
self.assertEqual(t1(2) + Nano(0), t1(2))
def test_tick_equalities(self):
for t in self.ticks:
self.assertEqual(t(3), t(3))
self.assertEqual(t(), t(1))
# not equals
self.assertNotEqual(t(3), t(2))
self.assertNotEqual(t(3), t(-3))
def test_tick_operators(self):
for t in self.ticks:
self.assertEqual(t(3) + t(2), t(5))
self.assertEqual(t(3) - t(2), t(1))
self.assertEqual(t(800) + t(300), t(1100))
self.assertEqual(t(1000) - t(5), t(995))
def test_tick_offset(self):
for t in self.ticks:
self.assertFalse(t().isAnchored())
def test_compare_ticks(self):
for kls in self.ticks:
three = kls(3)
four = kls(4)
for _ in range(10):
self.assertTrue(three < kls(4))
self.assertTrue(kls(3) < four)
self.assertTrue(four > kls(3))
self.assertTrue(kls(4) > three)
self.assertTrue(kls(3) == kls(3))
self.assertTrue(kls(3) != kls(4))
class TestOffsetNames(tm.TestCase):
def test_get_offset_name(self):
assertRaisesRegexp(ValueError, 'Bad rule.*BusinessDays', get_offset_name, BDay(2))
assert get_offset_name(BDay()) == 'B'
assert get_offset_name(BMonthEnd()) == 'BM'
assert get_offset_name(Week(weekday=0)) == 'W-MON'
assert get_offset_name(Week(weekday=1)) == 'W-TUE'
assert get_offset_name(Week(weekday=2)) == 'W-WED'
assert get_offset_name(Week(weekday=3)) == 'W-THU'
assert get_offset_name(Week(weekday=4)) == 'W-FRI'
self.assertEqual(get_offset_name(LastWeekOfMonth(weekday=WeekDay.SUN)), "LWOM-SUN")
self.assertEqual(get_offset_name(makeFY5253LastOfMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=4)),"REQ-L-MAR-TUE-4")
self.assertEqual(get_offset_name(makeFY5253NearestEndMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=3)), "REQ-N-MAR-TUE-3")
def test_get_offset():
assertRaisesRegexp(ValueError, "rule.*GIBBERISH", get_offset, 'gibberish')
assertRaisesRegexp(ValueError, "rule.*QS-JAN-B", get_offset, 'QS-JAN-B')
pairs = [
('B', BDay()), ('b', BDay()), ('bm', BMonthEnd()),
('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)),
('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)),
('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4)),
('w@Sat', Week(weekday=5)),
("RE-N-DEC-MON", makeFY5253NearestEndMonth(weekday=0, startingMonth=12)),
("RE-L-DEC-TUE", makeFY5253LastOfMonth(weekday=1, startingMonth=12)),
("REQ-L-MAR-TUE-4", makeFY5253LastOfMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=4)),
("REQ-L-DEC-MON-3", makeFY5253LastOfMonthQuarter(weekday=0, startingMonth=12, qtr_with_extra_week=3)),
("REQ-N-DEC-MON-3", makeFY5253NearestEndMonthQuarter(weekday=0, startingMonth=12, qtr_with_extra_week=3)),
]
for name, expected in pairs:
offset = get_offset(name)
assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
(name, expected, offset))
def test_parse_time_string():
(date, parsed, reso) = parse_time_string('4Q1984')
(date_lower, parsed_lower, reso_lower) = parse_time_string('4q1984')
assert date == date_lower
assert parsed == parsed_lower
assert reso == reso_lower
def test_get_standard_freq():
fstr = get_standard_freq('W')
assert fstr == get_standard_freq('w')
assert fstr == get_standard_freq('1w')
assert fstr == get_standard_freq(('W', 1))
assert fstr == get_standard_freq('WeEk')
fstr = get_standard_freq('5Q')
assert fstr == get_standard_freq('5q')
assert fstr == get_standard_freq('5QuarTer')
assert fstr == get_standard_freq(('q', 5))
def test_quarterly_dont_normalize():
date = datetime(2012, 3, 31, 5, 30)
offsets = (QuarterBegin, QuarterEnd, BQuarterEnd, BQuarterBegin)
for klass in offsets:
result = date + klass()
assert(result.time() == date.time())
class TestOffsetAliases(tm.TestCase):
def setUp(self):
_offset_map.clear()
def test_alias_equality(self):
for k, v in compat.iteritems(_offset_map):
if v is None:
continue
self.assertEqual(k, v.copy())
def test_rule_code(self):
lst = ['M', 'MS', 'BM', 'BMS', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
self.assertEqual(k, get_offset(k).rule_code)
# should be cached - this is kind of an internals test...
assert k in _offset_map
self.assertEqual(k, (get_offset(k) * 3).rule_code)
suffix_lst = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
base = 'W'
for v in suffix_lst:
alias = '-'.join([base, v])
self.assertEqual(alias, get_offset(alias).rule_code)
self.assertEqual(alias, (get_offset(alias) * 5).rule_code)
suffix_lst = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG',
'SEP', 'OCT', 'NOV', 'DEC']
base_lst = ['A', 'AS', 'BA', 'BAS', 'Q', 'QS', 'BQ', 'BQS']
for base in base_lst:
for v in suffix_lst:
alias = '-'.join([base, v])
self.assertEqual(alias, get_offset(alias).rule_code)
self.assertEqual(alias, (get_offset(alias) * 5).rule_code)
def test_apply_ticks():
result = offsets.Hour(3).apply(offsets.Hour(4))
exp = offsets.Hour(7)
assert(result == exp)
def test_delta_to_tick():
delta = timedelta(3)
tick = offsets._delta_to_tick(delta)
assert(tick == offsets.Day(3))
def test_dateoffset_misc():
oset = offsets.DateOffset(months=2, days=4)
# it works
result = oset.freqstr
assert(not offsets.DateOffset(months=2) == 2)
def test_freq_offsets():
off = BDay(1, offset=timedelta(0, 1800))
assert(off.freqstr == 'B+30Min')
off = BDay(1, offset=timedelta(0, -1800))
assert(off.freqstr == 'B-30Min')
def get_all_subclasses(cls):
ret = set()
this_subclasses = cls.__subclasses__()
ret = ret | set(this_subclasses)
for this_subclass in this_subclasses:
        # recurse so that indirect subclasses are collected as well
        ret = ret | get_all_subclasses(this_subclass)
return ret
class TestCaching(tm.TestCase):
# as of GH 6479 (in 0.14.0), offset caching is turned off
# as of v0.12.0 only BusinessMonth/Quarter were actually caching
def setUp(self):
_daterange_cache.clear()
_offset_map.clear()
def run_X_index_creation(self, cls):
inst1 = cls()
if not inst1.isAnchored():
self.assertFalse(inst1._should_cache(), cls)
return
self.assertTrue(inst1._should_cache(), cls)
DatetimeIndex(start=datetime(2013,1,31), end=datetime(2013,3,31), freq=inst1, normalize=True)
self.assertTrue(cls() in _daterange_cache, cls)
def test_should_cache_month_end(self):
self.assertFalse(MonthEnd()._should_cache())
def test_should_cache_bmonth_end(self):
self.assertFalse(BusinessMonthEnd()._should_cache())
def test_should_cache_week_month(self):
self.assertFalse(WeekOfMonth(weekday=1, week=2)._should_cache())
def test_all_cacheableoffsets(self):
for subclass in get_all_subclasses(CacheableOffset):
if subclass.__name__[0] == "_" \
or subclass in TestCaching.no_simple_ctr:
continue
self.run_X_index_creation(subclass)
def test_month_end_index_creation(self):
DatetimeIndex(start=datetime(2013,1,31), end=datetime(2013,3,31), freq=MonthEnd(), normalize=True)
self.assertFalse(MonthEnd() in _daterange_cache)
def test_bmonth_end_index_creation(self):
DatetimeIndex(start=datetime(2013,1,31), end=datetime(2013,3,29), freq=BusinessMonthEnd(), normalize=True)
self.assertFalse(BusinessMonthEnd() in _daterange_cache)
def test_week_of_month_index_creation(self):
inst1 = WeekOfMonth(weekday=1, week=2)
DatetimeIndex(start=datetime(2013,1,31), end=datetime(2013,3,29), freq=inst1, normalize=True)
inst2 = WeekOfMonth(weekday=1, week=2)
self.assertFalse(inst2 in _daterange_cache)
class TestReprNames(tm.TestCase):
def test_str_for_named_is_name(self):
# look at all the amazing combinations!
month_prefixes = ['A', 'AS', 'BA', 'BAS', 'Q', 'BQ', 'BQS', 'QS']
names = [prefix + '-' + month for prefix in month_prefixes
for month in ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']]
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
names += ['W-' + day for day in days]
names += ['WOM-' + week + day for week in ('1', '2', '3', '4')
for day in days]
#singletons
names += ['S', 'T', 'U', 'BM', 'BMS', 'BQ', 'QS'] # No 'Q'
_offset_map.clear()
for name in names:
offset = get_offset(name)
self.assertEqual(repr(offset), name)
self.assertEqual(str(offset), name)
def get_utc_offset_hours(ts):
# take a Timestamp and compute total hours of utc offset
o = ts.utcoffset()
return (o.days * 24 * 3600 + o.seconds) / 3600.0
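# For example, a timestamp pinned to US/Eastern standard time (UTC-5) yields
# get_utc_offset_hours(ts) == -5.0, while the same zone during daylight savings
# yields -4.0; the TestDST cases below compare this value on either side of a
# transition.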
class TestDST(tm.TestCase):
"""
test DateOffset additions over Daylight Savings Time
"""
# one microsecond before the DST transition
ts_pre_fallback = "2013-11-03 01:59:59.999999"
ts_pre_springfwd = "2013-03-10 01:59:59.999999"
# test both basic names and dateutil timezones
timezone_utc_offsets = {
'US/Eastern': dict(
utc_offset_daylight=-4,
utc_offset_standard=-5,
),
'dateutil/US/Pacific': dict(
utc_offset_daylight=-7,
utc_offset_standard=-8,
)
}
valid_date_offsets_singular = [
'weekday', 'day', 'hour', 'minute', 'second', 'microsecond'
]
valid_date_offsets_plural = [
'weeks', 'days',
'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'
]
def _test_all_offsets(self, n, **kwds):
valid_offsets = self.valid_date_offsets_plural if n > 1 \
else self.valid_date_offsets_singular
for name in valid_offsets:
self._test_offset(offset_name=name, offset_n=n, **kwds)
def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
offset = DateOffset(**{offset_name: offset_n})
t = tstart + offset
if expected_utc_offset is not None:
self.assertTrue(get_utc_offset_hours(t) == expected_utc_offset)
if offset_name == 'weeks':
# dates should match
self.assertTrue(
t.date() ==
timedelta(days=7 * offset.kwds['weeks']) + tstart.date()
)
# expect the same day of week, hour of day, minute, second, ...
self.assertTrue(
t.dayofweek == tstart.dayofweek and
t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second
)
elif offset_name == 'days':
# dates should match
self.assertTrue(timedelta(offset.kwds['days']) + tstart.date() == t.date())
# expect the same hour of day, minute, second, ...
self.assertTrue(
t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second
)
elif offset_name in self.valid_date_offsets_singular:
            # expect the singular offset value to match between tstart and t
datepart_offset = getattr(t, offset_name if offset_name != 'weekday' else 'dayofweek')
self.assertTrue(datepart_offset == offset.kwds[offset_name])
else:
# the offset should be the same as if it was done in UTC
self.assertTrue(
t == (tstart.tz_convert('UTC') + offset).tz_convert('US/Pacific')
)
def _make_timestamp(self, string, hrs_offset, tz):
offset_string = '{hrs:02d}00'.format(hrs=hrs_offset) if hrs_offset >= 0 else \
'-{hrs:02d}00'.format(hrs=-1 * hrs_offset)
return Timestamp(string + offset_string).tz_convert(tz)
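    # Illustrative note (added): _make_timestamp("2013-11-03 01:59:59.999999",
    # -5, 'US/Eastern') builds the wall-clock string with a "-0500" suffix,
    # parses it as a fixed-offset Timestamp and converts it to US/Eastern.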
def test_fallback_plural(self):
"""test moving from daylight savings to standard time"""
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_daylight']
hrs_post = utc_offsets['utc_offset_standard']
self._test_all_offsets(
n=3,
tstart=self._make_timestamp(self.ts_pre_fallback, hrs_pre, tz),
expected_utc_offset=hrs_post
)
def test_springforward_plural(self):
"""test moving from standard to daylight savings"""
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
hrs_post = utc_offsets['utc_offset_daylight']
self._test_all_offsets(
n=3,
tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
expected_utc_offset=hrs_post
)
def test_fallback_singular(self):
        # in the case of singular offsets, we don't necessarily know which utc offset
# the new Timestamp will wind up in (the tz for 1 month may be different from 1 second)
# so we don't specify an expected_utc_offset
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(
n=1,
tstart=self._make_timestamp(self.ts_pre_fallback, hrs_pre, tz),
expected_utc_offset=None
)
def test_springforward_singular(self):
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(
n=1,
tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
expected_utc_offset=None
)
def test_all_offset_classes(self):
tests = {MonthBegin: ['11/2/2012', '12/1/2012'],
MonthEnd: ['11/2/2012', '11/30/2012'],
BMonthBegin: ['11/2/2012', '12/3/2012'],
BMonthEnd: ['11/2/2012', '11/30/2012'],
CBMonthBegin: ['11/2/2012', '12/3/2012'],
CBMonthEnd: ['11/2/2012', '11/30/2012'],
Week: ['11/2/2012', '11/9/2012'],
YearBegin: ['11/2/2012', '1/1/2013'],
YearEnd: ['11/2/2012', '12/31/2012'],
BYearBegin: ['11/2/2012', '1/1/2013'],
BYearEnd: ['11/2/2012', '12/31/2012'],
QuarterBegin: ['11/2/2012', '12/1/2012'],
QuarterEnd: ['11/2/2012', '12/31/2012'],
BQuarterBegin: ['11/2/2012', '12/3/2012'],
BQuarterEnd: ['11/2/2012', '12/31/2012'],
Day: ['11/4/2012', '11/4/2012 23:00']
}
for offset, test_values in iteritems(tests):
first = Timestamp(test_values[0], tz='US/Eastern') + offset()
second = Timestamp(test_values[1], tz='US/Eastern')
self.assertEqual(first, second, str(offset))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
bikash/kaggleCompetition | microsoft malware/code/fullline.py | 1 | 11844 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 01:55:47 2015
@author: marios michailidis
"""
# licence: FreeBSD
"""
Copyright (c) 2015, Marios Michailidis
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import random
import numpy as np
import scipy as spss
from scipy.sparse import csr_matrix
import sys
sys.path.append("../../xgboost/wrapper")
import xgboost as xgb
from sklearn.ensemble import ExtraTreesClassifier
thre=20
num_round=1150
lr=0.05
max_de=7
subsam=0.4
colsample_bytree=0.5
gamma =0.001
min_child_weight=0.05
seed=1
objective='multi:softprob'
param = {}
param['booster']= 'gbtree'#gblinear
param['objective'] = objective
param['bst:eta'] = lr
param['seed']=seed
param['bst:max_depth'] = max_de
param['eval_metric'] = 'auc'
param['bst:min_child_weight']=min_child_weight
param['silent'] = 1
param['nthread'] = thre
param['bst:subsample'] = subsam
param['num_class'] = 9
param['gamma'] = gamma
param['colsample_bytree']=colsample_bytree
def transform2dtos(D2,y2):
    # transform a 2d array of predictions into a single flat array
    # and expand the labels into matching one-vs-all (0/1) indicators
d1=[]
y1=[]
for i in range (0,len(D2)):
for j in range (0,len(D2[0])):
d1.append(float(D2[i][j]))
if y2[i]==float(j):
y1.append(1.0)
else:
y1.append(0.0)
return d1,y1
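def _example_transform2dtos():
    # Hedged worked example (added): one case with 3 class probabilities and
    # true class 1 flattens to d1=[0.2, 0.5, 0.3] and y1=[0.0, 1.0, 0.0].
    d1, y1 = transform2dtos([[0.2, 0.5, 0.3]], [1.0])
    return d1, y1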
""" print predictions in file"""
def printfilewithtarget(X, name):
print("start print the training file with target")
wfile=open(name + ".csv", "w")
for i in range (0, len(X)):
wfile.write(str(X[i][0]) )
for j in range (1, len(X[i])):
wfile.write("," +str(X[i][j]) )
wfile.write("\n")
wfile.close()
print("done")
""" the metric we are being tested on"""
def logloss_metric(p, y):
logloss=0
for i in range (0, len(p)):
for j in range (0,len(p[i])):
if y[i]==float(j):
logloss+= np.log(spss.maximum(spss.minimum(p[i][j],1-(1e-15) ),1e-15 ))
return -logloss/float(len(y))
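def _example_logloss_metric():
    # Hedged worked example (added): two cases over 9 classes. The true class
    # receives probability 0.7 in case 0 and 0.2 in case 1, so the metric is
    # -(log(0.7) + log(0.2)) / 2, roughly 0.983.
    p = [[0.7] + [0.3 / 8.0] * 8,
         [0.1, 0.1, 0.1, 0.2] + [0.1] * 5]
    y = [0.0, 3.0]
    return logloss_metric(p, y)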
"""Load a csv file"""
def load(name):
print("start reading file with target")
wfile=open(name , "r")
line=wfile.readline().replace("\n","")
splits=line.split(",")
datalen=len(splits)
wfile.close()
X = np.loadtxt(open( name), delimiter=',',usecols=range(0, datalen), skiprows=0)
print("done")
return np.array(X)
""" use to concatebate the various kfold sets together"""
def cving(x1, x2, x3, x4,x5, y1 ,y2, y3, y4, y5, ind1, ind2, ind3, ind4 ,ind5, num):
if num==0:
xwhole=np.concatenate((x2,x3,x4,x5), axis=0)
yhol=np.concatenate((y2,y3,y4,y5), axis=0)
return x1,y1 ,ind1,xwhole,yhol
elif num==1:
xwhole=np.concatenate((x1,x3,x4,x5), axis=0)
yhol=np.concatenate((y1,y3,y4,y5), axis=0)
return x2,y2 ,ind2,xwhole,yhol
elif num==2:
xwhole=np.concatenate((x1,x2,x4,x5), axis=0)
yhol=np.concatenate((y1,y2,y4,y5), axis=0)
return x3,y3 ,ind3,xwhole,yhol
elif num==3:
xwhole=np.concatenate((x1,x2,x3,x5), axis=0)
yhol=np.concatenate((y1,y2,y3,y5), axis=0)
return x4,y4 ,ind4,xwhole,yhol
else :
xwhole=np.concatenate((x1,x2,x3,x4), axis=0)
yhol=np.concatenate((y1,y2,y3,y4), axis=0)
return x5,y5 ,ind5,xwhole,yhol
""" Splits data to 5 kfold sets"""
def split_array_in_5(array, seed):
random.seed(seed)
new_arra1=[]
new_arra2=[]
new_arra3=[]
new_arra4=[]
new_arra5=[]
indiceds1=[]
indiceds2=[]
indiceds3=[]
indiceds4=[]
indiceds5=[]
for j in range (0,len(array)):
rand=random.random()
if rand <0.2:
new_arra1.append(array[j])
indiceds1.append(j)
elif rand <0.4:
new_arra2.append(array[j])
indiceds2.append(j)
elif rand <0.6:
new_arra3.append(array[j])
indiceds3.append(j)
elif rand <0.8:
new_arra4.append(array[j])
indiceds4.append(j)
else :
new_arra5.append(array[j])
indiceds5.append(j)
#convert to numpy
new_arra1=np.array(new_arra1)
new_arra2=np.array(new_arra2)
new_arra3=np.array(new_arra3)
new_arra4=np.array(new_arra4)
new_arra5=np.array(new_arra5)
#return arrays and indices
return new_arra1,new_arra2,new_arra3,new_arra4,new_arra5,indiceds1,indiceds2,indiceds3,indiceds4,indiceds5
def scalepreds(prs):
for i in range (0, len(prs)):
suum=0.0
for j in range (0,9):
suum+=prs[i][j]
for j in range (0,9):
prs[i][j]/=suum
"""loads first columns of a file"""
def loadfirstcolumn(filename):
pred=[]
op=open(filename,'r')
op.readline() #header
for line in op:
line=line.replace('\n','')
sp=line.split(',')
#load always the last columns
pred.append(sp[0])
op.close()
return pred
"""loads last columns of a file"""
def loadlastcolumn(filename):
pred=[]
op=open(filename,'r')
op.readline() #header
for line in op:
line=line.replace('\n','')
sp=line.split(',')
#load always the last columns
pred.append(float(sp[len(sp)-1])-1.0)
op.close()
return pred
""" This is the main method"""
def main():
directory=''
train_file="fullinebytetrain.csv"
test_file="fullinebytetest.csv"
SEED= 15
outset="fullline"
y= loadlastcolumn(directory+"trainLabels.csv")
ids=loadfirstcolumn(directory+"sampleSubmission.csv")
include_inpretrain=True
model=ExtraTreesClassifier(n_estimators=100, criterion='entropy', max_depth=16, min_samples_split=2,min_samples_leaf=1, max_features=0.5,n_jobs=20, random_state=1)
trainini_file= ["old1gramtrain.csv"]
testini_file = ["old1gramtest.csv"]
X=load(train_file)
print ("train samples: %d columns: %d " % (len(X) , len(X[0])))
X_test=load(test_file)
print ("train samples: %d columns: %d" % (len(X_test) , len(X_test[0])))
if include_inpretrain:
for t in range(0,len(trainini_file)):
Xini=load(trainini_file[t])
print ("train samples: %d columns: %d " % (len(Xini) , len(Xini[0])))
X_testini=load(testini_file[t])
print ("train samples: %d columns: %d" % (len(X_testini) , len(X_testini[0])))
X=np.column_stack((X,Xini))
X_test=np.column_stack((X_test,X_testini))
print ("train after merge samples: %d columns: %d" % (len(X) , len(X[0])))
print ("train after merge samples: %d columns: %d" % (len(X_test) , len(X_test[0])))
    number_of_folds=5 # 5-fold cross-validation to get more precise results
train_stacker=[ [0.0 for d in range (0,9)] for k in range (0,len(X)) ]
test_stacker=[[0.0 for d in range (0,9)] for k in range (0,len(X_test))]
#label_stacker=[0 for k in range (0,len(X))]
    #split training data into 5 folds
x1,x2,x3,x4,x5,in1,in2,in3,in4,in5=split_array_in_5(X, SEED)
y1,y2,y3,y4,y5,iny1,iny2,iny3,iny4,iny5=split_array_in_5(y, SEED)
#create target variable
mean_log = 0.0
for i in range(0,number_of_folds):
X_cv,y_cv,indcv,X_train,y_train=cving(x1, x2, x3, x4,x5, y1 ,y2, y3, y4, y5,in1, in2, in3, in4 ,in5, i)
print (" train size: %d. test size: %d, cols: %d " % (len(X_train) ,len(X_cv) ,len(X_train[0]) ))
""" model XGBOOST classifier"""
xgmat = xgb.DMatrix( csr_matrix(X_train), label=y_train, missing =-999.0 )
bst = xgb.train( param.items(), xgmat, num_round );
xgmat_cv = xgb.DMatrix( csr_matrix(X_cv), missing =-999.0)
preds =bst.predict( xgmat_cv ).reshape( len(X_cv), 9).tolist()
scalepreds(preds)
"""now model scikit classifier"""
model.fit(X_train, y_train)
predsextra = model.predict_proba(X_cv)
scalepreds(predsextra)
for pr in range (0,len(preds)):
for d in range (0,9):
preds[pr][d]=preds[pr][d]*0.8 + predsextra[pr][d]*0.2
# compute Loglikelihood metric for this CV fold
loglike = logloss_metric( preds,y_cv)
print "size train: %d size cv: %d Loglikelihood (fold %d/%d): %f" % (len(X_train), len(X_cv), i + 1, number_of_folds, loglike)
mean_log += loglike
#save the results
no=0
for real_index in indcv:
for d in range (0,9):
train_stacker[real_index][d]=(preds[no][d])
no+=1
if (number_of_folds)>0:
mean_log/=number_of_folds
print (" Average M loglikelihood: %f" % (mean_log) )
xgmat = xgb.DMatrix( csr_matrix(X), label=y, missing =-999.0 )
bst = xgb.train( param.items(), xgmat, num_round );
xgmat_cv = xgb.DMatrix(csr_matrix(X_test), missing =-999.0)
preds =bst.predict( xgmat_cv ).reshape( len(X_test), 9 ).tolist()
scalepreds(preds)
#predicting for test
model.fit(X, y)
predsextra = model.predict_proba(X_test)
scalepreds(predsextra)
for pr in range (0,len(preds)):
for d in range (0,9):
test_stacker[pr][d]=preds[pr][d]*0.8 + predsextra[pr][d]*0.2
# === Predictions === #
print (" printing datasets ")
printfilewithtarget(train_stacker, outset + "train")
printfilewithtarget(test_stacker, outset + "test")
print("Write results...")
output_file = "submission_"+str( (mean_log ))+".csv"
print("Writing submission to %s" % output_file)
f = open(output_file, "w")
f.write("Id")# the header
for b in range (1,10):
f.write("," + str("Prediction" + str(b) ) )
f.write("\n")
for g in range(0, len(test_stacker)) :
f.write("%s" % ((ids[g])))
for prediction in test_stacker[g]:
f.write(",%f" % (prediction))
f.write("\n")
f.close()
print("Done.")
if __name__=="__main__":
main() | apache-2.0 |
takeriki/colonylive | clive/analysis/image/part/grid.py | 1 | 1363 | """
Determine colony grid position based on object map
"""
import cPickle
import numpy as np
import matplotlib.pylab as plt
class PlateGrid:
def __init__(self, ncol, nrow, xy_init, x_dif, y_dif):
self.ncol = ncol
self.nrow = nrow
self.xy_init = xy_init
self.x_dif = x_dif
self.y_dif = y_dif
self.poss = [(col,row)
for row in range(1,nrow+1)
for col in range(1,ncol+1)]
self._calc_pos2xy_center()
self._calc_crippos()
def _calc_pos2xy_center(self):
self.pos2xy_center = {}
for col, row in self.poss:
cx = int(self.xy_init[0] +
self.x_dif * (col-1))
cy = int(self.xy_init[1] +
self.y_dif * (row-1))
self.pos2xy_center[(col,row)] = (cx,cy)
def _calc_crippos(self):
rx = self.x_dif
ry = self.y_dif
x_tl = int(self.xy_init[0] - self.x_dif)
y_tl = int(self.xy_init[1] - self.y_dif)
x_br = int(self.xy_init[0] +
self.x_dif * self.ncol)
y_br = int(self.xy_init[1] +
self.y_dif * self.nrow)
self.xy_tl = (x_tl,y_tl)
self.xy_br = (x_br,y_br)
if __name__ == "__main__":
ncol = 48
nrow = 32
grid = PlateGrid(ncol, nrow, (1,1), 30, 30)
print grid
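    # Illustrative check (added): with xy_init=(1, 1) and a 30-pixel pitch the
    # colony at grid position (col=2, row=3) is centred at (31, 61).
    print grid.pos2xy_center[(2, 3)]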
| gpl-3.0 |
sarahgrogan/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale
bit-depth, 512 x 512 sized image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
jpn--/larch | larch/model/latentclass.py | 1 | 19924 |
import numpy
import pandas
from typing import MutableMapping
from ..util import Dict
from ..dataframes import DataFrames, MissingDataError
from ..exceptions import ParameterNotInModelWarning
from .linear import ParameterRef_C
from ..general_precision import l4_float_dtype
from ..model import persist_flags
from ..model.abstract_model import AbstractChoiceModel
def sync_frames(*models):
"""
Synchronize model parameter frames.
Parameters
----------
*models : Sequence[Model]
"""
# check if all frames are already in sync
in_sync = True
pf1 = models[0]._frame
for m in models[1:]:
if m._frame is not pf1:
in_sync = False
if not in_sync:
joined = pandas.concat([m._frame for m in models], sort=False)
joined = joined[~joined.index.duplicated(keep='first')]
for m in models:
m.set_frame(joined)
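# Hedged usage sketch (added; `m1` and `m2` stand for hypothetical larch Model
# instances that share some parameter names):
#
#   sync_frames(m1, m2)
#   # both models now hold one joined parameter frame, so a parameter edited
#   # through either model is visible to the other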
class LatentClassModel(AbstractChoiceModel):
"""
A latent class model.
Parameters
----------
k_membership : Model
The class membership model.
k_models : MutableMapping[int,Model]
The individual class choice models.
dataservice : DataService
The dataservice used to populate data for this model.
title : str, optional
A descriptive title for use in reporting.
frame : pandas.DataFrame, optional
Initialize the parameterframe with this.
"""
def __init__(
self,
k_membership,
k_models,
*,
dataservice=None,
title=None,
frame=None,
constraints=None,
):
assert isinstance(k_membership, AbstractChoiceModel)
if len(getattr(k_membership, 'utility_ca', [])):
raise ValueError("the class membership model cannot include `utility_ca`, only `utility_co`")
self._k_membership = k_membership
self._k_membership._model_does_not_require_choice = True
if not isinstance(k_models, MutableMapping):
raise ValueError(f'k_models must be a MutableMapping, not {type(k_models)}')
self._k_models = k_models
self._dataservice = dataservice
if self._dataservice is None:
self._dataservice = k_membership.dataservice
else:
if not(k_membership.dataservice is None or k_membership.dataservice is self._dataservice):
raise ValueError("dataservice for all constituent models must be the same")
for m in self._k_models.values():
if self._dataservice is None:
self._dataservice = m.dataservice
if not (m.dataservice is None or m.dataservice is self._dataservice):
raise ValueError("dataservice for all constituent models must be the same")
self._dataframes = None
self._mangled = True
self.constraints = constraints
super().__init__(
parameters=None,
frame=frame,
title=title,
)
self.unmangle()
def _k_model_names(self):
return list(sorted(self._k_models.keys()))
def required_data(self):
# combine all required_data from all class-level submodels
req = Dict()
for k_name, k_model in self._k_models.items():
k_req = k_model.required_data()
for i in ['ca','co']:
if i in k_req or i in req:
req[i] = list(set(req.get(i,[])) | set(k_req.get(i,[])))
for i in ['weight_co', 'avail_ca', 'avail_co', 'choice_ca', 'choice_co_code', 'choice_co_vars', ]:
if i in req:
if i in k_req and req[i] != k_req[i]:
raise ValueError(f'mismatched {i}')
else:
pass
else:
if i in k_req:
req[i] = k_req[i]
else:
pass
top_req = self._k_membership.required_data()
if 'co' in top_req:
req['co'] = list(sorted(set(req.get('co', [])) | set(top_req.get('co', []))))
return req
def total_weight(self):
"""
The total weight of cases in the attached dataframes.
Returns
-------
float
"""
if self._dataframes is None:
raise MissingDataError("no dataframes are set")
return self._dataframes.total_weight()
def __prep_for_compute(self, x=None):
self.unmangle()
if x is not None:
self._k_membership.set_values(x)
def class_membership_probability(self, x=None, start_case=0, stop_case=-1, step_case=1):
self.__prep_for_compute(x)
return self._k_membership.probability(
x=None,
return_dataframe='idco',
start_case=start_case,
stop_case=stop_case,
step_case=step_case,
)
def class_membership_d_probability(self, x=None, start_case=0, stop_case=-1, step_case=1):
self.__prep_for_compute(x)
return self._k_membership.d_probability(
x=None,
start_case=start_case,
stop_case=stop_case,
step_case=step_case,
)
def probability(self, x=None, start_case=0, stop_case=-1, step_case=1, return_dataframe=False,):
self.__prep_for_compute(x)
if start_case >= self.dataframes.n_cases:
raise IndexError("start_case >= n_cases")
if stop_case == -1:
stop_case = self.dataframes.n_cases
if start_case > stop_case:
raise IndexError("start_case > stop_case")
if step_case <= 0:
raise IndexError("non-positive step_case")
n_rows = ((stop_case - start_case) // step_case) + (1 if (stop_case - start_case) % step_case else 0)
p = numpy.zeros([n_rows, self.dataframes.n_alts])
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ParameterNotInModelWarning)
k_membership_probability = self.class_membership_probability(
start_case=start_case, stop_case=stop_case, step_case=step_case,
)
for k_name, k_model in self._k_models.items():
k_pr = k_model.probability(start_case=start_case, stop_case=stop_case, step_case=step_case)
p += (
numpy.asarray( k_pr[:,:self.dataframes.n_alts] )
* k_membership_probability.loc[:,k_name].values[:, None]
)
if return_dataframe:
return pandas.DataFrame(
p,
index=self._dataframes.caseindex[start_case:stop_case:step_case],
columns=self._dataframes.alternative_codes(),
)
return p
def d_probability(self, x=None, start_case=0, stop_case=-1, step_case=1,):
"""
Compute the partial derivative of probability w.r.t. the parameters.
Note this function is known to be incomplete. It computes the
derivative only within the classes, not for the class membership model.
Parameters
----------
x
start_case
stop_case
step_case
		Returns
		-------
		ndarray
			Partial derivatives of the probability array with respect to the
			parameters, with shape (cases, alternatives, parameters).
		"""
self.__prep_for_compute(x)
if start_case >= self.dataframes.n_cases:
raise IndexError("start_case >= n_cases")
if stop_case == -1:
stop_case = self.dataframes.n_cases
if start_case > stop_case:
raise IndexError("start_case > stop_case")
if step_case <= 0:
raise IndexError("non-positive step_case")
n_rows = ((stop_case - start_case) // step_case) + (1 if (stop_case - start_case) % step_case else 0)
p = numpy.zeros([n_rows, self.dataframes.n_alts, len(self.pf)])
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ParameterNotInModelWarning)
k_membership_probability = self.class_membership_probability(
start_case=start_case, stop_case=stop_case, step_case=step_case,
)
k_membership_d_probability = self.class_membership_d_probability(
start_case=start_case, stop_case=stop_case, step_case=step_case,
)
for k_name, k_model in self._k_models.items():
k_pr = k_model.probability(start_case=start_case, stop_case=stop_case, step_case=step_case)
k_d_pr = k_model.d_probability(start_case=start_case, stop_case=stop_case, step_case=step_case)
p += (
numpy.asarray( k_d_pr[:,:self.dataframes.n_alts,:] )
* k_membership_probability.loc[:,k_name].values[:, None, None]
)
k_position = k_membership_probability.columns.get_loc(k_name)
p += (
numpy.asarray( k_pr[:,:self.dataframes.n_alts, None] )
* k_membership_d_probability[:,k_position,:][:,None,:]
)
return p
def loglike2(
self,
x=None,
*,
start_case=0,
stop_case=-1,
step_case=1,
persist=0,
leave_out=-1,
keep_only=-1,
subsample=-1,
return_series=True,
probability_only=False,
):
"""
Compute a log likelihood value and it first derivative.
Parameters
----------
x : {'null', 'init', 'best', array-like, dict, scalar}, optional
Values for the parameters. See :ref:`set_values` for details.
start_case : int, default 0
The first case to include in the log likelihood computation. To include all
cases, start from 0 (the default).
stop_case : int, default -1
One past the last case to include in the log likelihood computation. This is processed as usual for
Python slicing and iterating, and negative values count backward from the end. To include all cases,
end at -1 (the default).
step_case : int, default 1
The step size of the case iterator to use in likelihood calculation. This is processed as usual for
Python slicing and iterating. To include all cases, step by 1 (the default).
persist : int, default 0
Whether to return a variety of internal and intermediate arrays in the result dictionary.
If set to 0, only the final `ll` value is included.
leave_out, keep_only, subsample : int, optional
Settings for cross validation calculations.
If `leave_out` and `subsample` are set, then case rows where rownumber % subsample == leave_out are dropped.
If `keep_only` and `subsample` are set, then only case rows where rownumber % subsample == keep_only are used.
return_series : bool
Deprecated, no effect. Derivatives are always returned as a Series.
Returns
-------
dictx
The log likelihood is given by key 'll' and the first derivative by key 'dll'.
Other arrays are also included if `persist` is set to True.
"""
if leave_out != -1 or keep_only != -1 or subsample != -1:
raise NotImplementedError()
from ..util import dictx
self.__prep_for_compute(x)
pr = self.probability(
x=None,
start_case=start_case, stop_case=stop_case, step_case=step_case,
)
y = dictx()
if probability_only:
y.ll = numpy.nan
y.probability = pr
return y
d_p = self.d_probability(
x=None,
start_case=start_case, stop_case=stop_case, step_case=step_case,
)
from .nl import d_loglike_from_d_probability
from .mnl import loglike_from_probability
if stop_case == -1:
stop_case_ = self.n_cases
else:
stop_case_ = stop_case
if self.dataframes.data_wt is not None:
wt = self.dataframes.data_wt.iloc[start_case:stop_case_:step_case]
else:
wt = None
ch = self.dataframes.array_ch()[start_case:stop_case_:step_case]
y.ll = loglike_from_probability(
pr,
ch,
wt
)
if persist & persist_flags.PERSIST_PROBABILITY:
y.probability = pr
if persist & persist_flags.PERSIST_BHHH:
y.dll, y.bhhh = d_loglike_from_d_probability(
pr,
d_p,
ch,
wt,
True
)
else:
y.dll = d_loglike_from_d_probability(
pr,
d_p,
ch,
wt,
False
)
if start_case==0 and (stop_case==-1 or stop_case==self.n_cases) and step_case==1:
self._check_if_best(y.ll)
return y
def loglike2_bhhh(
self,
x=None,
*,
start_case=0,
stop_case=-1,
step_case=1,
persist=0,
leave_out=-1,
keep_only=-1,
subsample=-1,
return_series=False,
):
"""
Compute a log likelihood value, it first derivative, and the BHHH approximation of the Hessian.
The `BHHH algorithm <https://en.wikipedia.org/wiki/Berndt–Hall–Hall–Hausman_algorithm>`
		employs a matrix computed as the sum of the casewise outer products of the
		gradient, to approximate the Hessian matrix.
Parameters
----------
x : {'null', 'init', 'best', array-like, dict, scalar}, optional
Values for the parameters. See :ref:`set_values` for details.
start_case : int, default 0
The first case to include in the log likelihood computation. To include all
cases, start from 0 (the default).
stop_case : int, default -1
One past the last case to include in the log likelihood computation. This is processed as usual for
Python slicing and iterating, and negative values count backward from the end. To include all cases,
end at -1 (the default).
step_case : int, default 1
The step size of the case iterator to use in likelihood calculation. This is processed as usual for
Python slicing and iterating. To include all cases, step by 1 (the default).
		persist : int, default 0
Whether to return a variety of internal and intermediate arrays in the result dictionary.
If set to 0, only the final `ll` value is included.
leave_out, keep_only, subsample : int, optional
Settings for cross validation calculations.
If `leave_out` and `subsample` are set, then case rows where rownumber % subsample == leave_out are dropped.
If `keep_only` and `subsample` are set, then only case rows where rownumber % subsample == keep_only are used.
return_series : bool
Deprecated, no effect. Derivatives are always returned as a Series.
Returns
-------
dictx
The log likelihood is given by key 'll', the first derivative by key 'dll', and the BHHH matrix by 'bhhh'.
Other arrays are also included if `persist` is set to True.
"""
return self.loglike2(
x=x,
start_case=start_case,
stop_case=stop_case,
step_case=step_case,
persist=persist | persist_flags.PERSIST_BHHH,
leave_out=leave_out,
keep_only=keep_only,
subsample=subsample,
return_series=True,
probability_only=False,
)
def loglike(
self,
x=None,
*,
start_case=0, stop_case=-1, step_case=1,
persist=0,
leave_out=-1, keep_only=-1, subsample=-1,
probability_only=False,
):
"""
Compute a log likelihood value.
Parameters
----------
x : {'null', 'init', 'best', array-like, dict, scalar}, optional
Values for the parameters. See :ref:`set_values` for details.
start_case : int, default 0
The first case to include in the log likelihood computation. To include all
cases, start from 0 (the default).
stop_case : int, default -1
One past the last case to include in the log likelihood computation. This is processed as usual for
Python slicing and iterating, and negative values count backward from the end. To include all cases,
end at -1 (the default).
step_case : int, default 1
The step size of the case iterator to use in likelihood calculation. This is processed as usual for
Python slicing and iterating. To include all cases, step by 1 (the default).
persist : int, default 0
Whether to return a variety of internal and intermediate arrays in the result dictionary.
If set to 0, only the final `ll` value is included.
leave_out, keep_only, subsample : int, optional
Settings for cross validation calculations.
If `leave_out` and `subsample` are set, then case rows where rownumber % subsample == leave_out are dropped.
If `keep_only` and `subsample` are set, then only case rows where rownumber % subsample == keep_only are used.
probability_only : bool, default False
Compute only the probability and ignore the likelihood. If this setting is active, the
dataframes need not include the "actual" choice data.
Returns
-------
float or array or dictx
The log likelihood as a float, when `probability_only` is False and `persist` is 0.
The probability as an array, when `probability_only` is True and `persist` is 0.
A dictx is returned if `persist` is non-zero.
"""
self.__prep_for_compute(x)
pr = self.probability(
x=None,
start_case=start_case,
stop_case=stop_case,
step_case=step_case,
return_dataframe=False,
)
if probability_only:
return pr
from .mnl import loglike_from_probability
if stop_case == -1:
stop_case_ = self.n_cases
else:
stop_case_ = stop_case
if self.dataframes.data_wt is not None:
wt = self.dataframes.data_wt.iloc[start_case:stop_case_:step_case]
else:
wt = None
from ..util import dictx
y = dictx()
y.ll = loglike_from_probability(
pr,
self.dataframes.array_ch()[start_case:stop_case_:step_case],
wt
)
if start_case==0 and (stop_case==-1 or stop_case==self.n_cases) and step_case==1:
self._check_if_best(y.ll)
if persist & persist_flags.PERSIST_PROBABILITY:
y.probability = pr
if persist:
return y
return y.ll
@property
def dataframes(self):
return self._dataframes
@dataframes.setter
def dataframes(self, x):
self._dataframes = x
if self._dataframes.data_co is None and self._dataframes.data_ce is not None:
self._dataframes.data_co = pandas.DataFrame(data=None, index=self._dataframes.data_ce.index.levels[0].copy())
if self._dataframes.data_co is None and self._dataframes.data_ca is not None:
self._dataframes.data_co = pandas.DataFrame(data=None, index=self._dataframes.data_ca.index.levels[0].copy())
top_data = DataFrames(
co = x.make_idco(*self._k_membership.required_data().get('co', [])),
alt_codes=numpy.arange(1, len(self._k_models)+1),
alt_names=self._k_model_names(),
av=1,
)
# TODO: This kludge creates an empty array for the data_ch in the class membership model
# : but it is not needed except to satisfy a data integrity check on that model
# : We should instead just allow no-choice when it is a class membership model.
# top_data.data_ch = pandas.DataFrame(0, index=top_data.caseindex, columns=self._k_models.keys())
self._k_membership._model_does_not_require_choice = True
self._k_membership.dataframes = top_data
for k_name, k_model in self._k_models.items():
k_model.dataframes = DataFrames(
co=x.data_co,
ca=x.data_ca,
ce=x.data_ce,
av=x.data_av,
ch=x.data_ch,
wt=x.data_wt,
alt_names=x.alternative_names(),
alt_codes=x.alternative_codes(),
)
def mangle(self, *args, **kwargs):
self._k_membership.mangle()
for m in self._k_models.values():
m.mangle()
super().mangle(*args, **kwargs)
def unmangle(self, force=False):
super().unmangle(force=force)
if self._mangled or force:
self._k_membership.unmangle(force=force)
for m in self._k_models.values():
m.unmangle(force=force)
sync_frames(self, self._k_membership, *self._k_models.values())
self._k_membership.unmangle()
for m in self._k_models.values():
m.unmangle()
self._mangled = False
def load_data(self, dataservice=None, autoscale_weights=True, log_warnings=True):
self.unmangle()
if dataservice is not None:
self._dataservice = dataservice
if self._dataservice is not None:
dfs = self._dataservice.make_dataframes(self.required_data(), log_warnings=log_warnings)
if autoscale_weights and dfs.data_wt is not None:
dfs.autoscale_weights()
self.dataframes = dfs
else:
raise ValueError('dataservice is not defined')
def set_value(self, name, value=None, **kwargs):
if isinstance(name, ParameterRef_C):
name = str(name)
if name not in self.pf.index:
self.unmangle()
if value is not None:
# value = numpy.float64(value)
# self.frame.loc[name,'value'] = value
kwargs['value'] = value
for k,v in kwargs.items():
if k in self.pf.columns:
if self.pf.dtypes[k] == 'float64':
v = numpy.float64(v)
elif self.pf.dtypes[k] == 'float32':
v = numpy.float32(v)
elif self.pf.dtypes[k] == 'int8':
v = numpy.int8(v)
self.pf.loc[name, k] = v
# update init values when they are implied
if k=='value':
if 'initvalue' not in kwargs and pandas.isnull(self.pf.loc[name, 'initvalue']):
self.pf.loc[name, 'initvalue'] = l4_float_dtype(v)
# update null values when they are implied
if 'nullvalue' not in kwargs and pandas.isnull(self.pf.loc[name, 'nullvalue']):
self.pf.loc[name, 'nullvalue'] = 0
# refresh everything # TODO: only refresh what changed
self._k_membership._check_if_frame_values_changed()
for m in self._k_models.values():
m._check_if_frame_values_changed()
def _frame_values_have_changed(self):
self._k_membership._frame_values_have_changed()
for m in self._k_models.values():
m._frame_values_have_changed()
@property
def n_cases(self):
"""int : The number of cases in the attached dataframes."""
if self._dataframes is None:
raise MissingDataError("no dataframes are set")
return self._k_membership.n_cases
| gpl-3.0 |
boland1992/seissuite_iran | build/lib.linux-x86_64-2.7/seissuite/spectrum/extrema_pickle.py | 8 | 9300 | # -*- coding: utf-8 -*-
"""
Created on Fri July 6 11:04:03 2015
@author: boland
"""
import os
import datetime
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
from scipy import signal
from obspy import read
from scipy.signal import argrelextrema
from info_dataless import locs_from_dataless
from scipy import interpolate
from matplotlib.colors import LogNorm
import pickle
import fiona
from shapely import geometry
from shapely.geometry import asPolygon, Polygon
from math import sqrt, radians, cos, sin, asin
from descartes.patch import PolygonPatch
from scipy.spatial import ConvexHull
from scipy.cluster.vq import kmeans
from shapely.affinity import scale
from matplotlib.path import Path
from scipy.interpolate import griddata
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------------------------
class InShape:
"""
Class defined in order to define a shapefile boundary AND quickly check
if a given set of coordinates is contained within it. This class uses
the shapely module.
"""
def __init__(self, input_shape, coords=0.):
#initialise boundary shapefile location string input
self.boundary = input_shape
#initialise coords shape input
self.dots = coords
#initialise boundary polygon
self.polygon = 0.
#initialise output coordinates that are contained within the polygon
self.output = 0.
def shape_poly(self):
with fiona.open(self.boundary) as fiona_collection:
            # In this case, we'll assume the shapefile only has one layer
shapefile_record = fiona_collection.next()
# Use Shapely to create the polygon
self.polygon = geometry.asShape( shapefile_record['geometry'] )
return self.polygon
def point_check(self, coord):
"""
Function that takes a single (2,1) shape input, converts the points
into a shapely.geometry.Point object and then checks if the coord
is contained within the shapefile.
"""
self.polygon = self.shape_poly()
point = geometry.Point(coord[0], coord[1])
if self.polygon.contains(point):
return coord
def shape_bounds(self):
"""
Function that returns the bounding box coordinates xmin,xmax,ymin,ymax
"""
self.polygon = self.shape_poly()
return self.polygon.bounds
def shape_buffer(self, shape=None, size=1., res=1):
"""
Function that returns a new polygon of the larger buffered points.
Can import polygon into function if desired. Default is
self.shape_poly()
"""
if shape is None:
self.polygon = self.shape_poly()
return asPolygon(self.polygon.buffer(size, resolution=res)\
.exterior)
def extract_poly_coords(self, poly):
if poly.type == 'Polygon':
exterior_coords = poly.exterior.coords[:]
elif poly.type == 'MultiPolygon':
exterior_coords = []
for part in poly:
epc = np.asarray(self.extract_poly_coords(part)) # Recursive call
exterior_coords.append(epc)
else:
raise ValueError('Unhandled geometry type: ' + repr(poly.type))
return np.vstack(exterior_coords)
def external_coords(self, shape=None, buff=None, size=1., res=1):
"""
Function that returns the external coords of a buffered shapely
polygon. Note that shape variable input
MUST be a shapely Polygon object.
"""
if shape is not None and buff is not None:
poly = self.shape_buffer(shape=shape, size=size, res=res)
elif shape is not None:
poly = shape
else:
poly = self.shape_poly()
exterior_coords = self.extract_poly_coords(poly)
return exterior_coords
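def _example_inshape_usage():
    # Hedged usage sketch (added; the shapefile path below is a placeholder,
    # not a file shipped with this repository).
    shape = InShape('/path/to/boundary.shp')
    xmin, ymin, xmax, ymax = shape.shape_bounds()
    # point_check returns the coordinate when it lies inside the polygon and
    # None otherwise
    hit = shape.point_check((144.96, -37.81))
    return hit, (xmin, ymin, xmax, ymax)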
#------------------------------------------------------------------------------
# IMPORT PATHS TO MSEED FILES
#------------------------------------------------------------------------------
def spectrum(tr):
wave = tr.data #this is how to extract a data array from a mseed file
fs = tr.stats.sampling_rate
#hour = str(hour).zfill(2) #create correct format for eqstring
f, Pxx_spec = signal.welch(wave, fs, 'flattop', nperseg=1024, scaling='spectrum')
#plt.semilogy(f, np.sqrt(Pxx_spec))
if len(f) >= 256:
column = np.column_stack((f[:255], np.abs(np.sqrt(Pxx_spec)[:255])))
return column
else:
return 0.
# x = np.linspace(0, 10, 1000)
# f_interp = interp1d(np.sqrt(Pxx_spec),f, kind='cubic')
#x.reverse()
#y.reverse()
# print f_interp(x)
#f,np.sqrt(Pxx_spec),'o',
# plt.figure()
# plt.plot(x,f_interp(x),'-' )
# plt.show()
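def _example_spectrum_usage():
    # Hedged example (added; the miniSEED path is a placeholder): compute the
    # Welch amplitude spectrum of the first trace in a file. `spectrum`
    # returns a (255, 2) array of [frequency, sqrt(power)], or 0. when the
    # frequency vector is shorter than 256 samples.
    st = read('/path/to/example.mseed')
    return spectrum(st[0])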
def paths_sort(path):
"""
Function defined for customised sorting of the abs_paths list
and will be used in conjunction with the sorted() built in python
function in order to produce file paths in chronological order.
"""
base_name = os.path.basename(path)
stat_name = base_name.split('.')[0]
date = base_name.split('.')[1]
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
return date, stat_name
    except Exception:
        # filename does not follow the station.date convention; fall back to
        # the default (None) sort key
        pass
def paths(folder_path, extension):
"""
Function that returns a list of desired absolute paths called abs_paths
of files that contains a given extension e.g. .txt should be entered as
folder_path, txt. This function will run recursively through and find
any and all files within this folder with that extension!
"""
abs_paths = []
for root, dirs, files in os.walk(folder_path):
for f in files:
fullpath = os.path.join(root, f)
if os.path.splitext(fullpath)[1] == '.{}'.format(extension):
abs_paths.append(fullpath)
abs_paths = sorted(abs_paths, key=paths_sort)
return abs_paths
# import background shapefile location
shape_path = "/home/boland/Dropbox/University/UniMelb\
/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
# generate shape object
# Generate InShape class
SHAPE = InShape(shape_path)
# Create shapely polygon from imported shapefile
UNIQUE_SHAPE = SHAPE.shape_poly()
# set plotting limits for shapefile boundaries
lonmin, latmin, lonmax, latmax = SHAPE.shape_bounds()
print lonmin, latmin, lonmax, latmax
#lonmin, lonmax, latmin, latmax = SHAPE.plot_lims()
dataless_path = 'ALL_AUSTRALIA.870093.dataless'
stat_locs = locs_from_dataless(dataless_path)
#folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
extension = 'mseed'
paths_list = paths(folder_path, extension)
t0_total = datetime.datetime.now()
figs_counter = 0
pickle_file = '/storage/ANT/spectral_density/station_pds_maxima/\
S Network 2014/noise_info0_SNetwork2014.pickle'
f = open(name=pickle_file, mode='rb')
noise_info0 = pickle.load(f)
f.close()
# plot the first-peak PDS maxima as an interpolated heat field
fig = plt.figure(figsize=(15,10), dpi=1000)
plt.title('Average Seismic Noise First Peak Maximum PDS\n S Network | 2014')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
patch = PolygonPatch(UNIQUE_SHAPE, facecolor='white',\
edgecolor='k', zorder=1)
ax = fig.add_subplot(111)
ax.add_patch(patch)
# station coordinates and first-peak PDS maxima from noise_info0
x, y = noise_info0[:,0], noise_info0[:,1]
points = np.column_stack((x,y))
xmin, xmax = np.min(x), np.max(x)
ymin, ymax = np.min(y), np.max(y)
values = noise_info0[:,2]
#now we create a grid of values, interpolated from our random sample above
y = np.linspace(ymin, ymax, 100)
x = np.linspace(xmin, xmax, 100)
gridx, gridy = np.meshgrid(x, y)
heat_field = griddata(points, values, (gridx, gridy), method='cubic',fill_value=0)
print heat_field
heat_field = np.where(heat_field < 0, 1, heat_field)
heat_field = np.ma.masked_where(heat_field==0,heat_field)
plt.pcolor(gridx, gridy, heat_field,
cmap='rainbow',alpha=0.5, norm=LogNorm(vmin=100, vmax=3e4),
zorder=2)
plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
norm=LogNorm(vmin=100, vmax=3e4), s=35, cmap='rainbow', zorder=3)
#cmin, cmax = np.min(noise_info0[:,2]), np.max(noise_info0[:,2])
#sc = plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
# norm=LogNorm(vmin=100, vmax=3e4), s=50, cmap=cm, zorder=2)
col = plt.colorbar()
col.ax.set_ylabel('Maximum Power Density Spectrum (V RMS)')
ax.set_xlim(lonmin-0.05*abs(lonmax-lonmin), \
lonmax+0.05*abs(lonmax-lonmin))
ax.set_ylim(latmin-0.05*abs(latmax-latmin), \
latmax+0.05*abs(latmax-latmin))
fig.savefig('station_pds_maxima/check1.svg', format='SVG')
| gpl-3.0 |
GoogleCloudPlatform/mlops-on-gcp | workshops/kfp-caip-sklearn/lab-02-kfp-pipeline/trainer_image/train.py | 12 | 3253 | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Covertype Classifier trainer script."""
import pickle
import subprocess
import sys
import fire
import hypertune
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
def train_evaluate(job_dir, training_dataset_path, validation_dataset_path,
alpha, max_iter, hptune):
"""Trains the Covertype Classifier model."""
df_train = pd.read_csv(training_dataset_path)
df_validation = pd.read_csv(validation_dataset_path)
if not hptune:
df_train = pd.concat([df_train, df_validation])
numeric_features = [
'Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology',
'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways',
'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm',
'Horizontal_Distance_To_Fire_Points'
]
categorical_features = ['Wilderness_Area', 'Soil_Type']
preprocessor = ColumnTransformer(transformers=[(
'num', StandardScaler(),
numeric_features), ('cat', OneHotEncoder(), categorical_features)])
pipeline = Pipeline([('preprocessor', preprocessor),
('classifier', SGDClassifier(loss='log'))])
num_features_type_map = {feature: 'float64' for feature in numeric_features}
df_train = df_train.astype(num_features_type_map)
df_validation = df_validation.astype(num_features_type_map)
print('Starting training: alpha={}, max_iter={}'.format(alpha, max_iter))
X_train = df_train.drop('Cover_Type', axis=1)
y_train = df_train['Cover_Type']
pipeline.set_params(classifier__alpha=alpha, classifier__max_iter=max_iter)
pipeline.fit(X_train, y_train)
if hptune:
X_validation = df_validation.drop('Cover_Type', axis=1)
y_validation = df_validation['Cover_Type']
accuracy = pipeline.score(X_validation, y_validation)
print('Model accuracy: {}'.format(accuracy))
# Log it with hypertune
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy', metric_value=accuracy)
# Save the model
if not hptune:
model_filename = 'model.pkl'
with open(model_filename, 'wb') as model_file:
pickle.dump(pipeline, model_file)
gcs_model_path = '{}/{}'.format(job_dir, model_filename)
subprocess.check_call(['gsutil', 'cp', model_filename, gcs_model_path],
stderr=sys.stdout)
print('Saved model in: {}'.format(gcs_model_path))
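# Example invocation (added for illustration; the bucket, file paths and
# hyperparameter values are placeholders, not resources created by this lab):
#
#   python train.py --job_dir=gs://my-bucket/jobs/covertype \
#     --training_dataset_path=gs://my-bucket/data/training.csv \
#     --validation_dataset_path=gs://my-bucket/data/validation.csv \
#     --alpha=0.001 --max_iter=500 --hptune=False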
if __name__ == '__main__':
fire.Fire(train_evaluate)
| apache-2.0 |
toolgirl/irish-folk-motifs | src/tune_fragment.py | 1 | 2261 | # Copyright (C) 2017 Zia Rauwolf. See LICENSE.txt
import subprocess
import pandas as pd
from tempfile import mkdtemp
import os.path
class TuneFragment(object):
"""
Takes in a row of a pandas DataFrame() or a dictionary and plays it as midi.
"""
def __init__(self, data):
"""
        Creates a temporary .abc file from the given data, converts it to MIDI
        with abc2midi, and plays it with timidity.
        Parameters
        ----------
        data : dict or pandas.Series
            Mapping with the tune fields ('abc', 'meter', 'mode' and optionally
            'type'), holding abc notation without the required .abc headers.
Attributes
----------
_temp_dir : tempfile
Temporary directory to put the working files in.
_abc_file : abc file
Path and file for temporary abc file.
_midi_file : midi file
Path and name for temporary midi file.
"""
self.data = data
self._temp_dir = mkdtemp()
self._abc_file = os.path.join(self._temp_dir, 'current.abc')
self._midi_file = os.path.join('/Users/zia/galvanize', 'current.mid')
self.create_abcfile()
self._convert_abc_2_midi()
def play(self):
"""
Takes the current data and plays it.
"""
subprocess.call(['timidity', self._midi_file])
def create_abcfile(self, filename=None):
'''
Writes the necessary lines to convert .txt to .abc.
'''
if filename is None:
filename = self._abc_file
else:
self._abc_file = filename
with open(filename, 'w') as f:
f.write('X:1\n')
f.write('M:{}\n'.format(self.data['meter']))
f.write('L:1/16\n'.format())
if 'type' in self.data:
f.write('T:{}\n'.format(self.data['type']))
f.write('K:{}\n'.format(self.data['mode']))
f.write('%%MIDI program 73\n') #plays flute (piano is program 0.)
f.write(self.data['abc'])
def _convert_abc_2_midi(self):
'''
        INPUT: .abc file
        Converts a .abc file (with at least an X: and a K: field, and a minimum
        fragment of one bar padded with z rests if necessary) to MIDI.
'''
subprocess.call(['abc2midi', self._abc_file, '-BF','-o', self._midi_file])
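def _example_play_fragment():
    # Hedged usage sketch (added): the keys below follow what create_abcfile
    # reads; the abc body is a made-up fragment. Playing requires abc2midi and
    # timidity to be installed and on the PATH.
    fragment = TuneFragment({'meter': '6/8', 'mode': 'Dmaj',
                             'type': 'jig', 'abc': 'ABA AGF|GBG GFE|D3 D3|'})
    fragment.play()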
| gpl-3.0 |
xiaoxiamii/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
ilyes14/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models are combined in a 2D graph where nodes
represents the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lookheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
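# At this point partial_correlations[i, j] equals
# precision[i, j] / sqrt(precision[i, i] * precision[j, j]); up to a sign
# flip these are the partial correlations, and only their absolute values
# are used below to pick and weight the edges.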
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
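# Heuristic (roughly): for each node, look at where its nearest neighbour
# lies along each axis and push the label to the opposite side
# (left/right and top/bottom), away from the most crowded direction.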
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
chiotlune/ext | gnuradio-3.7.0.1/gr-filter/examples/channelize.py | 58 | 7003 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 1000 # initial sampling rate
self._M = M = 9 # Number of channels to channelize
        self._ifs = M*self._fs # input (pre-channelizer) sampling rate
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = blocks.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
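        # Offset each tone by a multiple of the per-channel rate so that,
        # after channelizing, each output channel should contain exactly one
        # of the test tones (at the small offset listed in freqs).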
for i in xrange(len(freqs)):
f = freqs[i] + (M/2-M+i+1)*self._fs
self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-2.0 |
hgrif/incubator-airflow | airflow/hooks/hive_hooks.py | 5 | 28558 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from six.moves import zip
from past.builtins import basestring
import unicodecsv as csv
import itertools
import re
import subprocess
import time
from tempfile import NamedTemporaryFile
import hive_metastore
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.helpers import as_flattened_list
from airflow.utils.file import TemporaryDirectory
from airflow import configuration
import airflow.security.utils as utils
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
    It also supports ``beeline``,
    a lighter CLI that runs over JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
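    A minimal usage sketch (the connection id, queue and job name below are
    hypothetical examples, not objects shipped with Airflow)::
        hh = HiveCliHook(hive_cli_conn_id='my_hive_cli',
                         mapred_queue='default',
                         mapred_job_name='airflow_hql_example')
        hh.run_cli("SHOW TABLES;", schema='default')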
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
hive_conf_params = self._prepare_hiveconf(hive_conf)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue)])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
self.log.info(message)
error_loc = re.search('(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df,
table,
create=True,
recreate=False,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param field_dict: mapping from column name to hive data type
:type field_dict: dict
:param encoding: string encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
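        A minimal usage sketch (the table name below is a hypothetical
        example)::
            import pandas as pd
            df = pd.DataFrame({'state': ['NY', 'CA'], 'num': [100, 200]})
            hh = HiveCliHook()
            hh.load_df(df, table='tmp.babynames_stage', recreate=True)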
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
return dict((col, DTYPE_KIND_HIVE_TYPE[dtype.kind]) for col, dtype in df.dtypes.iteritems())
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
if field_dict is None and (create or recreate):
field_dict = _infer_field_types_from_df(df)
df.to_csv(f, sep=delimiter, **pandas_kwargs)
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
        large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values
:type field_dict: dict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
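        A minimal usage sketch (the file path and table name below are
        hypothetical examples)::
            hh = HiveCliHook()
            hh.load_file(
                filepath='/tmp/baby_names.csv',
                table='airflow.static_babynames_stage',
                field_dict={'state': 'STRING', 'year': 'INT', 'num': 'INT'},
                partition={'ds': '2015-01-01'},
                recreate=True)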
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n"
hql += ";"
hql = hql.format(**locals())
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
self.log.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
        # This is for pickling to work despite the thrift hive client not
        # being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
        :param schema: Name of hive schema (database) @table belongs to
        :type schema: string
        :param table: Name of hive table @partition belongs to
        :type table: string
        :param partition: Expression that matches the partitions to check for
            (eg `a = 'b' AND c = 'd'`)
        :type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
        :param schema: Name of hive schema (database) @table belongs to
        :type schema: string
        :param table: Name of hive table @partition belongs to
        :type table: string
        :param partition_name: Name of the partition to check for (eg `a=b/c=d`)
        :type partition_name: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
self.metastore._oprot.trans.open()
try:
self.metastore.get_partition_by_name(
schema, table, partition_name)
return True
except hive_metastore.ttypes.NoSuchObjectException:
return False
finally:
self.metastore._oprot.trans.close()
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
"""
        Get metastore table objects for the tables in ``db`` matching ``pattern``
"""
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
"""
        Get the metastore databases matching ``pattern``
"""
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
"""
        Returns a list of all partitions in a table. Works only
        for tables with fewer than 32767 partitions (the Java short max value).
        For a subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
"""
Returns the maximum value for all partitions in a table. Works only
        for tables that have a single partition key. For a subpartitioned
        table, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
"""
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
        try:
            self.get_table(table_name, db)
            return True
        except Exception:
            return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the impyla library
    Note that the default authMechanism is PLAIN; to override it you
    can specify it in the ``extra`` of your connection in the UI as in
    ``{"authMechanism": "GSSAPI"}``.
    """
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'PLAIN')
kerberos_service_name = None
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
        # impyla uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'KERBEROS':
self.log.warning(
"Detected deprecated 'KERBEROS' for authMechanism for %s. Please use 'GSSAPI' instead",
self.hiveserver2_conn_id
)
auth_mechanism = 'GSSAPI'
from impala.dbapi import connect
return connect(
host=db.host,
port=db.port,
auth_mechanism=auth_mechanism,
kerberos_service_name=kerberos_service_name,
user=db.login,
database=schema or db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
from impala.error import ProgrammingError
with self.get_conn(schema) as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
cur = conn.cursor()
for statement in hql:
cur.execute(statement)
records = []
try:
# impala Lib raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
records = cur.fetchall()
except ProgrammingError:
self.log.debug("get_results returned no records")
if records:
results = {
'data': records,
'header': cur.description,
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000):
schema = schema or 'default'
with self.get_conn(schema) as conn:
with conn.cursor() as cur:
self.log.info("Running query: %s", hql)
cur.execute(hql)
schema = cur.description
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
if output_header:
writer.writerow([c[0] for c in cur.description])
i = 0
while True:
rows = [row for row in cur.fetchmany(fetch_size) if row]
if not rows:
break
writer.writerows(rows)
i += len(rows)
self.log.info("Written %s rows so far.", i)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 |
shahankhatch/scikit-learn | sklearn/linear_model/least_angle.py | 37 | 53448 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa
#         Alexandre Gramfort
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (neither will they when using method 'lar'
        ..). Only coefficients up to the smallest alpha value (alphas_[alphas_ >
0.].min() when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
    .. [1] "Least Angle Regression", Efron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
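    Examples
    --------
    A small illustrative run on toy data (a sketch; the exact values in the
    returned arrays depend on the input):
    >>> import numpy as np
    >>> from sklearn.linear_model import lars_path
    >>> X = np.array([[1., 0.], [0., 1.], [1., 1.]])
    >>> y = np.array([1., 0.5, 2.])
    >>> alphas, active, coefs = lars_path(X, y, method='lasso')
    >>> coefs.shape[0] == X.shape[1]
    True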
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
# even if they are in the upper part that it not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
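        # LARS step length: move along the equiangular direction until the
        # correlation of some inactive variable catches up with the
        # (decreasing) common correlation C of the active set, i.e. take the
        # smallest positive g solving C - g * AA = +/-(Cov_j - g * a_j);
        # C / AA is the step that would drive the active correlations to
        # zero (the full least-squares fit on the active set).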
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (alphas_[alphas_ >
0.].min() when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
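    Examples
    --------
    A minimal usage sketch on synthetic data (illustrative only; the fitted
    values depend on the generated data, hence the skip directives):
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.linear_model import LarsCV
    >>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0)
    >>> reg = LarsCV(cv=5).fit(X, y)  # doctest: +SKIP
    >>> reg.alpha_  # doctest: +SKIP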
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
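        # Interpolate each fold's path onto the common alpha grid: lars_path
        # returns alphas in decreasing order, so reverse them first and pad
        # both ends so that every fold covers the full [0, max(all_alphas)]
        # range before calling interp1d.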
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (alphas_[alphas_ >
0.].min() when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (alphas_[alphas_ >
0.].min() when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
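        # Information criterion along the path: n_samples * log(MSE) + K * df,
        # with K = 2 for AIC and K = log(n_samples) for BIC (df is the number
        # of non-zero coefficients estimated above).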
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| bsd-3-clause |
madjelan/CostSensitiveClassification | costcla/models/regression.py | 1 | 9231 | """
This module include the cost-sensitive logistic regression method.
"""
# Authors: Alejandro Correa Bahnsen <[email protected]>
# License: BSD 3 clause
import numpy as np
import math
from scipy.optimize import minimize
from sklearn.base import BaseEstimator
# from sklearn.linear_model.logistic import _intercept_dot
from pyea import GeneticAlgorithmOptimizer
from ..metrics import cost_loss
# Not in sklearn 0.15, is in 0.16-git
#TODO: replace once sklearn 0.16 is release
# The one in sklearn 0.16 return yz instead of z, therefore,
# the impact on the code should be addressed before making the change.
def _intercept_dot(w, X):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = np.dot(X, w) + c
return w, c, z
def _sigmoid(z):
""" Private function that calculate the sigmoid function """
return 1 / (1 + np.exp(-z))
def _logistic_cost_loss_i(w, X, y, cost_mat, alpha):
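    """Cost loss for a single coefficient vector: average example-dependent
    cost of the sigmoid predictions plus the L2 penalty 0.5 * alpha * ||w||^2.
    """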
n_samples = X.shape[0]
w, c, z = _intercept_dot(w, X)
y_prob = _sigmoid(z)
out = cost_loss(y, y_prob, cost_mat) / n_samples
out += .5 * alpha * np.dot(w, w)
return out
def _logistic_cost_loss(w, X, y, cost_mat, alpha):
"""Computes the logistic loss.
Parameters
----------
w : array-like, shape (n_w, n_features,) or (n_w, n_features + 1,)
Coefficient vector or matrix of coefficient.
X : array-like, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
Returns
-------
out : float
Logistic loss.
"""
if w.shape[0] == w.size:
# Only evaluating one w
return _logistic_cost_loss_i(w, X, y, cost_mat, alpha)
else:
# Evaluating a set of w
n_w = w.shape[0]
out = np.zeros(n_w)
for i in range(n_w):
out[i] = _logistic_cost_loss_i(w[i], X, y, cost_mat, alpha)
return out
class CostSensitiveLogisticRegression(BaseEstimator):
"""A example-dependent cost-sensitive Logistic Regression classifier.
Parameters
----------
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
max_iter : int
Useful only for the ga and bfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'ga', 'bfgs'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
verbose : int, optional (default=0)
Controls the verbosity of the optimization process.
Attributes
----------
`coef_` : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
`intercept_` : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
See also
--------
sklearn.tree.DecisionTreeClassifier
References
----------
.. [1] A. Correa Bahnsen, D.Aouada, B, Ottersten,
`"Example-Dependent Cost-Sensitive Logistic Regression for Credit Scoring" <http://albahnsen.com/files/Example-Dependent%20Cost-Sensitive%20Logistic%20Regression%20for%20Credit%20Scoring_publish.pdf>`__,
in Proceedings of the International Conference on Machine Learning and Applications,
           2014.
Examples
--------
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.cross_validation import train_test_split
>>> from costcla.datasets import load_creditscoring2
>>> from costcla.models import CostSensitiveLogisticRegression
>>> from costcla.metrics import savings_score
>>> data = load_creditscoring2()
>>> sets = train_test_split(data.data, data.target, data.cost_mat, test_size=0.33, random_state=0)
>>> X_train, X_test, y_train, y_test, cost_mat_train, cost_mat_test = sets
>>> y_pred_test_lr = LogisticRegression(random_state=0).fit(X_train, y_train).predict(X_test)
>>> f = CostSensitiveLogisticRegression()
>>> f.fit(X_train, y_train, cost_mat_train)
>>> y_pred_test_cslr = f.predict(X_test)
>>> # Savings using Logistic Regression
>>> print savings_score(y_test, y_pred_test_lr, cost_mat_test)
0.00283419465107
>>> # Savings using Cost Sensitive Logistic Regression
>>> print savings_score(y_test, y_pred_test_cslr, cost_mat_test)
0.142872237978
"""
def __init__(self,
C=1.0,
fit_intercept=True,
max_iter=100,
random_state=None,
solver='ga',
tol=1e-4,
verbose=0):
self.C = C
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.random_state = random_state
self.solver = solver
self.tol = tol
self.coef_ = None
self.intercept_ = 0.
self.verbose = verbose
def fit(self, X, y, cost_mat):
""" Build a example-dependent cost-sensitive logistic regression from the training set (X, y, cost_mat)
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
y : array indicator matrix
Ground truth (correct) labels.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
Returns
-------
self : object
Returns self.
"""
#TODO: Check input
n_features = X.shape[1]
if self.fit_intercept:
w0 = np.zeros(n_features + 1)
else:
w0 = np.zeros(n_features)
if self.solver == 'ga':
#TODO: add n_jobs
res = GeneticAlgorithmOptimizer(_logistic_cost_loss,
w0.shape[0],
iters=self.max_iter,
type_='cont',
n_chromosomes=100,
per_mutations=0.25,
n_elite=10,
fargs=(X, y, cost_mat, 1. / self.C),
range_=(-5, 5),
n_jobs=1,
verbose=self.verbose)
res.fit()
elif self.solver == 'bfgs':
if self.verbose > 0:
disp = True
else:
disp = False
res = minimize(_logistic_cost_loss,
w0,
method='BFGS',
args=(X, y, cost_mat, 1. / self.C),
tol=self.tol,
options={'maxiter': self.max_iter, 'disp': disp})
if self.fit_intercept:
self.coef_ = res.x[:-1]
self.intercept_ = res.x[-1]
else:
self.coef_ = res.x
def predict_proba(self, X):
"""Probability estimates.
        The returned estimates for both classes are ordered by class label
        (column 0: negative class, column 1: positive class).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, 2]
Returns the probability of the sample for each class in the model.
"""
y_prob = np.zeros((X.shape[0], 2))
y_prob[:, 1] = _sigmoid(np.dot(X, self.coef_) + self.intercept_)
y_prob[:, 0] = 1 - y_prob[:, 1]
return y_prob
def predict(self, X, cut_point=0.5):
"""Predicted class.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples]
            Returns the predicted class of each sample.
"""
return np.floor(self.predict_proba(X)[:, 1] + (1 - cut_point))
| bsd-3-clause |
LeBarbouze/tunacell | scripts/univariate-analysis-2.py | 1 | 10791 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
script: univariate-analysis-2.py
following univariate-analysis.py, used to explore more features of univariate
analysis
In this script we show how to load/compute univariate analysis on other
types of observables, and how to build good-looking autocorrelation functions
by using the stationary hypothesis, where only time differences matter, which
in practice increases the effective sample size.
"""
from __future__ import print_function
from builtins import input
import argparse
import time
from tqdm import tqdm
import matplotlib.pyplot as plt
from tunacell import Experiment, Observable, FilterSet
from tunacell.base.observable import FunctionalObservable
from tunacell.filters.cells import FilterCellIDparity
from tunacell.stats.api import (compute_univariate, load_univariate,
compute_stationary, load_stationary, NoValidTimes)
from tunacell.stats.single import UnivariateIOError, StationaryUnivariateIOError
from tunacell.stats.utils import Regions, CompuParams
from tunacell.plotting.dynamics import plot_onepoint, plot_twopoints, plot_stationary
# close all open plots
plt.close('all')
# Arguments
argparser = argparse.ArgumentParser()
argparser.add_argument('-e', '--experiment', type=str,
help='Path to experiment root folder',
default='~/tmptunacell/simutest')
argparser.add_argument('-i', '--interactive',
help='Ask user to press Enter between parts',
action='store_true')
argparser.add_argument('--time', type=float,
help='Time per figure when non-interactive mode is on',
default=3)
args = argparser.parse_args()
single_plot_timing = args.time
msg = ('==============tunacell=tutorial==============\n'
'== ==\n'
'== Univariate analysis (2/2) ==\n'
'== ==\n'
'== This tutorial shows more details about ==\n'
'== the univariate analysis (statistics of ==\n'
'== single, dynamic observable): ==\n'
'== * import/export of results ==\n'
'== * details of stationary analysis ==\n'
'== * time-lapse, cell cycle observables ==\n'
'== (refer to comments in code to get more ==\n'
'== details) ==\n'
'== ==\n'
'==============tunacell=tutorial==============\n')
print(msg)
print()
# =============================================================================
# We start with the same settings as in univariate-analysis.py.
# We first load the univariate analysis that we performed, and exported
# in univariate-analysis.py (please run that script before starting this one)
# =============================================================================
msg = 'Loading experiment with evenID condition (from part 1/2)'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
# define the exp instance, no filter applied
path_to_exp = args.experiment
exp = Experiment(path_to_exp)
# define a condition
even = FilterCellIDparity('even')
condition = FilterSet(label='evenID', filtercell=even)
ou = Observable(name='exact-growth-rate', raw='ou')
# Reference values
md = exp.metadata
params = md['ornstein_uhlenbeck_params']
ref_mean = params['target']
ref_var = params['noise']/(2 * params['spring'])
ref_decayrate = params['spring']
print('Loading univariate results (computed and exported in part 1/2)')
# loading univariate analysis for the ou observable
univariate = load_univariate(exp, ou, cset=[condition, ])
# =============================================================================
# The last command would raise an exception of type UnivariateIOError if
# computations had not been exported before. We can use this property
# to try loading results, and if it fails, start the computation.
# Below we do so for a few other observables
# =============================================================================
# TIME-LAPSE OBSERVABLES (time-series per cell)
print('Defining a bunch of observables, time-lapse, and cell-cycle')
# local estimate of growth rate by using the differentiation of size measurement
# (the raw column 'exp_ou_int' plays the role of cell size in our simulations)
gr = Observable(name='approx-growth-rate', raw='exp_ou_int',
differentiate=True, scale='log',
local_fit=True, time_window=15.)
# dynamic, functional observable: twice the growth rate
ou2 = FunctionalObservable(name='double-growth-rate', f=lambda x : 2 * x, observables=[ou, ])
# time-aligned upon root cell division for size analysis
# fixing tref allows to align timeseries to a common origin; the 'root' option
# means that it will be aligned to each colony root cell division time
size = Observable(name='size', raw='exp_ou_int', tref='root')
continuous_obs = [ou, gr, ou2, size]
# SOME CELL-CYCLE TYPE OBSERVABLES (one value per cell)
# cell-cycle average growth rate
average_gr = Observable(name='average-growth-rate', raw='ou',
differentiate=False, scale='linear',
local_fit=False, mode='average', timing='g')
# size at cell division
division_size = Observable(name='division-size', raw='exp_ou_int',
differentiate=False, scale='log',
local_fit=False, mode='division', timing='g')
# increase in cell size timed at division time
increase = Observable(name='added-size', raw='exp_ou_int',
mode='net-increase-additive', timing='d')
cycle_obs = [average_gr, division_size, increase]
# Start computations
univariates_store = {}
figs = []
msg = 'Computing dynamic univariate statistics...'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
for obs in continuous_obs + cycle_obs:
print('* {} ...'.format(obs.name))
try:
univ = load_univariate(exp, obs, cset=[condition, ])
except UnivariateIOError:
univ = compute_univariate(exp, obs, cset=[condition, ])
univ.export_text() # save as text files
# store univariate object in a dic indexed by observable
univariates_store[obs] = univ
# some options for plotting functions
trefs = [40., 80., 150.]
grefs = [1, 2]
if obs in [ou, gr]:
kwargs = {'mean_ref': ref_mean,
'var_ref': ref_var}
kwargs2 = {'show_exp_decay': ref_decayrate,
'trefs': trefs}
elif obs in [ou2, ]:
kwargs = {'mean_ref': 2 * ref_mean,
'var_ref': 4 * ref_var}
kwargs2 = {'show_exp_decay': ref_decayrate,
'trefs': trefs}
elif obs in [size, increase]:
kwargs = {}
kwargs2 = {'trefs': trefs}
elif obs in [average_gr, ]:
kwargs = {'mean_ref': ref_mean}
kwargs2 = {'trefs': grefs}
else:
kwargs = {}
kwargs2 = {'trefs': grefs}
# print('Ok')
fig = plot_onepoint(univ, show_ci=True, save=True, verbose=False, **kwargs)
fig.show()
figs.append(fig)
fig2 = plot_twopoints(univ, save=True, verbose=False, **kwargs2)
# figs.append(fig2) # commented: too much figures
if args.interactive:
ans = input('Press Enter to close these figures and proceed to stationary autocorrelation analysis')
else:
for seconds in tqdm(range(10*len(figs)), desc='waiting'):
time.sleep(single_plot_timing/10)
plt.close('all')
# =============================================================================
# A look at the onepoint functions allows the user to identify regions of time
# where the process looks stationary. There is a function to define such
# regions, and in fact, we already use one in the previous computations,
# defined by default: the region 'ALL' that comprises all time values.
# Here we mention the used region explicitely, and we stick to the 'ALL'
# region since we find the process stationary on the entire time course
# =============================================================================
regions = Regions(exp)
regions.reset() # eliminate all regions except 'ALL'
steady_region = regions.get('ALL')
# and we need to use some computation options (more on that elsewhere)
# define computation options
options = CompuParams() # leaving to default is safe
# =============================================================================
# Now we proceed in the same way: try to load, if it fails, compute.
# We call the plotting function accordingly.
# =============================================================================
msg = 'Computing stationary autocorrelation functions...'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
figs = []
for obs in continuous_obs + cycle_obs:
print('* {} ...'.format(obs.name))
# need the univariate object to compute stationary statistics
univ = univariates_store[obs]
try:
stat = load_stationary(univ, steady_region, options)
except StationaryUnivariateIOError:
try:
stat = compute_stationary(univ, steady_region, options)
stat.export_text() # save as text files
except NoValidTimes:
stat = None
# print('Ok')
# plotting features
if obs in [ou, gr, ou2]:
kwargs = {'show_exp_decay': ref_decayrate}
else:
kwargs = {}
if stat is not None:
fig = plot_stationary(stat, save=True, verbose=False, **kwargs)
fig.show()
figs.append(fig)
if args.interactive:
ans = input('Press Enter to close these figures and proceed')
else:
for seconds in tqdm(range(10*len(figs)), desc='waiting'):
time.sleep(single_plot_timing/10)
plt.close('all')
# =============================================================================
# For the sake of demonstration, we define here another, smaller region
# =============================================================================
msg = 'Selecting a smaller region of time'
dashes = len(msg) * '*'
print(msg + '\n' + dashes)
regions.add(name='beginning', tmin=0., tmax=100.)
reg = regions.get('beginning')
# we just start the computation for exact-growth-rate
univ = univariates_store[ou]
try:
stat = load_stationary(univ, reg, options)
except StationaryUnivariateIOError:
stat = compute_stationary(univ, reg, options)
stat.export_text()
fig = plot_stationary(stat, save=True, verbose=False, show_exp_decay=ref_decayrate)
fig.show()
if args.interactive:
ans = input('Press Enter to close these figures and terminate script')
else:
for seconds in tqdm(range(10), desc='waiting'):
time.sleep(single_plot_timing/10)
plt.close('all')
| mit |
vsmolyakov/opt | bayes_opt/bayes_opt_sklearn.py | 1 | 3486 | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn.datasets import make_classification
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.svm import SVC
from bayes_opt import BayesianOptimization
np.random.seed(0)
# Load data set and target values
data, target = make_classification(
n_samples=1000,
n_features=45,
n_informative=12,
n_redundant=7
)
def svccv(gamma):
val = cross_val_score(
SVC(gamma=gamma, random_state=0),
data, target, 'f1', cv=2
).mean()
return val
def rfccv(n_estimators, max_depth):
val = cross_val_score(
RFC(n_estimators=int(n_estimators),
max_depth=int(max_depth),
random_state=0
),
data, target, 'f1', cv=2
).mean()
return val
def posterior(bo, x, xmin=-2, xmax=10):
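    """Refit the optimizer's GP on the points evaluated so far and return
    the posterior mean and standard deviation over the grid x."""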
xmin, xmax = -2, 10
bo.gp.fit(bo.X, bo.Y)
mu, sigma = bo.gp.predict(x, return_std=True)
return mu, sigma
def plot_gp(bo, x):
fig = plt.figure(figsize=(16, 10))
fig.suptitle('Gaussian Process and Utility Function After {} Steps'.format(len(bo.X)), fontdict={'size':30})
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axis = plt.subplot(gs[0])
acq = plt.subplot(gs[1])
mu, sigma = posterior(bo, x)
axis.plot(bo.X.flatten(), bo.Y, 'D', markersize=8, label=u'Observations', color='r')
axis.plot(x, mu, '--', color='k', label='Prediction')
axis.fill(np.concatenate([x, x[::-1]]),
np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),
alpha=.6, fc='c', ec='None', label='95% confidence interval')
axis.set_xlim((0, 0.1))
axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size':20})
axis.set_xlabel('x', fontdict={'size':20})
utility = bo.util.utility(x, bo.gp, 0)
acq.plot(x, utility, label='Utility Function', color='purple')
acq.plot(x[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
acq.set_xlim((0, 0.1))
acq.set_ylim((0, np.max(utility) + 0.5))
acq.set_ylabel('Utility', fontdict={'size':20})
acq.set_xlabel('x', fontdict={'size':20})
axis.legend()
acq.legend()
if __name__ == "__main__":
gp_params = {"alpha": 1e-5}
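    # gp_params are forwarded to the underlying Gaussian process through
    # maximize(**gp_params); alpha acts as a noise/jitter term added to the
    # kernel diagonal for numerical stability.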
#SVM
svcBO = BayesianOptimization(svccv,
{'gamma': (0.00001, 0.1)})
svcBO.maximize(init_points=3, n_iter=4, **gp_params)
#Random Forest
rfcBO = BayesianOptimization(
rfccv,
{'n_estimators': (10, 300),
'max_depth': (2, 10)
}
)
rfcBO.explore({'max_depth': [2, 4, 6], 'n_estimators': [64, 128, 256]})
rfcBO.maximize(init_points=4, n_iter=4, **gp_params)
print('Final Results')
print('SVC: %f' % svcBO.res['max']['max_val'])
print('RFC: %f' % rfcBO.res['max']['max_val'])
#visualize results
x = np.linspace(0.00001,0.1,64).reshape(-1,1)
plot_gp(svcBO,x)
plt.show()
rfc_X = map(lambda x: round(x,0), rfcBO.X[:,0])
rfc_Y = map(lambda x: round(x,0), rfcBO.X[:,1])
data = pd.DataFrame(data={'n_est':rfc_X,'max_depth':rfc_Y,'score':rfcBO.Y})
data = data.pivot(index='n_est', columns='max_depth', values='score')
sns.heatmap(data)
plt.show()
| mit |
yavalvas/yav_com | build/matplotlib/doc/utils/pylab_names.py | 4 | 1632 | from __future__ import print_function
"""
autogenerate some tables for pylab namespace
"""
from pylab import *
d = locals()
keys = d.keys()
keys.sort()
modd = dict()
for k in keys:
o = d[k]
if not callable(o):
continue
doc = getattr(o, '__doc__', None)
if doc is not None:
doc = ' - '.join([line for line in doc.split('\n') if line.strip()][:2])
mod = getattr(o, '__module__', None)
if mod is None:
mod = 'unknown'
if mod is not None:
if mod.startswith('matplotlib'):
if k[0].isupper():
k = ':class:`~%s.%s`'%(mod, k)
else:
k = ':func:`~%s.%s`'%(mod, k)
mod = ':mod:`%s`'%mod
elif mod.startswith('numpy'):
#k = '`%s <%s>`_'%(k, 'http://scipy.org/Numpy_Example_List_With_Doc#%s'%k)
k = '`%s <%s>`_'%(k, 'http://sd-2116.dedibox.fr/pydocweb/doc/%s.%s'%(mod, k))
if doc is None: doc = 'TODO'
mod, k, doc = mod.strip(), k.strip(), doc.strip()[:80]
modd.setdefault(mod, []).append((k, doc))
mods = modd.keys()
mods.sort()
for mod in mods:
border = '*'*len(mod)
print(mod)
print(border)
print()
funcs, docs = zip(*modd[mod])
maxfunc = max([len(f) for f in funcs])
maxdoc = max(40, max([len(d) for d in docs]) )
border = ' '.join(['='*maxfunc, '='*maxdoc])
print(border)
print(' '.join(['symbol'.ljust(maxfunc), 'description'.ljust(maxdoc)]))
print(border)
for func, doc in modd[mod]:
        row = ' '.join([func.ljust(maxfunc), doc.ljust(maxdoc)])
print(row)
print(border)
print()
#break
| mit |
akhilaananthram/nupic.research | sequence_prediction/continuous_sequence/processNN5dataset.py | 1 | 1892 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import pandas as pd
import csv
def saveSeriesToCSV(fileName, dataSet):
outputFile = open(fileName,"w")
csvWriter = csv.writer(outputFile)
csvWriter.writerow(['date', 'data'])
csvWriter.writerow(['datetime', 'float'])
csvWriter.writerow(['T', ''])
for r in range(len(dataSet)):
csvWriter.writerow([str(dataSet.index[r]), dataSet[r]])
outputFile.close()
df = pd.read_excel('./data/NN5dataset.xlsx', header=0, skiprows=[1, 2, 3], index_col=0)
(numRec, numFile) = df.shape
numRecTrain = 735
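# the first 735 daily records of each series go to the training file; the
# remaining records are written to the matching *_cont.csv continuation file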
# break into series and train/test dataset
for i in range(numFile):
dataSetName = df.columns[i]
print " save data: ", dataSetName
dataSet = pd.Series(df[dataSetName])
trainfileName = './data/NN5/' + dataSetName + '.csv'
testfileName = './data/NN5/' + dataSetName + '_cont.csv'
saveSeriesToCSV(trainfileName, dataSet[:numRecTrain])
saveSeriesToCSV(testfileName, dataSet[numRecTrain:])
| gpl-3.0 |
nmayorov/scikit-learn | sklearn/grid_search.py | 8 | 38406 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module has been deprecated in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
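                # Decode ind as a mixed-radix number: each divmod step picks
                # the offset into one value list and carries the quotient on
                # to the next parameter.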
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
as sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
         * The long-standing behavior of this method changed in version 0.16:
           it no longer uses the metric provided by ``estimator.score`` if the
           ``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of 4-tuples: (score, n_test_samples, time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
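            # With iid=True the per-fold scores were weighted by their test
            # set sizes above, so divide by the total number of test samples;
            # otherwise take a plain mean over folds.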
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
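    Examples
    --------
    A minimal usage sketch, assuming ``scipy.stats`` supplies the
    distribution for ``C``; output is omitted.

    >>> from sklearn import svm, grid_search, datasets
    >>> from scipy.stats import expon
    >>> iris = datasets.load_iris()
    >>> param_distributions = {'kernel': ['linear', 'rbf'],
    ...                        'C': expon(scale=10)}
    >>> clf = grid_search.RandomizedSearchCV(svm.SVC(), param_distributions,
    ...                                      n_iter=5, random_state=0)
    >>> clf.fit(iris.data, iris.target)  # doctest: +SKIP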
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
| bsd-3-clause |
mikebenfield/scikit-learn | sklearn/mixture/dpgmm.py | 25 | 35852 | """Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos ([email protected])
# Bertrand Thirion <[email protected]>
#
# Based on mixture.py by:
# Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp, pinvh, squared_norm, stable_cumsum
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import _GMMBase
@deprecated("The function digamma is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.digamma instead.")
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
@deprecated("The function gammaln is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.gammaln instead.")
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
@deprecated("The function log_normalize is deprecated in 0.18 and "
"will be removed in 0.20.")
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
@deprecated("The function wishart_log_det is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
@deprecated("The function wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approximation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class _DPGMMBase(_GMMBase):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <dpgmm>`.
Parameters
----------
n_components : int, default 1
Number of mixture components.
covariance_type : string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha : float, default 1
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
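    Examples
    --------
    A minimal sketch of intended usage, assuming ``fit`` and ``predict``
    are supplied by the GMM base class; output is omitted.

    >>> import numpy as np
    >>> from sklearn.mixture import DPGMM
    >>> X = np.concatenate([np.random.randn(30, 2),
    ...                     10 + np.random.randn(30, 2)])
    >>> model = DPGMM(n_components=5, alpha=1.0, n_iter=50)
    >>> model.fit(X)      # doctest: +SKIP
    >>> model.predict(X)  # doctest: +SKIP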
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
super(_DPGMMBase, self).__init__(n_components, covariance_type,
random_state=random_state,
tol=tol, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params,
verbose=verbose)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
# Free memory and developers cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = stable_cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
check_is_fitted(self, 'means_')
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def _fit(self, X, y=None):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
        An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.random_state_ = check_random_state(self.random_state)
# initialization step
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
z = np.ones((n_samples, self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state_).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + n_samples)
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
curr_logprob, z = self.score_samples(X)
current_log_likelihood = (
curr_logprob.mean() + self._logprior(z) / n_samples)
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < self.tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
if self.n_iter == 0:
# Need to make sure that there is a z value to output
# Output zeros because it was just a quick initialization
z = np.zeros((X.shape[0], self.n_components))
self._set_weights()
return z
@deprecated("The `DPGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be "
"removed in 0.20.")
class DPGMM(_DPGMMBase):
"""Dirichlet Process Gaussian Mixture Models
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with
parameter ``weight_concentration_prior_type='dirichlet_process'``
instead.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
super(DPGMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
alpha=alpha, random_state=random_state, tol=tol, verbose=verbose,
min_covar=min_covar, n_iter=n_iter, params=params,
init_params=init_params)
@deprecated("The `VBGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. "
"VBGMM is deprecated in 0.18 and will be removed in 0.20.")
class VBGMM(_DPGMMBase):
"""Variational Inference for the Gaussian Mixture Model
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with parameter
``weight_concentration_prior_type='dirichlet_distribution'`` instead.
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <vbgmm>`.
Parameters
----------
n_components : int, default 1
Number of mixture components.
covariance_type : string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha : float, default 1
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
tol=tol, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = alpha
def _fit(self, X, y=None):
"""Estimate model parameters with the variational algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
        An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you just would like to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.alpha_ = float(self.alpha) / self.n_components
return super(VBGMM, self)._fit(X, y)
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha_ + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha_ * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha_)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha_))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha_)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
| bsd-3-clause |
alexsavio/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that partial_fit raises after n_components is changed via set_params.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of input features raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
TshepangRas/tshilo-dikotla | tshilo_dikotla/views/statistics_view.py | 1 | 5407 | import asyncio
import pandas as pd
import json
import pytz
from datetime import date, datetime
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http.response import HttpResponse
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from edc_base.views import EdcBaseViewMixin
from edc_constants.constants import CLOSED, NO, YES
from edc_sync.models.outgoing_transaction import OutgoingTransaction
from call_manager.models import Call
from td_maternal.models import MaternalConsent, PotentialCall
tz = pytz.timezone(settings.TIME_ZONE)
class StatisticsView(EdcBaseViewMixin, TemplateView):
template_name = 'tshilo_dikotla/home.html'
def __init__(self):
self._response_data = {}
self.columns = [
'consented',
'consented_today',
'contacted_retry',
'contacted_today',
'not_consented',
'not_contacted',
'consent_verified',
'pending_transactions',
'potential_calls']
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(
title=settings.PROJECT_TITLE,
project_name=settings.PROJECT_TITLE,
)
return context
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(StatisticsView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
if request.is_ajax():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop = asyncio.get_event_loop()
future_a = asyncio.Future()
future_b = asyncio.Future()
future_c = asyncio.Future()
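            # Each coroutine below fills one future with a partial
            # response_data dict; the three results are merged once the
            # event loop completes.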
tasks = [
self.potential_call_data(future_a),
self.transaction_data(future_b),
self.contact_data(future_c),
]
loop.run_until_complete(asyncio.wait(tasks))
self.response_data.update(future_a.result())
self.response_data.update(future_b.result())
self.response_data.update(future_c.result())
loop.close()
return HttpResponse(json.dumps(self.response_data), content_type='application/json')
return self.render_to_response(context)
@asyncio.coroutine
def contact_data(self, future):
response_data = {}
calls = Call.objects.filter(call_attempts__gte=1)
if calls:
response_data.update(contacted_retry=calls.exclude(call_status=CLOSED).count())
        calls = calls.filter(**self.modified_option)
if calls:
response_data.update(contacted_today=calls.count())
future.set_result(self.verified_response_data(response_data))
@asyncio.coroutine
def transaction_data(self, future):
response_data = {}
tx = OutgoingTransaction.objects.filter(is_consumed_server=False)
if tx:
response_data.update(pending_transactions=tx.count())
future.set_result(self.verified_response_data(response_data))
@asyncio.coroutine
def potential_call_data(self, future):
response_data = {}
columns = ['id', 'contacted', 'consented', 'modified']
qs = PotentialCall.objects.values_list(*columns).all()
# columns = ['id', 'consented', 'modified']
# qs = MaternalConsent.objects.values_list(*columns).all()
potential_calls = pd.DataFrame(list(qs), columns=columns)
if not potential_calls.empty:
response_data.update({
'potential_calls': int(potential_calls['id'].count()),
'not_contacted': int(potential_calls.query('contacted == False')['contacted'].count()),
'not_consented': int(potential_calls.query('consented == False')['consented'].count()),
'consented': int(potential_calls.query('consented == True')['consented'].count()),
'consent_verified': int(potential_calls.query('consented == True')['consented'].count()),
})
d = date.today()
local_date = tz.localize(datetime(d.year, d.month, d.day, 0, 0, 0))
potential_calls = potential_calls[(potential_calls['modified'] >= local_date)]
response_data.update({
'contacted_today': int(potential_calls.query('contacted == True')['contacted'].count()),
'consented_today': int(potential_calls.query('consented == True')['consented'].count()),
})
future.set_result(self.verified_response_data(response_data))
@property
def modified_option(self):
d = date.today()
local_date = tz.localize(datetime(d.year, d.month, d.day, 0, 0, 0))
return {'modified__gte': local_date}
def verified_response_data(self, response_data):
diff = set(response_data.keys()).difference(set(self.response_data.keys()))
if diff:
raise KeyError('Invalid key or keys in response data dictionary. Got {}'.format(diff))
return response_data
@property
def response_data(self):
if not self._response_data:
self._response_data = dict(zip(self.columns, len(self.columns) * [0]))
return self._response_data
| gpl-2.0 |
qiime2/q2-types | q2_types/per_sample_sequences/_util.py | 1 | 13180 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import gzip
import os
import shutil
import pandas as pd
import qiime2.util
import skbio
import yaml
# Note: we DI all of the formats into these utils so that we don't wind
# up in circular import mayhem. That is all.
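# _parse_casava_filename expects a Casava/Illumina-style name such as
# 'sampleid_S1_L001_R1_001.fastq.gz' (illustrative), which parses to
# sample_id='sampleid', barcode_id='S1', lane_number=1, read_number=1 and
# direction='forward'.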
def _parse_casava_filename(path, parse_lane=True):
directions = ['forward', 'reverse']
filename = str(path).replace('.fastq.gz', '')
if parse_lane:
sample_id, barcode_id, lane_number, read_number, _ = \
filename.rsplit('_', maxsplit=4)
else:
sample_id, barcode_id, read_number, _ = \
filename.rsplit('_', maxsplit=3)
read_number = int(read_number[1:])
lane_number = int(lane_number[1:]) if parse_lane else 1
direction = directions[read_number - 1]
return sample_id, barcode_id, lane_number, read_number, direction
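# The helper below copies each per-sample fastq into the output format,
# records it in a manifest CSV with the header 'sample-id,filename,direction'
# and attaches a metadata YAML containing 'phred-offset: 33'.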
def _single_lane_per_sample_fastq_helper(dirfmt, output_cls, manifest_fmt,
fastq_fmt, yaml_fmt, parse_lane=True):
result = output_cls()
manifest = manifest_fmt()
manifest_fh = manifest.open()
manifest_fh.write('sample-id,filename,direction\n')
for path, view in dirfmt.sequences.iter_views(fastq_fmt):
parsed = _parse_casava_filename(path, parse_lane)
sample_id, barcode_id, lane_number, read_number, direction = parsed
result.sequences.write_data(view, fastq_fmt, sample_id=sample_id,
barcode_id=barcode_id,
lane_number=lane_number,
read_number=read_number)
filepath = result.sequences.path_maker(sample_id=sample_id,
barcode_id=barcode_id,
lane_number=lane_number,
read_number=read_number)
name = filepath.name
manifest_fh.write('%s,%s,%s\n' % (sample_id, name, direction))
manifest_fh.close()
result.manifest.write_data(manifest, manifest_fmt)
metadata = yaml_fmt()
metadata.path.write_text(yaml.dump({'phred-offset': 33}))
result.metadata.write_data(metadata, yaml_fmt)
return result
def _dirfmt_to_casava(dirfmt_in, manifest_fmt, abs_manifest_fmt, fastq_fmt,
casava_fmt):
dirfmt_out = casava_fmt()
for fastq, _ in dirfmt_in.sequences.iter_views(fastq_fmt):
from_fp = str(dirfmt_in.path / fastq.name)
to_fp = str(dirfmt_out.path / fastq.name)
qiime2.util.duplicate(from_fp, to_fp)
return dirfmt_out
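# A hypothetical manifest accepted below could look like this (the exact
# header is taken from the injected format's EXPECTED_HEADER attribute):
#
#   sample-id,filename,direction
#   sample-1,sample-1_R1.fastq.gz,forward
#   sample-1,sample-1_R2.fastq.gz,reverse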
def _parse_and_validate_manifest(manifest_fh, single_end, absolute,
abs_manifest_fmt, manifest_fmt):
try:
manifest = pd.read_csv(manifest_fh, comment='#', header=0,
skip_blank_lines=True, dtype=object)
except Exception as e:
raise ValueError('There was an issue parsing the manifest '
'file as CSV:\n %s' % e)
expected_header = (abs_manifest_fmt.EXPECTED_HEADER if
absolute else manifest_fmt.EXPECTED_HEADER)
_validate_header(manifest, expected_header)
for idx in manifest.index:
record = manifest.loc[idx]
if record.isnull().any():
raise ValueError('Empty cells are not supported in '
'manifest files. Found one or more '
'empty cells in this record: %s'
% ','.join(map(str, record)))
record[expected_header[1]] = \
os.path.expandvars(record[expected_header[1]])
path = record[expected_header[1]]
if absolute:
if not os.path.isabs(path):
raise ValueError('All paths provided in manifest must be '
'absolute but found relative path: %s' % path)
else:
if os.path.isabs(path):
raise ValueError('All paths provided in manifest must be '
'relative but found absolute path: %s' % path)
path = os.path.join(os.path.dirname(manifest_fh.name), path)
if not os.path.exists(path):
raise FileNotFoundError(
'A path specified in the manifest does not exist '
'or is not accessible: '
'%s' % path)
if single_end:
_validate_single_end_fastq_manifest_directions(manifest)
else:
_validate_paired_end_fastq_manifest_directions(manifest)
return manifest
def _validate_header(manifest, expected_header):
header = manifest.columns.tolist()
if header != expected_header:
raise ValueError('Expected manifest header %r but '
'found %r.'
% (','.join(expected_header), ','.join(header)))
def _duplicated_ids(sample_ids):
counts = collections.Counter(sample_ids).most_common()
if len(counts) == 0 or counts[0][1] == 1:
# if there were no sample ids provided, or the most frequent sample id
# was only observed once, there are no duplicates
return []
else:
return [e[0] for e in counts if e[1] > 1]
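# Example of the helper above (hypothetical sample ids, for illustration only):
#     _duplicated_ids(['s1', 's2', 's1', 's3'])  # -> ['s1']
#     _duplicated_ids(['s1', 's2', 's3'])        # -> []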
def _validate_single_end_fastq_manifest_directions(manifest):
directions = set(manifest['direction'])
if not directions.issubset({'forward', 'reverse'}):
raise ValueError('Directions can only be "forward" or '
'"reverse", but observed: %s'
% ', '.join(directions))
if len(directions) > 1:
raise ValueError('Manifest for single-end reads can '
'contain only forward or reverse reads, '
'but not both. The following directions were '
'observed: %s' % ', '.join(directions))
duplicated_ids = _duplicated_ids(manifest['sample-id'])
if len(duplicated_ids) > 0:
raise ValueError('Each sample id can only appear one time in a '
'manifest for single-end reads, but the following '
'sample ids were observed more than once: '
'%s' % ', '.join(duplicated_ids))
def _validate_paired_end_fastq_manifest_directions(manifest):
forward_direction_sample_ids = []
reverse_direction_sample_ids = []
for _, sample_id, _, direction in manifest.itertuples():
if direction == 'forward':
forward_direction_sample_ids.append(sample_id)
elif direction == 'reverse':
reverse_direction_sample_ids.append(sample_id)
else:
raise ValueError('Directions can only be "forward" or '
'"reverse", but observed: %s' % direction)
duplicated_ids_forward = _duplicated_ids(forward_direction_sample_ids)
if len(duplicated_ids_forward) > 0:
raise ValueError('Each sample id can have only one forward read '
'record in a paired-end read manifest, but the '
'following sample ids were associated with more '
'than one forward read record: '
'%s' % ', '.join(duplicated_ids_forward))
duplicated_ids_reverse = _duplicated_ids(reverse_direction_sample_ids)
if len(duplicated_ids_reverse) > 0:
raise ValueError('Each sample id can have only one reverse read '
'record in a paired-end read manifest, but the '
'following sample ids were associated with more '
'than one reverse read record: '
'%s' % ', '.join(duplicated_ids_reverse))
if sorted(forward_direction_sample_ids) != \
sorted(reverse_direction_sample_ids):
forward_but_no_reverse = set(forward_direction_sample_ids) - \
set(reverse_direction_sample_ids)
if len(forward_but_no_reverse) > 0:
raise ValueError('Forward and reverse reads must be provided '
'exactly one time each for each sample. The '
'following samples had forward but not '
'reverse read fastq files: %s'
% ', '.join(forward_but_no_reverse))
else:
reverse_but_no_forward = set(reverse_direction_sample_ids) - \
set(forward_direction_sample_ids)
raise ValueError('Forward and reverse reads must be provided '
'exactly one time each for each sample. The '
'following samples had reverse but not '
'forward read fastq files: %s'
% ', '.join(reverse_but_no_forward))
def _copy_with_compression(src, dst):
with open(src, 'rb') as src_fh:
if src_fh.read(2)[:2] != b'\x1f\x8b':
src_fh.seek(0)
# SO: http://stackoverflow.com/a/27069578/579416
# shutil.copyfileobj will pick a pretty good chunksize for us
with gzip.open(dst, 'wb') as dst_fh:
shutil.copyfileobj(src_fh, dst_fh)
return
qiime2.util.duplicate(src, dst)
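# Sketch of what the helper above does (the paths are hypothetical):
#     _copy_with_compression('reads.fastq', 'out/reads.fastq.gz')
#     # plain-text input: gzip-compressed while copying
#     _copy_with_compression('reads.fastq.gz', 'out/reads.fastq.gz')
#     # input already starts with the gzip magic bytes 0x1f 0x8b: duplicated as-is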
def _fastq_manifest_helper(fmt, fastq_copy_fn, single_end, se_fmt, pe_fmt,
abs_manifest_fmt, manifest_fmt, yaml_fmt):
direction_to_read_number = {'forward': 1, 'reverse': 2}
input_manifest = _parse_and_validate_manifest(
fmt.open(),
single_end=single_end,
absolute=True,
abs_manifest_fmt=abs_manifest_fmt,
manifest_fmt=manifest_fmt,
)
if single_end:
result = se_fmt()
else:
result = pe_fmt()
output_manifest_data = []
for idx, sample_id, input_fastq_fp, direction in \
input_manifest.itertuples():
read_number = direction_to_read_number[direction]
output_fastq_fp = \
result.sequences.path_maker(sample_id=sample_id,
# the remaining values aren't used
# internally by QIIME, so their values
# aren't very important
barcode_id=idx,
lane_number=1,
read_number=read_number)
output_manifest_data.append(
[sample_id, output_fastq_fp.name, direction])
fastq_copy_fn(input_fastq_fp, str(output_fastq_fp))
output_manifest = manifest_fmt()
output_manifest_df = \
pd.DataFrame(output_manifest_data,
columns=output_manifest.EXPECTED_HEADER)
output_manifest_df.to_csv(str(output_manifest), index=False)
result.manifest.write_data(output_manifest, manifest_fmt)
metadata = yaml_fmt()
metadata.path.write_text(yaml.dump({'phred-offset': 33}))
result.metadata.write_data(metadata, yaml_fmt)
return result
_phred64_warning = ('Importing of PHRED 64 data is slow as it is converted '
'internally to PHRED 33. Working with the imported data '
'will not be slower than working with PHRED 33 data.')
def _write_phred64_to_phred33(phred64_path, phred33_path):
with open(phred64_path, 'rb') as phred64_fh, \
open(phred33_path, 'wb') as phred33_fh:
for seq in skbio.io.read(phred64_fh, format='fastq',
variant='illumina1.3'):
skbio.io.write(seq, into=phred33_fh,
format='fastq',
variant='illumina1.8',
compression='gzip')
def _manifest_v2_to_v1(fmt, manifest_fmt):
df = qiime2.Metadata.load(str(fmt)).to_dataframe()
    # Drop unnecessary metadata columns
df = df[list(fmt.METADATA_COLUMNS.keys())]
denormalized_dfs = []
for column, direction in fmt.METADATA_COLUMNS.items():
denormalized_df = df[[column]]
original_index_name = denormalized_df.index.name
denormalized_df.reset_index(drop=False, inplace=True)
denormalized_df.rename(columns={
original_index_name: 'sample-id',
column: 'absolute-filepath'
}, inplace=True)
denormalized_df['direction'] = direction
denormalized_dfs.append(denormalized_df)
old_fmt = manifest_fmt()
pd.concat(denormalized_dfs, axis=0).to_csv(str(old_fmt), index=False)
return old_fmt
def _manifest_to_df(ff, base_dir):
manifest = pd.read_csv(str(ff), header=0, comment='#')
manifest.filename = manifest.filename.apply(
lambda f: os.path.join(base_dir, f))
df = manifest.pivot(index='sample-id', columns='direction',
values='filename')
df.columns.name = None
return df
| bsd-3-clause |
research-team/memristive-brain | mem_neuron_code/delta_t_fb_finder.py | 1 | 5270 | from datetime import datetime as dt
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
from nptdms import TdmsFile
import numpy as np
import argparse
import fnmatch
import os
# func to find files in folder
def file_finder(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
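# Illustrative usage of file_finder (the folder layout is hypothetical):
#     file_finder('*.tdms', '/data/experiment_01')
#     # -> ['/data/experiment_01/run1.tdms', '/data/experiment_01/backup/run2.tdms']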
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", type = str, required = True,
help="path to folder with tdms file")
ap.add_argument("-o", "--output", type = str, required = False,
help="output folder")
args = vars(ap.parse_args())
# find tdms file in folder
TDMSPath = file_finder('*.tdms', args['input'])[0]
# open the found tdms file
with TdmsFile.open(TDMSPath) as file_tdms:
    # find all input channels in tdms and grab the time channel
grp='input_pins'
channels=[i.name for i in file_tdms[grp].channels()]
channel=file_tdms[grp][channels[0]]
times=channel.time_track()
# choose the time period of interest in the experiment (seconds)
time_period_begin = 1138.7 #351.7
time_period_end = 1141.5 #391.3
time_index_range_begin = np.where(times >= time_period_begin)[0][0]
time_index_range_end = np.where(times <= time_period_end)[-1][-1]
time = times[time_index_range_begin : time_index_range_end]
# show input channels with matplotlib
fig = plt.figure(figsize=(200,70))
plt.rcParams.update({'font.size': 64})
'''
fig.add_subplot(2, 1, 1)
inp1 = np.array(file_tdms[grp]['inp1'][time_index_range_begin : time_index_range_end])
out = np.array(file_tdms[grp]['out' ][time_index_range_begin : time_index_range_end])
plt.plot(time, inp1, time, out)
    plt.ylabel('V_inp1_out (V)') # y-axis label
    plt.xlabel('Time (s)') # x-axis label
    plt.grid() # enable grid display
fig.add_subplot(2, 1, 2)
mem1fb = np.array(file_tdms[grp]['mem1fb'][time_index_range_begin : time_index_range_end])
plt.plot(time, mem1fb)
    plt.ylabel('V_mem1fb (V)') # y-axis label
    plt.xlabel('Time (s)') # x-axis label
    plt.grid() # enable grid display
'''
inp1 = np.array(file_tdms[grp]['inp1'][time_index_range_begin : time_index_range_end])
out = np.array(file_tdms[grp]['out' ][time_index_range_begin : time_index_range_end])
# find delta_t on selected time period between inp1 and out
input_time_start_high_voltage = []
input_val_start_high_voltage = []
for c, i in enumerate(inp1):
if c == 0:
first_point = i
continue
second_point = i
if second_point - first_point > 1:
input_time_start_high_voltage.append(time[c])
input_val_start_high_voltage.append(inp1[c])
first_point = second_point
j = 0
input_val_start_high_voltage_n = []
input_time_start_high_voltage_n = []
for xk, yk in zip(input_val_start_high_voltage, input_time_start_high_voltage):
if j % 2 != 0:
input_val_start_high_voltage_n.append(xk)
input_time_start_high_voltage_n.append(yk)
j += 1
output_time_start_high_voltage = []
output_val_start_high_voltage = []
for c, i in enumerate(out):
if c == 0:
first_point = i
continue
second_point = i
if second_point - first_point > 1: #and c % 2 != 0:
output_time_start_high_voltage.append(time[c])
output_val_start_high_voltage.append(out[c])
first_point = second_point
fig.add_subplot(3, 1, 1)
plt.plot(time, inp1)
plt.plot(input_time_start_high_voltage_n, input_val_start_high_voltage_n, 'x', color = 'black')
plt.plot(time, out)
plt.plot(output_time_start_high_voltage, output_val_start_high_voltage, 'x', color = 'black')
#print(input_time_start_high_voltage, output_time_start_high_voltage)
for x, y in zip(input_time_start_high_voltage_n, output_time_start_high_voltage):
print(y - x)
fig.add_subplot(3, 1, 2)
mem1fb = np.array(file_tdms[grp]['mem1fb'][time_index_range_begin : time_index_range_end])
peaks_fb, _ = find_peaks(mem1fb, distance = 1, height = 0.8)
difr = np.diff(peaks_fb)
countxjk_array = []
for countxjk, xjk in enumerate(difr):
if xjk > 500:
countxjk_array.append(countxjk + 1)
test_peaks = np.split(peaks_fb, countxjk_array)
mean_index = []
mean_value = []
for test_peak in test_peaks:
mean_index.append(int(np.mean(test_peak)))
mean_value.append(np.median(mem1fb[test_peak]))
#print(mean_index)
#print(mean_value)
#print(peaks_fb)
plt.plot(time, mem1fb)
plt.plot(time[mean_index], mean_value, 'x', color = 'red')
fig.add_subplot(3, 1, 3)
difer = abs(np.diff(mean_value))
plt.boxplot(difer)
#plt.show()
plt.savefig('delta_t/datas_{}_{}.pdf'.format(time_period_begin, time_period_end))
plt.close('all')
| mit |
Kirubaharan/hydrology | cumulative_impact_of_check_dam/rainfall_had.py | 1 | 2007 | __author__ = 'kiruba'
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import itertools
import checkdam.checkdam as cd
# rain file
rain_file = '/media/kiruba/New Volume/KSNDMC 15 mins Daily Data/dailyrainfalldata15minsdailyrainfalldata15minsf/TUBAGERE.csv'
rain_df = pd.read_csv(rain_file, sep=',', header=0)
# print rain_df.head()
rain_df.drop(["TRGCODE", "DISTRICT", "TALUKNAME", "HOBLINAME", "HOBLICODE", "PHASE", "COMPANY", "TYPE", "CATEGORY", "FIRSTREPORTED", "Total" ], inplace=True, axis=1)
data_1 = []
for row_no, row in rain_df.iterrows():
date = row['Date']
for time, value in row.ix[1:, ].iteritems():
data_1.append((date, time, value))
data_1_df = pd.DataFrame(data_1,columns=['date', 'time', 'rain(mm)'])
# print data_1_df.head()
# print data_1_df.tail()
date_format_1 = "%d-%b-%y %H:%M"
data_1_df['date_time'] = pd.to_datetime(data_1_df['date'] + ' ' + data_1_df['time'], format=date_format_1)
data_1_df.set_index(data_1_df['date_time'], inplace=True)
data_1_df.sort_index(inplace=True)
data_1_df.drop(['date_time', 'date', 'time'], axis=1, inplace=True)
# cumulative difference
data_1_8h_df = data_1_df['2010-01-01 8H30T': '2015-11-30 8H30T']
data_1_8h_df['diff'] = 0.000
for d1, d2 in cd.pairwise(data_1_8h_df.index):
if data_1_8h_df['rain(mm)'][d2] > data_1_8h_df['rain(mm)'][d1]:
data_1_8h_df['diff'][d2] = data_1_8h_df['rain(mm)'][d2] - data_1_8h_df['rain(mm)'][d1]
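# Note (descriptive, based on the loop above): cd.pairwise is assumed to yield
# consecutive timestamp pairs, e.g. (t0, t1), (t1, t2), ..., so 'diff' holds the
# 15-minute increase of the cumulative gauge reading; decreases (gauge resets)
# are left as zero.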
"""
Remove duplicates
"""
rain_df = data_1_8h_df
rain_df['index'] = rain_df.index
rain_df.drop_duplicates(subset='index', take_last=True, inplace=True)
del rain_df['index']
rain_df.sort_index(inplace=True)
# print rain_df.head()
# resample_daily
rain_df_daily_had = rain_df.resample('D', how=np.sum, label='left', closed='left')
print(rain_df_daily_had.head())
rain_df_daily_had.to_csv('/media/kiruba/New Volume/milli_watershed/cumulative impacts/had_rainfall_daily.csv')
rain_df.to_csv('/media/kiruba/New Volume/milli_watershed/cumulative impacts/had_rainfall.csv') | gpl-3.0 |
Hiyorimi/scikit-image | doc/examples/filters/plot_entropy.py | 9 | 2234 | """
=======
Entropy
=======
In information theory, information entropy is the log-base-2 of the number of
possible outcomes for a message.
For an image, local entropy is related to the complexity contained in a given
neighborhood, typically defined by a structuring element. The entropy filter can
detect subtle variations in the local gray level distribution.
In the first example, the image is composed of two surfaces with two slightly
different distributions. The image has a uniform random distribution in the
range [-15, +15] in the middle of the image and a uniform random distribution in
the range [-14, +14] at the image borders, both centered at a gray value of 128.
To detect the central square, we compute the local entropy measure using a
circular structuring element of a radius big enough to capture the local gray
level distribution. The second example shows how to detect texture in the camera
image using a smaller structuring element.
"""
import matplotlib.pyplot as plt
import numpy as np
from skimage import data
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
from skimage.morphology import disk
# First example: object detection.
noise_mask = 28 * np.ones((128, 128), dtype=np.uint8)
noise_mask[32:-32, 32:-32] = 30
noise = (noise_mask * np.random.random(noise_mask.shape) - 0.5 *
noise_mask).astype(np.uint8)
img = noise + 128
entr_img = entropy(img, disk(10))
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(8, 3))
ax0.imshow(noise_mask, cmap=plt.cm.gray)
ax0.set_xlabel("Noise mask")
ax1.imshow(img, cmap=plt.cm.gray)
ax1.set_xlabel("Noisy image")
ax2.imshow(entr_img)
ax2.set_xlabel("Local entropy")
fig.tight_layout()
# Second example: texture detection.
image = img_as_ubyte(data.camera())
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(10, 4), sharex=True,
sharey=True,
subplot_kw={"adjustable": "box-forced"})
img0 = ax0.imshow(image, cmap=plt.cm.gray)
ax0.set_title("Image")
ax0.axis("off")
fig.colorbar(img0, ax=ax0)
img1 = ax1.imshow(entropy(image, disk(5)), cmap=plt.cm.gray)
ax1.set_title("Entropy")
ax1.axis("off")
fig.colorbar(img1, ax=ax1)
fig.tight_layout()
plt.show()
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
martinwicke/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 4 | 46466 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import inspect
import itertools
import os
import tempfile
import time
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import device_setter
from tensorflow.python.training import saver
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
df = data_feeder.setup_train_data_feeder(x, y, n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
return input_fn, feed_fn
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
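# Illustrative usage of the helper above (a sketch; the data is hypothetical):
#     x = np.random.rand(100, 3)
#     feature_columns = infer_real_valued_columns_from_input(x)
#     # -> FeatureColumns describing three dense float features, suitable for
#     #    passing to canned estimators via their feature_columns argument.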
def _get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly name given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
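# Illustrative metrics specification as encouraged above (a sketch; the metric
# name, prediction_key and choice of metric_fn are assumptions, not fixed API):
#     metrics = {
#         'accuracy': metric_spec.MetricSpec(
#             metric_fn=metrics_lib.streaming_accuracy,
#             prediction_key='classes'),
#     }
#     eval_metric_ops = _make_metrics_ops(metrics, features, labels, predictions)
#     # -> {'accuracy': (value_tensor, update_op)}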
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Concrete implementation of this class should provide the following functions:
* _get_train_ops
* _get_eval_ops
* _get_predict_ops
`Estimator` implemented below is a good example of how to use this class.
"""
__metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: A RunConfig instance.
"""
# Model directory.
self._model_dir = model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size'
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
input_fn, feed_fn = _get_input_fn(x, y, input_fn, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
loss = self._train_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitors=monitors,
max_steps=max_steps)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size'
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time. Or when model is taking long time
to converge, and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size'
)
def evaluate(
self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None,
steps=None, metrics=None, name=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
input_fn, feed_fn = _get_input_fn(x, y, input_fn=input_fn,
feed_fn=feed_fn, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'batch_size',
'as_iterable'
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=input_fn, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
return self._infer_model(
input_fn=input_fn, feed_fn=feed_fn, outputs=outputs,
as_iterable=as_iterable)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated_arg_values(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate. '
'input_fn (and in most cases, input_feature_key) will become required '
'args, and use_deprecated_input_fn will default to False and be removed '
'altogether.',
use_deprecated_input_fn=True,
input_fn=None)
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
        the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
# pylint: enable=protected-access
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _train_model(self,
input_fn,
steps,
feed_fn=None,
init_op=None,
init_feed_fn=None,
init_fn=None,
device_fn=None,
monitors=None,
log_every_steps=100,
fail_on_nan_loss=True,
max_steps=None):
# TODO(wicke): Remove this once Model and associated code are gone.
if hasattr(self._config, 'execution_mode'):
if self._config.execution_mode not in ('all', 'train'):
return
# Stagger startup of worker sessions based on task id.
sleep_secs = min(
self._config.training_worker_max_startup_secs,
self._config.task_id *
self._config.training_worker_session_startup_stagger_secs)
if sleep_secs:
logging.info('Waiting %d secs before starting task %d.', sleep_secs,
self._config.task_id)
time.sleep(sleep_secs)
# Device allocation
device_fn = device_fn or self._device_fn
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
# The default return type of _get_train_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_train_ops returns a
# (train_op, loss) tuple. The following else-statement code covers these
# cases, but will soon be deleted after the subclasses are updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
train_ops = self._get_train_ops(features, labels)
if isinstance(train_ops, model_fn_lib.ModelFnOps): # Default signature
train_op = train_ops.train_op
loss_op = train_ops.loss
else: # Legacy signature
if len(train_ops) != 2:
raise ValueError('Expected a tuple of train_op and loss, got {}'.
format(train_ops))
train_op = train_ops[0]
loss_op = train_ops[1]
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
ops.add_to_collection(ops.GraphKeys.LOSSES, loss_op)
return graph_actions._monitored_train( # pylint: disable=protected-access
graph=g,
output_dir=self._model_dir,
train_op=train_op,
loss_op=loss_op,
global_step_tensor=global_step,
init_op=init_op,
init_feed_dict=init_feed_fn() if init_feed_fn is not None else None,
init_fn=init_fn,
log_every_steps=log_every_steps,
supervisor_is_chief=self.config.is_chief,
supervisor_master=self._config.master,
supervisor_save_model_secs=self._config.save_checkpoints_secs,
supervisor_save_model_steps=self._config.save_checkpoints_steps,
supervisor_save_summaries_steps=self._config.save_summary_steps,
keep_checkpoint_max=self._config.keep_checkpoint_max,
feed_fn=feed_fn,
steps=steps,
fail_on_nan_loss=fail_on_nan_loss,
hooks=hooks,
max_steps=max_steps)
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name=''):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained.
checkpoint_path = self._model_dir
latest_path = saver.latest_checkpoint(checkpoint_path)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% checkpoint_path)
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
# The default return type of _get_eval_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_eval_ops returns an
# `eval_dict` dictionary of Tensors. The following else-statement code
# covers these cases, but will soon be deleted after the subclasses are
# updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
eval_ops = self._get_eval_ops(features, labels, metrics)
if isinstance(eval_ops, model_fn_lib.ModelFnOps): # Default signature
eval_dict = eval_ops.eval_metric_ops
else: # Legacy signature
eval_dict = eval_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
eval_results, current_global_step = graph_actions.evaluate(
graph=g,
output_dir=eval_dir,
checkpoint_path=checkpoint_path,
eval_dict=eval_dict,
update_op=update_op,
global_step_tensor=global_step,
supervisor_master=self._config.evaluation_master,
feed_fn=feed_fn,
max_steps=steps)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(
self, input_fn, feed_fn=None, outputs=None, as_iterable=True):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
# The default return type of _get_predict_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_predict_ops returns a
# `predictions` Tensor or dict or Tensors. The following else-statement
# code covers these cases, but will soon be deleted after the subclasses
# are updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
infer_ops = self._get_predict_ops(features)
if isinstance(infer_ops, model_fn_lib.ModelFnOps): # Default signature
predictions = infer_ops.predictions
else: # Legacy signature
predictions = infer_ops
# If predictions is single output - wrap it into dict, and remember to
# return not a dict.
return_dict = isinstance(predictions, dict)
if not return_dict:
predictions = {'predictions': predictions}
# Filter what to run predictions on, if outputs provided.
if outputs:
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
if as_iterable:
return self._infer_model_as_iterable(
checkpoint_path, predictions, feed_fn, return_dict)
else:
return self._infer_model_single(
checkpoint_path, predictions, feed_fn, return_dict)
def _infer_model_single(
self, checkpoint_path, predictions, feed_fn, return_dict):
if feed_fn is None:
preds = graph_actions.infer(checkpoint_path, predictions)
else:
def _feed_fn():
while True:
yield feed_fn()
outputs = graph_actions.run_feeds(
output_dict=predictions,
feed_dicts=_feed_fn(),
restore_checkpoint_path=checkpoint_path)
preds = {
key: np.concatenate([output[key] for output in outputs], axis=0)
for key in predictions}
return preds if return_dict else preds['predictions']
def _infer_model_as_iterable(
self, checkpoint_path, predictions, feed_fn, return_dict):
if feed_fn is None:
feed_dicts = itertools.repeat(None)
else:
def _feed_fn():
while True:
yield feed_fn()
feed_dicts = _feed_fn()
try:
for output_batch in graph_actions.run_feeds_iter(
output_dict=predictions,
feed_dicts=feed_dicts,
restore_checkpoint_path=checkpoint_path):
# Unpack batches into individual predictions
if return_dict:
batch_length = list(output_batch.values())[0].shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(output_batch)}
else:
for pred in output_batch['predictions']:
yield pred
except errors.OutOfRangeError:
# We fall out of the above loop naturally if feed_fn raises StopIteration,
# or we catch an OutOfRangeError if we've reached the end of inputs.
logging.info('Reached end of inputs for predict_iter.')
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features` are single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels` are `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode` specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params` is a `dict` of hyperparameters. Will receive what
            is passed to Estimator in the `params` parameter. This allows
            Estimators to be configured through hyperparameter tuning.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports next three signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _get_arguments(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) has less than 4 '
'arguments, but not None params (%s) are passed.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
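  # Illustrative model_fn matching the signature documented above (a hypothetical
  # sketch written against the public tf.* / tf.contrib API, not part of this
  # library; the layer and optimizer choices are arbitrary):
  #     def my_model_fn(features, labels, mode, params):
  #       predictions = layers.fully_connected(features, 1, activation_fn=None)
  #       loss = tf.reduce_mean(tf.square(predictions - labels))
  #       train_op = layers.optimize_loss(
  #           loss, contrib_framework.get_global_step(),
  #           learning_rate=params['learning_rate'], optimizer='SGD')
  #       return model_fn_lib.ModelFnOps(
  #           mode=mode, predictions=predictions, loss=loss, train_op=train_op)
  #
  #     estimator = Estimator(model_fn=my_model_fn, params={'learning_rate': 0.01})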
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _get_arguments(self._model_fn)
if 'mode' in model_fn_args:
if 'params' in model_fn_args:
model_fn_results = self._model_fn(features, labels, mode=mode,
params=self.params)
else:
model_fn_results = self._model_fn(features, labels, mode=mode)
else:
model_fn_results = self._model_fn(features, labels)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_ops should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
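  # ---------------------------------------------------------------------
  # Editor's note: a hedged sketch (not part of the original module) of a
  # `metrics` argument of the shape described in the docstring above. It
  # assumes `MetricSpec` from tf.contrib.learn's metric_spec module and
  # `streaming_accuracy` from tf.contrib.metrics; the metric name
  # 'accuracy', the prediction key 'class' and `eval_input_fn` are
  # illustrative only.
  #
  #   metrics = {
  #       'accuracy': MetricSpec(metric_fn=streaming_accuracy,
  #                              prediction_key='class'),
  #   }
  #   estimator.evaluate(input_fn=eval_input_fn, metrics=metrics)
  # ---------------------------------------------------------------------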
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
# During the deprecation period of x,y in Estimator, allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
loss = self._estimator._train_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitors=monitors,
max_steps=max_steps)
logging.info('Loss for final step: %s.', loss)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
return self._estimator._infer_model(
input_fn=input_fn, feed_fn=feed_fn, outputs=outputs,
as_iterable=False)
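# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch, not part of the original module. It
# assumes a user-defined `my_model_fn(features, labels)` and numpy arrays
# `x_train`, `y_train`, `x_test`; the model_dir and steps values are
# illustrative only.
#
#   estimator = Estimator(model_fn=my_model_fn, model_dir='/tmp/my_model')
#   classifier = SKCompat(estimator)
#   classifier.fit(x_train, y_train, batch_size=128, steps=200)
#   print(classifier.score(x_test, y_test))
#   predictions = classifier.predict(x_test)
# ---------------------------------------------------------------------------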
| apache-2.0 |
nightjean/Deep-Learning | tensorflow/examples/learn/text_classification_cnn.py | 8 | 4386 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
def cnn_model(features, target):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
target = tf.one_hot(target, 15, 1, 0)
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
word_vectors, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
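# Editor's note (illustrative shape walk-through derived from the constants
# above, not part of the original example):
#   word_vectors:                           [batch, 100, 20, 1]
#   conv1 (VALID, 20x20 window):            [batch, 81, 1, 10]
#   pool1 (window 4, stride 2, SAME):       [batch, 41, 1, 10] -> transposed to [batch, 41, 10, 1]
#   conv2 (VALID, 20x10 window):            [batch, 22, 1, 10]
#   pool2 (max over axis 1, then squeeze):  [batch, 10]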
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = learn.SKCompat(learn.Estimator(model_fn=cnn_model))
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = classifier.predict(x_test)['class']
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
Dataweekends/pyladies_intro_to_data_science | Iris Flowers Workshop.py | 1 | 5598 |
# coding: utf-8
# # Separating Flowers
# This notebook explores a classic Machine Learning Dataset: the Iris flower dataset
#
# ## Tutorial goals
# 1. Explore the dataset
# 2. Build a simple predictive model
# 3. Iterate and improve your score
#
# How to follow along:
#
# git clone https://github.com/dataweekends/pyladies_intro_to_data_science
#
# cd pyladies_intro_to_data_science
#
# ipython notebook
# We start by importing the necessary libraries:
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
# ### 1) Explore the dataset
# #### Numerical exploration
#
# - Load the csv file into memory using Pandas
# - Describe each attribute
# - is it discrete?
# - is it continuous?
# - is it a number?
# - Identify the target
# - Check if any values are missing
#
# Load the csv file into memory using Pandas
# In[ ]:
df = pd.read_csv('iris-2-classes.csv')
# What's the content of ```df``` ?
# In[ ]:
df.head(3)
# Describe each attribute (is it discrete? is it continuous? is it a number? is it text?)
# In[ ]:
df.info()
# Are the features continuous or discrete?
# In[ ]:
df.describe()
# #### Identify the target
# What are we trying to predict?
# ah, yes... the type of Iris flower!
# In[ ]:
df['iris_type'].value_counts()
# Check if any values are missing
# In[ ]:
df.info()
# #### Mental notes so far:
#
# - Dataset contains 100 entries
# - 1 Target column (```iris_type```)
# - 4 Numerical Features
# - No missing values
# #### Visual exploration
# - plot the distribution of the Sepal Length feature
# - check the influence of Sepal Length on the target
# Plot the distribution of Sepal Length
# In[ ]:
df['sepal_length_cm'].plot(kind='hist', figsize=(10,6))
plt.title('Distribution of Sepal Length', size = '20')
plt.xlabel('Sepal Length (cm)', size = '20')
plt.ylabel('Number of flowers', size = '20')
# check the influence of Sepal Length
# In[ ]:
df[df['iris_type']=='virginica']['sepal_length_cm'].plot(kind='hist', bins = 10, range = (4,7),
figsize=(10,6), alpha = 0.3, color = 'b')
df[df['iris_type']=='versicolor']['sepal_length_cm'].plot(kind='hist', bins = 10, range = (4,7),
figsize=(10,6), alpha = 0.3, color = 'g')
plt.title('Distribution of Sepal Length', size = '20')
plt.xlabel('Sepal Length (cm)', size = '20')
plt.ylabel('Number of flowers', size = '20')
plt.legend(['Virginica', 'Versicolor'])
plt.show()
# Check the influence of two features combined
# In[ ]:
plt.scatter(df[df['iris_type']== 'virginica']['petal_length_cm'].values,
df[df['iris_type']== 'virginica']['sepal_length_cm'].values, label = 'Virginica', c = 'b')
plt.scatter(df[df['iris_type']== 'versicolor']['petal_length_cm'].values,
df[df['iris_type']== 'versicolor']['sepal_length_cm'].values, label = 'Versicolor', c = 'r')
plt.legend(['virginica', 'versicolor'], loc = 2)
plt.title('Scatter plot', size = '20')
plt.xlabel('Petal Length (cm)', size = '20')
plt.ylabel('Sepal Length (cm)', size = '20')
plt.show()
# Ok, so, the flowers seem to have different characteristics
#
# Let's build a simple model to test that
# Define a new target column called "target" that is 1 if iris_type == 'virginica' and 0 otherwise
# In[ ]:
df['target'] = df['iris_type'].map({'virginica': 1, 'versicolor': 0})
print(df[['iris_type', 'target']].head(2))
print("")
print(df[['iris_type', 'target']].tail(2))
# Define simplest model as benchmark
# The simplest model is a model that predicts 0 for everybody, i.e. all versicolor.
#
# How good is it?
# In[ ]:
actual_versicolor = len(df[df['target'] == 0])
total_flowers = len(df)
ratio_of_versicolor = actual_versicolor / float(total_flowers)
print "If I predict every flower is versicolor, I'm correct %0.1f %% of the time" % (100 * ratio_of_versicolor)
df['target'].value_counts()
# We need to do better than that
# Define features (X) and target (y) variables
# In[ ]:
X = df[['sepal_length_cm', 'sepal_width_cm',
'petal_length_cm', 'petal_width_cm']]
y = df['target']
# Initialize a Decision Tree model
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=0)
model
# Split the features and the target into Train and Test subsets.
#
# Ratio should be 70/30
# In[ ]:
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.3, random_state=0)
# Train the model
# In[ ]:
model.fit(X_train, y_train)
# Calculate the model score
# In[ ]:
my_score = model.score(X_test, y_test)
print "Classification Score: %0.2f" % my_score
# Print the confusion matrix for the decision tree model
# In[ ]:
from sklearn.metrics import confusion_matrix
y_pred = model.predict(X_test)
print "\n=======confusion matrix=========="
print confusion_matrix(y_test, y_pred)
# ### 3) Iterate and improve
#
# Now you have a basic pipeline. How can you improve the score? Try:
# - changing the parameters of the model
# check the documentation here:
# http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html
#
# - changing the model itself
# check examples here:
# http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html
#
# - try separating 3 classes of flowers using the ```iris.csv``` dataset provided
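# For example, a hedged sketch (the Random Forest and its parameter values are
# illustrative, not tuned for this dataset):
# In[ ]:
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier(n_estimators=50, random_state=0)
rf_model.fit(X_train, y_train)
print("Random Forest score: %0.2f" % rf_model.score(X_test, y_test))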
| mit |
pxzhang94/GAN | neuraltest/utils.py | 2 | 3230 | import tensorflow as tf
import tensorflow.contrib.slim as slim
import math
import matplotlib.pyplot as plt
import numpy as np
import os
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def get_preidcate_labels(predicate, sample_vectors):
predicate_labels = []
for index, sample_vector in enumerate(sample_vectors):
predicate_label = predicate(sample_vector)
predicate_labels.append([float(i) for i in predicate_label])
return np.array(predicate_labels)
def evaluate_samples(predicate, sample_vectors, batch_labels):
number_0 = 0
correct_0 = 0
correct_1 = 0
for index, sample_vector in enumerate(sample_vectors):
if batch_labels[index] == 0:
number_0 += 1
if batch_labels[index] == predicate(sample_vector):
correct_0 += 1
else:
if batch_labels[index] == predicate(sample_vector):
correct_1 += 1
return (correct_0 + correct_1) / len(batch_labels) * 100, correct_0 / number_0 * 100, correct_1 / (len(batch_labels) - number_0) * 100
def visualize_results(corrects):
epochs = [epoch + 1 for epoch in range(len(corrects))]
plt.plot(epochs, corrects)
plt.show()
def concat(x, y):
return tf.concat([x, y], 1)
def batch_norm(x, is_training, scope):
return tf.layers.batch_normalization(
x,
epsilon=1e-5,
momentum=0.9,
training=is_training,
name=scope)
def lrelu(x, leak=0.2, name="lrelu"):
return tf.maximum(x, leak * x)
def layer_init(input_layer, output_size, scope=None, with_w=False):
shape = input_layer.get_shape().as_list()
with tf.variable_scope(scope or "layer"):
weight = tf.get_variable(
"weights",
[
shape[1],
output_size],
tf.float32,
initializer=tf.truncated_normal_initializer(
stddev=1.0 /
math.sqrt(
shape[0])))
bias = tf.get_variable(
"bias",
[output_size],
initializer=tf.constant_initializer(0.0))
return tf.matmul(input_layer, weight) + bias
def layer_initialization(shape, scope=None, with_w=False):
with tf.variable_scope(scope or "layer"):
weight = tf.get_variable(
"weights",
shape,
tf.float32,
tf.random_normal_initializer(
stddev=1.0 /
math.sqrt(
float(
shape[0]))))
bias = tf.get_variable(
"bias",
shape[1],
initializer=tf.constant_initializer(0.0))
return weight, bias
def save_results(corrects, predictor):
epochs = [epoch + 1 for epoch in range(len(corrects))]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(epochs, corrects)
ax.set_title(predictor)
ax.grid(True)
file_path = os.path.dirname(os.path.abspath("neuraltest"))
fig.savefig(os.path.join(file_path, "results", predictor), dpi=200)
# fig.savefig("/Users/pxzhang/Documents/SUTD/NeuralTest/results/" + predictor, dpi=200)
| apache-2.0 |
ashhher3/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 19 | 22876 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_loss_grad_hess,
_multinomial_loss_grad_hess
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
"""Simple sanity check on a 2 classes dataset
Make sure it predicts the correct result on simple datasets.
"""
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
"""Test for appropriate exception on errors"""
assert_raises(ValueError, LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
"""Test logistic regression with the iris dataset"""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_multinomial_binary():
"""Test multinomial LR on a binary problem."""
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
"""Test sparsify and densify members."""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
"""Test that an exception is raised on inconsistent input"""
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
"""Test that we can write to coef_ and intercept_"""
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
"""Test proper NaN handling.
Regression test for Issue #252: fit used to go into an infinite loop.
"""
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
"""Test that the path algorithm is consistent"""
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_random_state():
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0)
lr2.fit(X, y)
assert_array_almost_equal(lr1.coef_, lr2.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_loss_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_2, grad_2, hess = _logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
loss_interp_2, grad_interp_2, hess = \
_logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
"""test for LogisticRegressionCV object"""
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
loss_interp, grad_interp, hess_interp = _logistic_loss_grad_hess(
w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e column of one vectors.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
loss, grad, hess = _logistic_loss_grad_hess(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
"""Test that OvR and multinomial are correct using the iris dataset."""
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined fold as folds generated for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test the liblinear fails when class_weight of type dict is
# provided, when it is multiclass. However it can handle
# binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=auto
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='auto')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='auto')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
"""Test that warnings are raised if model does not converge"""
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
"""Tests for the multinomial option in logistic regression"""
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=50, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path give almost the same results. However since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
_, grad, hessp = _multinomial_loss_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_loss_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_loss_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
"""Test negative prediction when decision_function values are zero.
Liblinear predicts the positive class when decision_function values
are zero. This is a test to verify that we do not do the same.
See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
"""
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
"""Test LogRegCV with solver='liblinear' works for sparse matrices"""
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Interface_Mesh_Types/Interface_2/HardContact_ElPPlShear/Interface_Test_Normal_Plot.py | 30 | 2779 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time, normal stress and normal strain
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Interface_Surface_Adding_axial_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time, normal stress and normal strain
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig(outfigname, bbox_inches='tight')
# plt.show() | cc0-1.0 |
SvichkarevAnatoly/Course-Python-Bioinformatics | semester2/task13/exercise2.py | 1 | 3697 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
h = .02 # step size in the mesh
names = [
"Nearest Neighbors", "Decision Tree",
"Random Forest", "AdaBoost",
"Linear SVM", "RBF SVM",
"Naive Bayes"
]
classifiers = [
KNeighborsClassifier(3),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianNB()
]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [
make_moons(noise=0.1, random_state=0),
make_circles(noise=0.1, factor=0.5, random_state=0),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
print('------ds-------', ds)
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.savefig("13-Figure_3_C")
plt.show()
| gpl-2.0 |
nelson-liu/scikit-learn | sklearn/datasets/tests/test_20news.py | 75 | 3266 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
try:
datasets.fetch_20newsgroups(subset='all',
download_if_missing=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# test subset = train
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 130107))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
# test subset = test
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 130107))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
# test subset = all
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 130107))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
tomlof/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 28 | 17934 | import numpy as np
import scipy.sparse as sp
import numbers
from scipy import linalg
from sklearn.decomposition import NMF, non_negative_factorization
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message, assert_no_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.extmath import squared_norm, fast_dot
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
def test_initialize_nn_output():
# Test that initialization does not return negative values
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid beta_loss parameter: got 'spam' instead of one"
assert_raise_message(ValueError, msg, NMF(solver='mu',
beta_loss=name).fit, A)
msg = "Invalid beta_loss parameter: solver 'cd' does not handle "
msg += "beta_loss = 1.0"
assert_raise_message(ValueError, msg, NMF(solver='cd',
beta_loss=1.0).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_almost_equal(evl[ref != 0], ref[ref != 0])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('cd', 'mu'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random'):
model = NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_nmf_fit_close():
rng = np.random.mtrand.RandomState(42)
# Test that the fit is not too far away
for solver in ('cd', 'mu'):
pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
max_iter=600)
X = np.abs(rng.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.1)
def test_nmf_transform():
# Test that NMF.transform returns close values
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
for solver in ['cd', 'mu']:
m = NMF(solver=solver, n_components=3, init='random',
random_state=0, tol=1e-5)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
def test_nmf_transform_custom_init():
# Smoke test that checks if NMF.transform works with custom initialization
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 5))
n_components = 4
avg = np.sqrt(A.mean() / n_components)
H_init = np.abs(avg * random_state.randn(n_components, 5))
W_init = np.abs(avg * random_state.randn(6, n_components))
m = NMF(solver='cd', n_components=n_components, init='custom',
random_state=0)
m.fit_transform(A, W=W_init, H=H_init)
m.transform(A)
def test_nmf_inverse_transform():
# Test that NMF.inverse_transform returns close values
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 4))
for solver in ('cd', 'mu'):
m = NMF(solver=solver, n_components=4, init='random', random_state=0,
max_iter=1000)
ft = m.fit_transform(A)
A_new = m.inverse_transform(ft)
assert_array_almost_equal(A, A_new, decimal=2)
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(30, 10))
NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
def test_nmf_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('cd', 'mu'):
est1 = NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
def test_nmf_sparse_transform():
# Test that transform works on sparse data. Issue #2124
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(3, 2))
A[1, 1] = 0
A = csc_matrix(A)
for solver in ('cd', 'mu'):
model = NMF(solver=solver, random_state=0, n_components=2,
max_iter=400)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('cd', 'mu'):
W_nmf, H, _ = non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = non_negative_factorization
assert_no_warnings(nnmf, A, A, A, np.int64(1))
msg = ("Number of components must be a positive integer; "
"got (n_components=1.5)")
assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5)
msg = ("Number of components must be a positive integer; "
"got (n_components='2')")
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
msg = "Invalid regularization parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom', True,
'cd', 2., 1e-4, 200, 0., 0., 'spam')
def _beta_divergence_dense(X, W, H, beta):
"""Compute the beta-divergence of X and W.H for dense array only.
Used as a reference for testing nmf._beta_divergence.
"""
if isinstance(X, numbers.Number):
W = np.array([[W]])
H = np.array([[H]])
X = np.array([[X]])
WH = fast_dot(W, H)
if beta == 2:
return squared_norm(X - WH) / 2
WH_Xnonzero = WH[X != 0]
X_nonzero = X[X != 0]
np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
if beta == 1:
res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
res += WH.sum() - X.sum()
elif beta == 0:
div = X_nonzero / WH_Xnonzero
res = np.sum(div) - X.size - np.sum(np.log(div))
else:
res = (X_nonzero ** beta).sum()
res += (beta - 1) * (WH ** beta).sum()
res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
res /= beta * (beta - 1)
return res
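# Editor's note (for reference, not part of the original test file): for
# beta not in {0, 1} the branches above implement the standard beta-divergence
#     d_beta(X, WH) = sum_ij (X_ij**beta + (beta - 1) * WH_ij**beta
#                             - beta * X_ij * WH_ij**(beta - 1)) / (beta * (beta - 1)),
# with the Kullback-Leibler (beta=1) and Itakura-Saito (beta=0) divergences
# handled as separate limit cases.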
def test_beta_divergence():
# Compare _beta_divergence with the reference _beta_divergence_dense
n_samples = 20
n_features = 10
n_components = 5
beta_losses = [0., 0.5, 1., 1.5, 2.]
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W, H = nmf._initialize_nmf(X, n_components, init='random', random_state=42)
for beta in beta_losses:
ref = _beta_divergence_dense(X, W, H, beta)
loss = nmf._beta_divergence(X, W, H, beta)
loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
assert_almost_equal(ref, loss, decimal=7)
assert_almost_equal(ref, loss_csr, decimal=7)
def test_special_sparse_dot():
# Test the function that computes np.dot(W, H), only where X is non zero.
n_samples = 10
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W = np.abs(rng.randn(n_samples, n_components))
H = np.abs(rng.randn(n_components, n_features))
WH_safe = nmf._special_sparse_dot(W, H, X_csr)
WH = nmf._special_sparse_dot(W, H, X)
# test that both results have same values, in X_csr nonzero elements
ii, jj = X_csr.nonzero()
WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
# test that WH_safe and X_csr have the same sparse structure
assert_array_equal(WH_safe.indices, X_csr.indices)
assert_array_equal(WH_safe.indptr, X_csr.indptr)
assert_array_equal(WH_safe.shape, X_csr.shape)
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_multiplicative_update_sparse():
# Compare sparse and dense input in multiplicative update NMF
# Also test continuity of the results with respect to beta_loss parameter
n_samples = 20
n_features = 10
n_components = 5
alpha = 0.1
l1_ratio = 0.5
n_iter = 20
# initialization
rng = np.random.mtrand.RandomState(1337)
X = rng.randn(n_samples, n_features)
X = np.abs(X)
X_csr = sp.csr_matrix(X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
# Reference with dense array X
W, H = W0.copy(), H0.copy()
W1, H1, _ = non_negative_factorization(
X, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
# Compare with sparse X
W, H = W0.copy(), H0.copy()
W2, H2, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W2, decimal=7)
assert_array_almost_equal(H1, H2, decimal=7)
# Compare with almost same beta_loss, since some values have a specific
# behavior, but the results should be continuous w.r.t beta_loss
beta_loss -= 1.e-5
W, H = W0.copy(), H0.copy()
W3, H3, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W3, decimal=4)
assert_array_almost_equal(H1, H3, decimal=4)
def test_nmf_negative_beta_loss():
# Test that an error is raised if beta_loss < 0 and X contains zeros.
# Test that the output has not NaN values when the input contains zeros.
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0
X_csr = sp.csr_matrix(X)
def _assert_nmf_no_nan(X, beta_loss):
W, H, _ = non_negative_factorization(
X, n_components=n_components, solver='mu', beta_loss=beta_loss,
random_state=0, max_iter=1000)
assert_false(np.any(np.isnan(W)))
assert_false(np.any(np.isnan(H)))
msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
for beta_loss in (-0.6, 0.):
assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss)
_assert_nmf_no_nan(X + 1e-9, beta_loss)
for beta_loss in (0.2, 1., 1.2, 2., 2.5):
_assert_nmf_no_nan(X, beta_loss)
_assert_nmf_no_nan(X_csr, beta_loss)
def test_nmf_regularization():
# Test the effect of L1 and L2 regularizations
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = np.abs(rng.randn(n_samples, n_features))
# L1 regularization should increase the number of zeros
l1_ratio = 1.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
W_regul_n_zeros = W_regul[W_regul == 0].size
W_model_n_zeros = W_model[W_model == 0].size
H_regul_n_zeros = H_regul[H_regul == 0].size
H_model_n_zeros = H_model[H_model == 0].size
assert_greater(W_regul_n_zeros, W_model_n_zeros)
assert_greater(H_regul_n_zeros, H_model_n_zeros)
# L2 regularization should decrease the mean of the coefficients
l1_ratio = 0.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
assert_greater(W_model.mean(), W_regul.mean())
assert_greater(H_model.mean(), H_regul.mean())
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_decreasing():
# test that the objective function is decreasing at each iteration
n_samples = 20
n_features = 15
n_components = 10
alpha = 0.1
l1_ratio = 0.5
tol = 0.
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.abs(X, X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
for solver in ('cd', 'mu'):
if solver != 'mu' and beta_loss != 2:
# not implemented
continue
W, H = W0.copy(), H0.copy()
previous_loss = None
for _ in range(30):
# one more iteration starting from the previous results
W, H, _ = non_negative_factorization(
X, W, H, beta_loss=beta_loss, init='custom',
n_components=n_components, max_iter=1, alpha=alpha,
solver=solver, tol=tol, l1_ratio=l1_ratio, verbose=0,
regularization='both', random_state=0, update_H=True)
loss = nmf._beta_divergence(X, W, H, beta_loss)
if previous_loss is not None:
assert_greater(previous_loss, loss)
previous_loss = loss
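# Reference note (added comment, not part of the original test module): the
# objective checked above, nmf._beta_divergence(X, W, H, beta_loss), is the
# element-wise beta-divergence between X and Y = np.dot(W, H), summed over all
# entries. A sketch of the standard definition (per element x, y):
#   beta = 2 : (x - y)**2 / 2                 (squared Frobenius error)
#   beta = 1 : x * log(x / y) - x + y         (generalized Kullback-Leibler)
#   beta = 0 : x / y - log(x / y) - 1         (Itakura-Saito)
#   otherwise: (x**beta + (beta - 1) * y**beta
#               - beta * x * y**(beta - 1)) / (beta * (beta - 1))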
| bsd-3-clause |
hlin117/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of squared distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
bad convergence (a local optimum), with the estimated centers stuck
between ground-truth clusters.
The dataset used for evaluation is a 2D grid of widely spaced isotropic
Gaussian clusters.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (each with a randomly generated dataset) for each strategy,
# so as to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
js850/pele | pele/utils/rotations.py | 1 | 7681 | """
Functions related to rotations
Most of these were adapted from victor's rotations.f90. Not all functions from
the file have been implemented.
Warning, they have not all been tested in this format.
.. currentmodule:: pele.utils.rotations
.. autosummary::
:toctree: generated/
q_multiply
aa2q
q2aa
q2mx
mx2q
mx2aa
rot_q2mx
aa2mx
random_q
random_aa
takestep_aa
rotate_aa
small_random_aa
vec_random
vec_random_ndim
vector_random_uniform_hypersphere
q_slerp
"""
import numpy as np
rot_epsilon = 1e-6
def q_multiply(q0, q1):
""" multiply 2 quaternions q1, q2 """
q3 = np.zeros(4)
q3[0] = q0[0]*q1[0]-q0[1]*q1[1]-q0[2]*q1[2]-q0[3]*q1[3]
q3[1] = q0[0]*q1[1]+q0[1]*q1[0]+q0[2]*q1[3]-q0[3]*q1[2]
q3[2] = q0[0]*q1[2]-q0[1]*q1[3]+q0[2]*q1[0]+q0[3]*q1[1]
q3[3] = q0[0]*q1[3]+q0[1]*q1[2]-q0[2]*q1[1]+q0[3]*q1[0]
return q3
def aa2q( AA ):
"""
convert angle axis to quaternion
input AA: angle axis vector of length 3
output Q: quaternion of length 4
"""
q = np.zeros(4, np.float64)
thetah = 0.5 * np.linalg.norm( AA )
q[0] = np.cos( thetah )
# do linear expansion for small epsilon
if thetah < rot_epsilon:
q[1:] = 0.5 * AA
else:
q[1:] = 0.5 * np.sin(thetah) * AA / thetah
# make sure to have normal form
if q[0] < 0.0: q = -q
return q
def q2aa( qin ):
"""
quaternion to angle axis
input Q: quaternion of length 4
output V: angle axis vector of length 3
"""
q = np.copy(qin)
if q[0] < 0.: q = -q
if q[0] > 1.0: q /= np.sqrt(np.dot(q,q))
theta = 2. * np.arccos(q[0])
s = np.sqrt(1.-q[0]*q[0])
if s < rot_epsilon:
p = 2. * q[1:4]
else:
p = q[1:4] / s * theta
return p
def q2mx( qin ):
"""quaternion to rotation matrix"""
Q = qin / np.linalg.norm(qin)
RMX = np.zeros([3,3], np.float64)
Q2Q3 = Q[1]*Q[2];
Q1Q4 = Q[0]*Q[3];
Q2Q4 = Q[1]*Q[3];
Q1Q3 = Q[0]*Q[2];
Q3Q4 = Q[2]*Q[3];
Q1Q2 = Q[0]*Q[1];
RMX[0,0] = 2.*(0.5 - Q[2]*Q[2] - Q[3]*Q[3]);
RMX[1,1] = 2.*(0.5 - Q[1]*Q[1] - Q[3]*Q[3]);
RMX[2,2] = 2.*(0.5 - Q[1]*Q[1] - Q[2]*Q[2]);
RMX[0,1] = 2.*(Q2Q3 - Q1Q4);
RMX[1,0] = 2.*(Q2Q3 + Q1Q4);
RMX[0,2] = 2.*(Q2Q4 + Q1Q3);
RMX[2,0] = 2.*(Q2Q4 - Q1Q3);
RMX[1,2] = 2.*(Q3Q4 - Q1Q2);
RMX[2,1] = 2.*(Q3Q4 + Q1Q2);
return RMX
def mx2q(mi):
q = np.zeros(4)
m = np.transpose(mi)
trace=m[0,0] + m[1,1]+m[2,2]
if (trace > 0.):
s = np.sqrt(trace+1.0) * 2.0
q[0] = 0.25 * s
q[1] = (m[1,2] - m[2,1]) / s
q[2] = (m[2,0] - m[0,2]) / s
q[3] = (m[0,1] - m[1,0]) / s
elif ((m[0,0] > m[1,1]) and (m[0,0] > m[2,2])):
s=np.sqrt(1.0 + m[0,0] - m[1,1] - m[2,2]) * 2.0
q[0] = (m[1,2] - m[2,1]) / s
q[1] = 0.25 * s
q[2] = (m[1,0] + m[0,1]) / s
q[3] = (m[2,0] + m[0,2]) / s
elif (m[1,1] > m[2,2]):
s = np.sqrt(1.0 + m[1,1] - m[0,0] - m[2,2]) * 2.0
q[0] = (m[2,0] - m[0,2]) / s
q[1] = (m[1,0] + m[0,1]) / s
q[2] = 0.25 * s
q[3] = (m[2,1] + m[1,2]) / s
else:
s = np.sqrt(1.0 + m[2,2] - m[0,0] - m[1,1]) * 2.0
q[0] = (m[0,1] - m[1,0]) / s
q[1] = (m[2,0] + m[0,2]) / s
q[2] = (m[2,1] + m[1,2]) / s
q[3] = 0.25 * s
if(q[0] < 0):
q = -q
return q
def mx2aa(m):
return q2aa(mx2q(m))
def rot_q2mx(qin):
m = np.zeros([3,3], np.float64)
q = qin / np.linalg.norm(qin)
sq = q**2
m[0,0] = ( sq[1] - sq[2] - sq[3] + sq[0])
m[1,1] = (-sq[1] + sq[2] - sq[3] + sq[0])
m[2,2] = (-sq[1] - sq[2] + sq[3] + sq[0])
tmp0 = q[1]*q[2]
tmp1 = q[0]*q[3]
m[1,0] = 2.0 * (tmp0 + tmp1)
m[0,1] = 2.0 * (tmp0 - tmp1)
tmp0 = q[1]*q[3]
tmp1 = q[2]*q[0]
m[2,0] = 2.0 * (tmp0 - tmp1)
m[0,2] = 2.0 * (tmp0 + tmp1)
tmp0 = q[2]*q[3]
tmp1 = q[0]*q[1]
m[2,1] = 2.0 * (tmp0 + tmp1)
m[1,2] = 2.0 * (tmp0 - tmp1)
return m
def aa2mx( p ):
return q2mx( aa2q( p ) )
def random_q():
"""
uniform random rotation, returned as a quaternion
uses 3 uniformly distributed random numbers and the algorithm given in
K. Shoemake, Uniform random rotations, Graphics Gems III, pages 124-132. Academic, New York, 1992.
This generates the random rotation in quaternion representation. We could replace this with
a direct angle axis generation, but be careful: the angle of rotation in angle axis representation
is NOT uniformly distributed
"""
from numpy import sqrt, sin, cos, pi
u = np.random.uniform(0,1,[3])
q = np.zeros(4, np.float64)
q[0] = sqrt(1.-u[0]) * sin(2.*pi*u[1])
q[1] = sqrt(1.-u[0]) * cos(2.*pi*u[1])
q[2] = sqrt(u[0]) * sin(2.*pi*u[2])
q[3] = sqrt(u[0]) * cos(2.*pi*u[2])
return q
def random_aa():
return q2aa( random_q() )
def takestep_aa(p, maxtheta):
""" change an angle axis vector by a small rotation"""
p[:] = rotate_aa(p, small_random_aa(maxtheta))
def rotate_aa(p1, p2):
"""
change a given angle axis rotation p1 by the
rotation p2
"""
return q2aa(q_multiply( aa2q(p2), aa2q(p1) ))
def small_random_aa(maxtheta):
""" generate a small random rotation"""
# first choose a random unit vector
p = vec_random()
# linear for too small steps
# this is not completely right but should be ok
if maxtheta < rot_epsilon:
p = p*dprand()*maxtheta
return p
s = 1. / (np.sin(0.5*maxtheta)**2)
# now choose the angle theta in range 0:step
# with distribution sin(0.5*theta)**2
u = dprand() * maxtheta
while dprand() > s * np.sin(0.5 * u)**2:
u=dprand() * maxtheta
p = p*u
return p
def vec_random():
""" uniform random unit vector """
p = np.zeros(3)
u1 = dprand()
u2 = dprand()
z = 2*u1 - 1.
p[0] = np.sqrt(1-z*z) * np.cos(2. * np.pi * u2)
p[1] = np.sqrt(1-z*z) * np.sin(2. * np.pi * u2)
p[2] = z
return p
def vec_random_ndim(n):
"""n-dimensional uniform random unit vector"""
v = np.random.normal(size=n)
v /= np.linalg.norm(v)
return v
def vector_random_uniform_hypersphere(k):
"""return a vector sampled uniformly in a hypersphere of dimension k"""
if k == 3:
# this function is much faster than the general one
u = vec_random()
else:
u = vec_random_ndim(k)
# draw the magnitude of the vector from a power law density:
# draws samples in [0, 1] from a power distribution with positive exponent k - 1.
p = np.random.power(k)
return p * u
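# Note added for clarity: for a point distributed uniformly in the unit k-ball
# the radius r satisfies P(r <= R) = R**k, i.e. its density is k * r**(k - 1)
# on [0, 1], which is exactly what np.random.power(k) draws from. Scaling a
# uniformly random direction by such an r therefore gives a uniform sample in
# the ball.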
dprand = lambda: np.random.rand()
def q_slerp (a, b,t):
if(t<=0.):
return a
if(t>=1.):
return b
costheta = np.dot(a, b)
c = b
# if theta > 180., go other direction
if (costheta < 0.0):
costheta = -costheta
c = -c
# linearly interpolate when the quaternions are nearly parallel
if (costheta > 1.0-1e-5):
return t*c + (1-t)*a
theta = np.arccos (costheta)
return (np.sin ((1.0 - t) * theta) * a + np.sin (t * theta) * c) / np.sin (theta)
#
# only testing below here
#
def test_vector_random_uniform_hypersphere():
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
nvec = 1000
r = np.zeros([nvec,3])
for i in range(nvec):
r[i,:] = vector_random_uniform_hypersphere(3)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(r[:,0], r[:,1], r[:,2])
plt.show()
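# Illustrative consistency check (added sketch, not part of the original
# module): it exercises the conversion chain described in the module docstring,
# angle axis -> quaternion -> rotation matrix -> angle axis. It is not wired
# into the __main__ block below; call it manually if desired.
def test_rotation_conversion_roundtrip(n_trials=100):
    for _ in range(n_trials):
        p = random_aa()
        # angle axis -> quaternion -> angle axis
        assert np.allclose(p, q2aa(aa2q(p)))
        # angle axis -> rotation matrix -> angle axis
        assert np.allclose(p, mx2aa(aa2mx(p)))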
if __name__ == "__main__":
test_vector_random_uniform_hypersphere()
| gpl-3.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/feature_selection/plot_f_test_vs_mi.py | 1 | 1646 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| mit |
dpaiton/OpenPV | pv-core/analysis/python/plot_l1_activity.py | 1 | 1294 | """
Plot the highest activity of four different bar positionings
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadSparse as rs
import PVReadWeights as rw
import PVConversions as conv
import scipy.cluster.vq as sp
import math
extended = False
a1 = rs.PVReadSparse(sys.argv[1], extended)
end = int(sys.argv[2])
numofsteps = int(sys.argv[3])
nx = a1.nx
ny = a1.ny
numneur = nx * ny
activity = []
count = 0
counta=0
for k in range(end):
A=a1.next_activity()
#print "sum = ", np.sum(A)
d = k / numofsteps
#act = np.append(activity, np.sum(A))
act = np.sum(A)
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A1p = act
#print "k at first = ", k
else:
A1p = np.vstack((A1p,act))
if k == (numofsteps-1):
A1q = 0 #A1p.sum(axis=0)
#print A1q
if k == ((numofsteps*d) + (numofsteps-1)): #and k != (numofsteps-1):
A1q = np.vstack((A1q, A1p.sum(axis=0)))
#print A1q
t1 = A1q / float(numneur)
#print t1
t1 = t1 / (numofsteps / 2000.0)
#print t1
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(np.arange(np.shape(t1)[0]), t1, color='y', ls = '-')
plt.show()
sys.exit()
| epl-1.0 |
mattgiguere/scikit-learn | sklearn/pipeline.py | 8 | 20449 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Parameters
----------
steps: list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
"""
# BaseEstimator interface
def __init__(self, steps):
self.named_steps = dict(steps)
names, estimators = zip(*steps)
if len(self.named_steps) != len(steps):
raise ValueError("Names provided are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(zip(names, estimators))
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps.copy()
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
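# Illustrative note (added comment): keyword fit parameters are routed by the
# '__' split above, e.g. pipe.fit(X, y, svc__sample_weight=w) becomes
# {'svc': {'sample_weight': w}}. Intermediate steps receive their entries
# inside the loop, and the dict for the final step is returned alongside the
# transformed Xt so that fit/fit_transform can pass it on.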
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
reverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
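# Illustrative note (added comment): duplicate estimator types receive numbered
# suffixes, e.g. [StandardScaler(), PCA(), PCA()] ->
# [('standardscaler', ...), ('pca-1', ...), ('pca-2', ...)].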
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/decomposition/tests/test_sparse_pca.py | 31 | 6002 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
"""
Test that SparsePCA won't return NaN when a feature is zero in all
samples.
"""
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
yanlend/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
cwade/surveyhelper | surveyhelper/response_set.py | 1 | 3236 | import pandas as pd
import numpy as np
class ResponseSet:
# TODO - As currently written this code skips the second row of data,
# because that's what we want to do for Qualtrics csv results, but this is
# potentially a big gotcha, so document well or change.
def __init__(self, response_file, codebook,
skiprows = [1],
encoding="utf8",
grouping_var=None,
group_dict=None):
df = pd.read_csv(response_file , skiprows=skiprows, encoding=encoding)
# go through each variable in the codebook and make sure the corresponding
# column is integer coded
matched_questions = []
for q in codebook.get_questions():
matched = True
for v in q.get_variable_names():
if v not in df:
print("Warning: Expected variable {} not found in data file {}".format(v, response_file))
matched = False
elif df[v].dtype not in [np.int64, np.float64]:
print("Converting variable {} to integer from {}".format(v, df[v].dtype))
df[v] = df[v].convert_objects(convert_numeric=True)
if matched:
matched_questions.append(q)
self.data = df
self.matched_questions = matched_questions
self.codebook = codebook
self.grouping_var = grouping_var
if (not self.grouping_var and group_dict):
raise(Exception(
"Grouping variable must also be specified when a grouping dict is passed in."
))
if self.grouping_var and not group_dict:
self.data[grouping_var] = pd.Categorical(self.data[grouping_var])
if self.grouping_var and group_dict:
self.data[grouping_var] = self.data[grouping_var].astype(str)
# For some odd reason, inplace=True doesn't work when cats are ["1-2", "3+"]
self.data[grouping_var] = self.data[grouping_var].replace(group_dict)
self.data[grouping_var] = pd.Categorical(self.data[grouping_var])
self.data[grouping_var] = self.data[grouping_var].cat.reorder_categories(self.uniq(list(group_dict.values())))
def uniq(self, input):
output = []
for x in input:
if x not in output:
output.append(x)
return(output)
def get_data(self):
if not self.grouping_var:
group_var = 'z'
while group_var in self.data.columns:
group_var += 'z'
self.data[group_var] = 0
else:
group_var = self.grouping_var
# self.data.sort(group_var, inplace=True)
groups = self.data.groupby(group_var)
return(groups)
def export_to_tableau(self, output_file, other_vars = [], constants = {}):
df = pd.DataFrame()
if 'weight' not in self.data:
self.data['weight'] = 1
for q in self.matched_questions:
d = q.get_tableau_data(self.data, other_vars)
df = df.append(d, ignore_index=True)
for k, v in constants.items():
df[k] = v
df.to_csv(output_file, index=False)
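# Illustrative usage sketch (added comment; the file names, grouping column and
# recoding below are hypothetical):
#
#   codebook = ...  # any object whose get_questions() yields questions with
#                   # get_variable_names() and get_tableau_data(), as used above
#   rs = ResponseSet('responses.csv', codebook,
#                    grouping_var='age_group',
#                    group_dict={'1': '18-34', '2': '35-54', '3': '55+'})
#   groups = rs.get_data()                 # pandas GroupBy keyed on age_group
#   rs.export_to_tableau('long_format.csv')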
| mit |
cmsteinbach/Python | Burgers1D/burgers_LaXF-W_with_source_flux_formulation.py | 1 | 7125 | # -*- coding: utf-8 -*-
"""
Created on: Wed Apr 6 14:58:30 2016
@author: cst
mail to: your email adress here
Abstract: Short Description here
Description: Detailed Description here
Source: Sources used for this
Version: 0.1
Changed on: Wed Apr 6 14:58:30 2016
TO DO´s: -
Parameter List Description: -
Public Methods Description: -
"""
from pylab import empty, arange, linspace, tanh, zeros, empty,sin,pi,plot,sign
from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d.axes3d import get_test_data
import inspect
class Burgers(object):
def __init__(self,dx,dt,nx,nt):
self.__dx = dx
self.__dt = dt
self.__nx = nx
self.__nt = nt
self.__u = empty([nx,nt])
self.__fi = zeros([nx,nt])
self.__cfl = 0.
print ('')
print ('FD_1D_LAX_WENDROFF:')
print ('Python version')
print ('')
print ('Solve the Burgers equation with a source term in 1D,')
print ('du/dt = -dF(u)/dx + S(u)')
print ('over the interval:')
print ('0.0 <= x <= 1.0')
print ('with fixed boundary conditions, and')
print ('with a given initial condition')
print ('')
print ('Numerics: Lax-Wendroff method with flux blended with Lax-Friedrichs')
def leftbc(self,uleft):
self.__uleft = uleft
def rightbc(self,uright):
self.__uright = uright
def xjump(self,xj):
self.__xj = xj
def initial(self):
for i in range(self.__nx):
if i*self.__dx < self.__xj:
self.__u[i,0] = self.__uleft
else:
self.__u[i,0] = self.__uright
def flux(self,u):
return u#*u/2.
def source(self,u):
my = 1500.
Q = -my*u*(u-1.)*(u-0.5)
# print(Q)
return Q
def timestep(self,u,jt,dt,dx):
eps = 0.0000001
u[0,jt] = u[0,jt-1]
dx = self.__dx
dt = self.__dt
for ix in range(1,self.__nx-2):
lflx = self.flux(u[ix-1,jt-1])
cflx = self.flux(u[ix,jt-1])
rflx = self.flux(u[ix+1,jt-1])
lu = u[ix-1,jt-1]
cu = u[ix ,jt-1]
ru = u[ix+1,jt-1]
lJF = (cflx - lflx)/(u[ix ,jt-1] - u[ix-1,jt-1] + eps)
rJF = (rflx - cflx)/(u[ix+1,jt-1] - u[ix ,jt-1] + eps)
lsrc = self.source(u[ix-1,jt-1])
csrc = self.source(u[ix ,jt-1])
rsrc = self.source(u[ix+1,jt-1])
JS = (rsrc - lsrc)/(u[ix+1,jt-1] - u[ix-1,jt-1] + eps)
src1 = -csrc*(rJF - lJF)/dx - (lJF + rJF)*(rsrc - lsrc)/dx/4.
src2 = csrc*JS - JS*(rflx - lflx)/dx/2.
cfi = self.minmod(u[ix-1,jt-1],u[ix ,jt-1],u[ix+1,jt-1])
lH = lflx - (1.-cfi)*dx/dt*(cu - lu) - (cfi)*lJF*(cflx - lflx)/dx*dt
rH = rflx - (1.-cfi)*dx/dt*(ru - cu) - (cfi)*rJF*(rflx - cflx)/dx*dt
u[ix,jt] = cu - (rH - lH)/2./dx*dt
u[ix,jt] = u[ix,jt] + csrc*dt + (cfi)*(src1 + src2)*dt**2/2.
u[self.__nx-2,jt] = u[self.__nx-3,jt]
u[self.__nx-1,jt] = u[self.__nx-2,jt]
return u
def minmod(self,um1,u,up1):
# calculate r_{i} = \frac{u_i-u_{i-1}}{u_{i+1}-u_i}
nom = (u - um1)
denom = (up1 - u )
# make sure division by 0 does not happen
if(abs(nom) < 1e-14): # nom = 0
nom = 0.
denom = 1.
elif (nom > 1e-14 and abs(denom) < 1e-14): # nom > 0 => r = \inf
nom = 1e14
denom = 1.
elif (nom < -1e-14 and abs(denom) < 1e-14): # nom < 0 => r = 0
nom = -1e14
denom = 1.
r = nom/denom
theta = 2.
return max(0.8,min(theta*r,1.))
def minmod2(self,um1,u,up1):
# calculate r_{i} = \frac{u_i-u_{i-1}}{u_{i+1}-u_i}
nom = (u - um1)
denom = (up1 - u )
if(abs(nom) < 1e-14): # nom = 0
nom = 0.
denom = 1.
elif (nom > 1e-14 and abs(denom) < 1e-14): # nom > 0 => r = \inf
nom = 1e14
denom = 1.
elif (nom < -1e-14 and abs(denom) < 1e-14): # nom < 0 => r = 0
nom = -1e14
denom = 1.
r = nom/denom
theta = 2.
beta = .0
return max(beta,min(theta*r,1.))
def mc(self,um1,u,up1):
# calculate r_{i} = \frac{u_i-u_{i-1}}{u_{i+1}-u_i}
nom = (u - um1)
denom = (up1 - u )
# make sure division by 0 does not happen
if(abs(nom) < 1e-14): # nom = 0
nom = 0.
denom = 1.
elif (nom > 1e-14 and abs(denom) < 1e-14): # nom > 0 => r = \inf
nom = 1e14
denom = 1.
elif (nom < -1e-14 and abs(denom) < 1e-14): # nom < 0 => r = 0
nom = -1e14
denom = 1.
r = nom/denom
theta = 2.
return max(0.,min(theta*r,(1.+r)/2.,1.))
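# Note added for clarity: minmod, minmod2 and mc above are slope-limiter style
# functions of the ratio r = (u_i - u_{i-1}) / (u_{i+1} - u_i). Their return
# value is used in timestep() as the blending factor cfi between the
# dissipative Lax-Friedrichs-type flux (cfi -> 0) and the Lax-Wendroff-type
# flux (cfi -> 1), as announced in the constructor banner. mc resembles the
# standard monotonized-central limiter but is capped at 1 instead of 2.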
def solve(self):
print ('')
print (' Number of nodes NX = %d' % ( self.__nx ))
print (' Number of time steps NT = %d' % ( self.__nt ))
print (' left BC, right BC = ',self.__uleft, self.__uright)
for j in range(1,self.__nt):
self.__u = self.timestep(self.__u,j,self.__dt,self.__dx)
print ('')
print ('FD_1D_ADVECTION_LAX_WENDROFF_TEST')
print (' Normal end of execution.')
print ('')
def plot3D(self):
X = zeros ( [ self.__nx, self.__nt ] )
Y = zeros ( [ self.__nx, self.__nt ] )
Z = zeros ( [ self.__nx, self.__nt ] )
x = linspace(0.,self.__nx*self.__dx,self.__nx)
for i in range(self.__nt):
X[:,i] = x
Y[:,i] = i*self.__dt
Z = self.__u
fig = plt.figure ( )
ax = fig.gca ( projection = '3d' )
surf = ax.plot_surface ( X, Y, Z, cmap = cm.coolwarm, \
linewidth = 0, antialiased = False )
ax.set_xlabel ( '<--X-->' )
ax.set_ylabel ( '<--T-->' )
ax.set_zlabel ( '<--U(X,T)-->' )
fig.colorbar ( surf, shrink = 0.5, aspect = 10 )
plt.show ( )
def plot2D(self,jt):
x = linspace(0.,self.__nx*self.__dx,self.__nx)
plot(x,self.__u[:,jt])
def cfl(self):
print ('max CFL condition value: %g' % self.__cfl)
if __name__ == "__main__":
burgers = Burgers(0.005,0.001,201,500)
burgers.leftbc(1.)
burgers.rightbc(0.0)
burgers.xjump(.3)
burgers.initial()
burgers.solve()
burgers.plot2D(499)
burgers.plot3D()
| gpl-3.0 |
krez13/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
nvoron23/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
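# Added note: in matrix form the function above returns
# A_n = R**(-1/2) * A * C**(-1/2), where R and C are the diagonal matrices of
# row and column sums of the (nonnegative) input, together with the diagonals
# of R**(-1/2) and C**(-1/2).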
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to the paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
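# Editor's note: a minimal, hedged usage sketch for SpectralCoclustering on
# synthetic block-diagonal data. It is not part of scikit-learn; the helper
# name `_example_spectral_coclustering` and the chosen parameter values are
# assumptions made purely for illustration.
def _example_spectral_coclustering():
    from sklearn.datasets import make_biclusters
    from sklearn.metrics import consensus_score
    # Generate shuffled data containing 5 planted biclusters.
    data, rows, columns = make_biclusters(shape=(300, 300), n_clusters=5,
                                          noise=5, shuffle=True,
                                          random_state=0)
    model = SpectralCoclustering(n_clusters=5, random_state=0)
    model.fit(data)
    # consensus_score is 1.0 when the planted biclusters are recovered exactly.
    return consensus_score(model.biclusters_, (rows, columns))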
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
    * Kluger, Yuval, et al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
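        # The leading singular vector pair is discarded unless the 'log'
        # normalization method is used.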
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
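# Editor's note: a minimal, hedged usage sketch for SpectralBiclustering on
# data with a planted checkerboard structure. It is not part of scikit-learn;
# the helper name `_example_spectral_biclustering` and the parameter values
# are assumptions made purely for illustration.
def _example_spectral_biclustering():
    from sklearn.datasets import make_checkerboard
    from sklearn.metrics import consensus_score
    # Generate shuffled data with a 4 x 3 checkerboard of biclusters.
    data, rows, columns = make_checkerboard(shape=(300, 300),
                                            n_clusters=(4, 3), noise=10,
                                            shuffle=True, random_state=0)
    model = SpectralBiclustering(n_clusters=(4, 3), method='log',
                                 random_state=0)
    model.fit(data)
    # consensus_score is 1.0 when the checkerboard structure is fully recovered.
    return consensus_score(model.biclusters_, (rows, columns))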
| bsd-3-clause |
xiaoxiamii/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 71 | 18815 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`.
    # At least one point should be returned when the `radius` is set
    # to the mean distance from the considered point to the other points
    # in the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
    # The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a distance
    # of exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear with the query but with opposite
    # sign, therefore it has a cosine 'distance' very close to the maximum
    # possible value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
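    # (Editor's note) Numerically: 1 - cos(pi / 4) is approximately 0.2929, so
    # the four expected distances are roughly [0.0, 0.2929, 1.0, 2.0].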
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
    # The `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
    # size of _input_array = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
grv87/thesis-code | ML.py | 1 | 8462 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Estimation of the parameters of a mixture of gamma distributions
# by the maximum likelihood (ML) method
# Copyright © 2014 Василий Горохов-Апельсинов
# This file is part of code for my bachelor's thesis.
#
# Code for my bachelor's thesis is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Code for my bachelor's thesis is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with code for my bachelor's thesis. If not, see
# <http://www.gnu.org/licenses/>.
# Requirements: Python 3 (works with 3.3), Python-dateutil, NumPy,
# SciPy, OpenOpt, MatPlotLib
# Data
from dateutil.parser import parse
tableName = 'MICEX_SBER'
startDateTime = parse('2011-06-01T10:30')
# endDateTime = parse('2011-06-01T11:30')
n = 200 # Size of window
k = 2 # Number of components in mixture
precision = 3
# Code
from common.get_data import getData
pos_inf = float('+inf')
neg_inf = float('-inf')
import numpy as np
from math import log as _ln, exp, isfinite, isinf
def ln(x):
if x == 0:
return neg_inf
elif isinf(x) and x > 0:
return pos_inf
else:
try:
return _ln(x)
except:
print(x)
raise
from scipy.stats import gamma # Gamma distribution
# Digamma function
from scipy.special import psi as _psi
def psi(alpha):
if alpha == 0:
return neg_inf
elif isinf(alpha) and alpha > 0:
return pos_inf
else:
return _psi(alpha)
from scipy.optimize import minimize #, basinhopping
from openopt import GLP, NLP
from random import random
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('error')
x = None
f = lambda u, alpha, beta: gamma.pdf(u, alpha, scale = 1 / beta) if u > 0 and alpha != 0 and beta != 0 else 0
k_underline = range(k)
k_m_1_underline = range(k - 1)
n_underline = range(n)
# NLopt
# print(len(theta_tilde))
#opt = nlopt.opt(nlopt.GN_ORIG_DIRECT, len(theta_tilde))
#print(opt.get_algorithm_name())
# Make use of the constraint: sum(p_ast_m) == 1
# These functions extract p_i, alpha_i and beta_i from a given theta_tilde tuple
p = lambda theta_tilde, i: theta_tilde[i] if i <= k - 2 else (1 - sum(theta_tilde[0:k-1]))
alpha = lambda theta_tilde, i: theta_tilde[k - 1 + i]
beta = lambda theta_tilde, i: theta_tilde[k * 2 - 1 + i]
d = True
# Negative logarithm of likelihood function
def L_T_tilde(theta_tilde):
# try:
# TODO
# return -sum([ln(sum([p(theta_tilde, i) * f(x[j], alpha(theta_tilde, i), beta(theta_tilde, i)) for i in k_underline])) for j in n_underline])
Q = [[f(x[j], alpha(theta_tilde, i), beta(theta_tilde, i)) for j in n_underline] for i in k_underline]
R = [[p(theta_tilde, i) * Q[i][j] for j in n_underline] for i in k_underline]
S = [sum([R[i][j] for i in k_underline]) for j in n_underline]
# global d
# if d:
# print(Q)
# print(R)
# print(S)
# d = False
return -sum([ln(S[j]) for j in n_underline])
# except ValueError:
# return pos_inf
def L_T_tildewithGrad(theta_tilde, grad):
# try:
Q = [[f(x[j], alpha(theta_tilde, i), beta(theta_tilde, i)) for j in n_underline] for i in k_underline]
R = [[p(theta_tilde, i) * Q[i][j] for j in n_underline] for i in k_underline]
S = [sum([R[i][j] for i in k_underline]) for j in n_underline]
if grad.size > 0:
grad[0:k-1] = [-sum([Q[i][j] / S[j] for j in n_underline]) for i in k_m_1_underline]
grad[k-1:k*2-1] = [-sum([R[i][j] / S[j] * (ln(beta(theta_tilde, i)) + ln(x[j]) - psi(alpha(theta_tilde, i))) for j in n_underline]) for i in k_underline]
grad[k*2-1:k*3-1] = [sum([R[i][j] / S[j] * alpha(theta_tilde, i) * x[j] / beta(theta_tilde, i) for j in n_underline]) for i in k_underline]
return -sum([ln(S[j]) for j in n_underline])
# except:
# return pos_inf
def grad_L_T_tilde(theta_tilde):
try:
Q = [[f(x[j], alpha(theta_tilde, i), beta(theta_tilde, i)) for j in n_underline] for i in k_underline]
R = [[p(theta_tilde, i) * Q[i][j] for j in n_underline] for i in k_underline]
S = [sum([R[i][j] for i in k_underline]) for j in n_underline]
return \
[-sum([Q[i][j] / S[j] for j in n_underline]) for i in k_m_1_underline] + \
[-sum([R[i][j] / S[j] * (ln(beta(theta_tilde, i)) + ln(x[j]) - psi(alpha(theta_tilde, i))) for j in n_underline]) for i in k_underline] + \
[sum([R[i][j] / S[j] * alpha(theta_tilde, i) * x[j] / beta(theta_tilde, i) for j in n_underline]) for i in k_underline]
except:
print(theta_tilde)
raise
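# Editor's note: a hedged, vectorized sketch of the same negative
# log-likelihood, added for illustration only. The helper name
# `L_T_tilde_vectorized` is an assumption; it reuses the global `x`, `k` and
# the parameter packing defined above, and does not guard against zero
# densities the way ln() above does.
def L_T_tilde_vectorized(theta_tilde):
    p_vec = np.append(theta_tilde[:k - 1], 1.0 - np.sum(theta_tilde[:k - 1]))
    alphas = np.asarray(theta_tilde[k - 1:2 * k - 1])
    betas = np.asarray(theta_tilde[2 * k - 1:3 * k - 1])
    # Component densities, shape (k, n); gamma.pdf takes scale = 1 / rate.
    dens = np.array([gamma.pdf(x, a, scale=1.0 / b)
                     for a, b in zip(alphas, betas)])
    mixture = np.dot(p_vec, dens)  # mixture density at each observation
    return -np.sum(np.log(mixture))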
#opt.set_min_objective(L_T_tildewithGrad)
# Constraints
# constraints = \
# [{
# 'type': 'ineq',
# 'fun': lambda theta_tilde, i = i: p(theta_tilde, i)
# } for i in k_underline] + \
# [{
# 'type': 'ineq',
# 'fun': lambda theta_tilde, i = i: alpha(theta_tilde, i)
# } for i in k_underline] + \
# [{
# 'type': 'ineq',
# 'fun': lambda theta_tilde, i = i: beta(theta_tilde, i)
# } for i in k_underline]
#bounds = [(0, 1) for i in range(k - 1)] + [(0, None) for i in range(k)] + [(0, None) for i in range(k)]
# For NLopt
# for constraint in constraints:
# if constraint['type'] == 'eq':
# opt.add_equality_constraint(constraint['fun'])
# elif constraint['type'] == 'ineq':
# opt.add_inequality_constraint(constraint['fun'])
# opt.set_lower_bounds([0 for i in range(len(theta_tilde))])
# opt.add_inequality_constraint(lambda theta_tilde, grad: p(theta_tilde, k - 1))
# For OpenOpt
#opt_c = [lambda theta_tilde: -p(theta_tilde, k - 1) <= 0]
#opt_dc = [lambda theta_tilde: [1 for i in k_m_1_underline] + [0 for i in k_underline] + [0 for i in k_underline]]
opt_A = np.zeros((3 * k, 3 * k - 1))
np.fill_diagonal(opt_A, -1)
opt_A[3 * k - 1] = np.ones(3 * k - 1)
# print(opt_A)
opt_b = [0 for i in range(3 * k - 1)] + [1]
# print(opt_b)
opt_lb = [0 for i in range(3 * k - 1)]
opt_ub = [1 for i in range(3 * k - 1)]
# opt.set_ftol_abs(0.001)
# opt.set_initial_step(0.1)
# def L_T_tildeconstrained(theta_tilde):
# for constraint in constraints:
# if constraint['fun'](theta_tilde) <= 0:
# return pos_inf
# return L_T_tilde(theta_tilde)
cursor = getData(tableName, startDateTime, n, precision)
for row in cursor:
moment = row[1]
print(moment)
x = np.array(row[2])
assert len(x) == n # Make sure we have exactly n values
plt.hist(x, bins = 50)
# plt.show()
# print(x.mean(), x.var())
# Initial values TODO
s, loc, t = gamma.fit(x, loc = 0)
alpha_est = s
beta_est = 1 / t
print(alpha_est, loc, beta_est)
theta_tilde = np.array(
[0.5] + # p_tilde
[alpha_est for i in k_underline] + # alpha
[beta_est for i in k_underline] # beta
)
print(L_T_tilde(theta_tilde))
theta_tilde = np.array(
[0.4] + # TODO # p_tilde
[alpha_est ** (random() * 2) for i in k_underline] + # alpha
[beta_est ** (random() * 2) for i in k_underline] # beta
)
print(theta_tilde, L_T_tilde(theta_tilde))
# print('COBYLA')
# print(L_T_tilde(theta_tilde))
# res = minimize(L_T_tilde, theta_tilde, method = 'COBYLA', constraints = constraints)
# print(res)
# assert isfinite(res.fun)
# theta_tilde = res.x
# print('p', [p(theta_tilde, i) for i in k_underline])
# print('alpha', [alpha(theta_tilde, i) for i in k_underline])
# print('beta', [beta(theta_tilde, i) for i in k_underline])
# # NLopt
# theta_tilde = opt.optimize(theta_tilde)
# L_T_tilde_ast = opt.last_optimum_value()
# print(L_T_tilde_ast)
# result = opt.last_optimize_result()
# print(result)
# OpenOpt
opt_p = GLP(L_T_tilde, theta_tilde, df = grad_L_T_tilde, A = opt_A, b = opt_b, lb = opt_lb, ub = opt_ub)
res = opt_p.solve('de', maxNonSuccess = 32) # maxNonSuccess = round(exp(len(theta_tilde)))
print(res.xf, res.ff)
# print('Basin-Hopping')
# print(L_T_tildeconstrained(theta_tilde))
# res = basinhopping(L_T_tildeconstrained, theta_tilde)
# print(res)
# assert isfinite(res.fun)
# theta_tilde = res.x
# print('p', [p(theta_tilde, i) for i in k_underline])
# print('alpha', [alpha(theta_tilde, i) for i in k_underline])
# print('beta', [beta(theta_tilde, i) for i in k_underline])
| gpl-3.0 |
lokeshpancharia/BuildingMachineLearningSystemsWithPython | ch02/heldout.py | 24 | 1377 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script demonstrates the difference between the training accuracy and
# testing (held-out) accuracy.
import numpy as np
from sklearn.datasets import load_iris
from threshold import fit_model, accuracy
data = load_iris()
features = data['data']
labels = data['target_names'][data['target']]
# We are going to remove the setosa examples as they are too easy:
is_setosa = (labels == 'setosa')
features = features[~is_setosa]
labels = labels[~is_setosa]
# Now we classify virginica vs non-virginica
is_virginica = (labels == 'virginica')
# Split the data in two: testing and training
testing = np.tile([True, False], 50) # testing = [True,False,True,False,True,False...]
# Training is the negation of testing: i.e., datapoints not used for testing,
# will be used for training
training = ~testing
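# Editor's note: a hedged alternative sketch, not part of the book's code. The
# same kind of random 50/50 split can be produced with scikit-learn's
# train_test_split (assuming scikit-learn >= 0.18 for the import path below).
def alternative_split(features, labels, seed=0):
    from sklearn.model_selection import train_test_split
    return train_test_split(features, labels, test_size=0.5, random_state=seed)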
model = fit_model(features[training], is_virginica[training])
train_accuracy = accuracy(features[training], is_virginica[training], model)
test_accuracy = accuracy(features[testing], is_virginica[testing], model)
print('''\
Training accuracy was {0:.1%}.
Testing accuracy was {1:.1%} (N = {2}).
'''.format(train_accuracy, test_accuracy, testing.sum()))
| mit |
zingale/pyro2 | logo/logo.py | 1 | 3223 | import numpy as np
import matplotlib.pyplot as plt
logo_text = """
XXX X X X XX XX
X X X X XX X X
X X X X X X X
XXX XXX X XX
X X
X X
"""
class LogoGrid:
def __init__(self, mask):
logo_lines = mask.split("\n")
self.nx = len(logo_lines[1])
self.ny = len(logo_lines)
self.xl = np.arange(self.nx)
self.xr = np.arange(self.nx) + 1.0
self.x = np.arange(self.nx) + 0.5
self.yl = np.flip(np.arange(self.ny))
self.yr = np.flip(np.arange(self.ny) + 1.0)
self.y = np.flip(np.arange(self.ny) + 0.5)
self.dx = 1
self.dy = 1
self.xmin = self.xl.min()
self.xmax = self.xr.max()
self.ymin = self.yl.min()
self.ymax = self.yr.max()
#print("xmin, xmax", self.xmin, self.xmax)
#print("ymin, ymax", self.ymin, self.ymax)
self.logo = np.zeros((self.nx, self.ny))
for i in range(1, self.nx):
for j in range(1, self.ny-1):
if logo_lines[j][i] == "X":
self.logo[i,j] = 1.0
def draw_grid(self):
# vertical lines
for i in range(0, self.nx):
plt.plot([self.xl[i], self.xl[i]], [self.ymin-0.5*self.dy, self.ymax+0.5*self.dy],
color="C0", lw=3)
plt.plot([self.xr[self.nx-1], self.xr[self.nx-1]],
[self.ymin-0.5*self.dy, self.ymax+0.5*self.dy],
color="C0", lw=3)
# horizontal lines
for j in range(0, self.ny):
plt.plot([self.xmin-0.5*self.dx, self.xmax+0.5*self.dx], [self.yl[j], self.yl[j]],
color="C0", lw=3)
plt.plot([self.xmin-0.5*self.dx, self.xmax+0.5*self.dx],
[self.yr[0], self.yr[0]],
color="C0", lw=3)
def fill_in_logo(self):
for j in range(self.ny):
for i in range(self.nx):
if self.logo[i,j] == 0.0:
continue
plt.fill([self.xl[i], self.xl[i], self.xr[i], self.xr[i], self.xl[i]],
[self.yl[j], self.yr[j], self.yr[j], self.yl[j], self.yl[j]],
color="k", alpha=0.8)
def fill_in_background(self):
xx = np.linspace(self.xmin, self.xmax, 200, endpoint=True)
yy = np.linspace(self.ymin, self.ymax, 200, endpoint=True)
x, y = np.meshgrid(xx, yy)
# a funky function
ff = self._func(x, y)
plt.imshow(ff, extent=[self.xmin, self.xmax, self.ymin, self.ymax], alpha=0.25, cmap="plasma")
def _func(self, x, y):
return (1 + 0.5*(1.0 + np.tanh(y - 0.5*(self.ymin + self.ymax)))) * np.cos(3*np.pi*x/(self.xmax-self.xmin))
lg = LogoGrid(logo_text)
lg.draw_grid()
lg.fill_in_logo()
lg.fill_in_background()
#plt.subplots_adjust(0,0,1,1,0,0)
ax = plt.gca()
ax.set_axis_off()
ax.set_aspect("equal", "datalim")
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
fig = plt.gcf()
fig.set_size_inches(int(lg.nx+1), int(lg.ny+1))
plt.margins(0.0)
#plt.tight_layout()
plt.savefig("pyro_logo.svg", bbox_inches="tight", pad_inches=0)
| bsd-3-clause |
saimn/astropy | astropy/visualization/units.py | 8 | 3945 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
__doctest_skip__ = ['quantity_support']
def quantity_support(format='latex_inline'):
"""
Enable support for plotting `astropy.units.Quantity` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> with visualization.quantity_support():
... plt.figure()
... plt.plot([1, 2, 3] * u.m)
[...]
... plt.plot([101, 125, 150] * u.cm)
[...]
... plt.draw()
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to ``latex_inline``.
"""
from astropy import units as u
# import Angle just so we have a more or less complete list of Quantity
# subclasses loaded - matplotlib needs them all separately!
# NOTE: in matplotlib >=3.2, subclasses will be recognized automatically,
# and once that becomes our minimum version, we can remove this,
# adding just u.Quantity itself to the registry.
from astropy.coordinates import Angle # noqa
from matplotlib import units
from matplotlib import ticker
# Get all subclass for Quantity, since matplotlib checks on class,
# not subclass.
def all_issubclass(cls):
return {cls}.union(
[s for c in cls.__subclasses__() for s in all_issubclass(c)])
def rad_fn(x, pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return 'π/2'
elif n == 2:
return 'π'
elif n % 2 == 0:
return f'{n / 2}π'
else:
return f'{n}π/2'
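    # For example (editor's note), the formatter above yields:
    #   rad_fn(np.pi / 2) -> 'π/2', rad_fn(np.pi) -> 'π',
    #   rad_fn(3 * np.pi / 2) -> '3π/2'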
class MplQuantityConverter(units.ConversionInterface):
_all_issubclass_quantity = all_issubclass(u.Quantity)
def __init__(self):
# Keep track of original converter in case the context manager is
# used in a nested way.
self._original_converter = {}
for cls in self._all_issubclass_quantity:
self._original_converter[cls] = units.registry.get(cls)
units.registry[cls] = self
@staticmethod
def axisinfo(unit, axis):
if unit == u.radian:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.to_string(),
)
elif unit == u.degree:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter('%i°'),
label=unit.to_string(),
)
elif unit is not None:
return units.AxisInfo(label=unit.to_string(format))
return None
@staticmethod
def convert(val, unit, axis):
if isinstance(val, u.Quantity):
return val.to_value(unit)
elif isinstance(val, list) and val and isinstance(val[0], u.Quantity):
return [v.to_value(unit) for v in val]
else:
return val
@staticmethod
def default_units(x, axis):
if hasattr(x, 'unit'):
return x.unit
return None
def __enter__(self):
return self
def __exit__(self, type, value, tb):
for cls in self._all_issubclass_quantity:
if self._original_converter[cls] is None:
del units.registry[cls]
else:
units.registry[cls] = self._original_converter[cls]
return MplQuantityConverter()
| bsd-3-clause |
Cadasta/cadasta-platform | cadasta/organization/tests/test_importers.py | 1 | 46203 | import pytest
import pandas as pd
from core.tests.utils.cases import FileStorageTestCase, UserTestCase
from core.messages import SANITIZE_ERROR
from django.contrib.contenttypes.models import ContentType
from django.contrib.gis.geos import LineString, Point, Polygon
from django.core.exceptions import ValidationError
from django.test import TestCase
from jsonattrs.models import Attribute, AttributeType, Schema
from party.models import Party, TenureRelationship
from party.choices import TENURE_RELATIONSHIP_TYPES
from questionnaires.models import Questionnaire
from questionnaires.tests import factories as q_factories
from resources.tests.utils import clear_temp # noqa
from spatial.models import SpatialUnit
from spatial.choices import TYPE_CHOICES
from ..importers import csv, exceptions, validators, xls
from ..importers.base import Importer
from ..tests.factories import ProjectFactory
class BaseImporterTest(UserTestCase, TestCase):
def test_init(self):
project = ProjectFactory.build()
importer = Importer(project)
assert importer.project == project
def test_get_schema_attrs(self):
project = ProjectFactory.create()
importer = Importer(project)
assert len(importer.get_schema_attrs().keys()) == 5
assert importer.get_schema_attrs()['spatial.spatialunit'] == {}
def test_import_data_not_implemented(self):
project = ProjectFactory.create()
importer = Importer(project)
with pytest.raises(NotImplementedError):
importer.import_data(config=None)
def test_get_headers_not_implemented(self):
project = ProjectFactory.create()
importer = Importer(project)
with pytest.raises(NotImplementedError):
importer.get_headers()
def test_cast_to_type(self):
project = ProjectFactory.create()
importer = Importer(project)
val = importer._cast_to_type('1.0', 'integer')
assert type(val) is int
assert val == 1
val = importer._cast_to_type('not an integer', 'integer')
assert type(val) is int
assert val == 0
val = importer._cast_to_type('1', 'decimal')
assert type(val) is float
assert val == 1.0
val = importer._cast_to_type('not a decimal', 'decimal')
assert type(val) is float
assert val == 0.0
def test_map_attrs_to_content_types_with_emoji(self):
project = ProjectFactory.create(current_questionnaire='123abc')
content_type = ContentType.objects.get(
app_label='party', model='party'
)
schema = Schema.objects.create(
content_type=content_type,
selectors=(
project.organization.id, project.id, '123abc', ))
attr_type = AttributeType.objects.get(name='text')
party_hobby_attr = Attribute.objects.create(
schema=schema,
name='party_hobby', long_name='Party Hobby',
attr_type=attr_type, index=0,
required=False, omit=False
)
headers = ['party_type', 'tenure_type', 'party_name', 'party_hobby']
row = ['IN', 'FH', 'John', 'I 💙 🍻']
contenttypes = {
'party.party': {
'type': 'IN',
'attributes': {},
'name': 'John',
'project': project}
}
attributes = ['party::party_hobby']
attr_map = {
'party.party': {
'DEFAULT': {
'party_hobby': (party_hobby_attr, 'party.party', 'Party')
}
}
}
importer = Importer(project=project)
with pytest.raises(ValidationError) as e:
importer._map_attrs_to_content_types(
headers, row, contenttypes, attributes, attr_map)
assert e.value.message == SANITIZE_ERROR
class ImportValidatorTest(TestCase):
def test_validate_invalid_column(self):
config = {
'party_name_field': 'party_name',
'geometry_field': 'location_geometry',
'type': 'csv'
}
headers = ['party_type', 'tenure_type', 'some_field', 'invalid_column']
row = ['IN', 'FH', 'location_geometry']
with pytest.raises(ValidationError) as e:
validators.validate_row(
headers, row, config)
assert e.value.message == "Number of headers and columns do not match."
def test_validate_party_name_field(self):
config = {
'party_name_field': 'party_name',
'party_type_field': 'party_type',
'geometry_field': 'location_geometry',
'type': 'csv'
}
headers = ['party_type', 'tenure_type', 'some_field']
row = ['IN', 'FH', 'location_geometry']
with pytest.raises(ValidationError) as e:
validators.validate_row(
headers, row, config)
assert e.value.message == "No 'party_name' column found."
def test_validate_party_name_field_with_emoji(self):
config = {
'party_name_field': 'party_name',
'party_type_field': 'party_type',
'type': 'csv'
}
headers = ['party_type', 'tenure_type', 'party_name']
row = ['IN', 'FH', 'I 💙 🍻']
with pytest.raises(ValidationError) as e:
validators.validate_row(
headers, row, config)
assert e.value.message == SANITIZE_ERROR
def test_validate_party_type_field(self):
config = {
'party_name_field': 'party_name',
'party_type_field': 'party_type',
'geometry_field': 'location_geometry',
'type': 'csv'
}
headers = ['party_name', 'tenure_type', 'some_field']
row = ['Test Party', 'FH', 'location_geometry']
with pytest.raises(ValidationError) as e:
validators.validate_row(
headers, row, config)
assert e.value.message == "No 'party_type' column found."
def test_validate_tenure_type(self):
config = {
'party_name_field': 'party_name',
'party_type_field': 'party_type',
'geometry_field': 'location_geometry',
'type': 'csv'
}
geometry = 'SRID=4326;POINT (30 10)'
headers = [
'party_name', 'party_type', 'location_geometry']
row = ['Party Name', 'IN', geometry]
with pytest.raises(ValidationError) as e:
validators.validate_row(
headers, row, config)
assert e.value.message == "No 'tenure_type' column found."
def test_validate_geometry_field(self):
config = {
'party_name_field': 'party_name',
'party_type_field': 'party_type',
'geometry_field': 'location_geometry',
'type': 'csv'
}
geometry = 'SRID=4326;POINT (30 10)'
headers = [
'party_name', 'party_type', 'location_geometry_bad']
row = ['Party Name', 'IN', geometry]
with pytest.raises(ValidationError) as e:
validators.validate_row(
headers, row, config)
assert e.value.message == "No 'geometry_field' column found."
def test_validate_location_type_field(self):
config = {
'party_name_field': 'party_name',
'party_type_field': 'party_type',
'geometry_field': 'location_geometry',
'type': 'xls',
'location_type_field': 'location_type'
}
geometry = 'SRID=4326;POINT (30 10)'
headers = [
'party::party_name', 'party::party_type',
'spatialunit::location_geometry',
'spatialunit::bad_location_type_field']
row = ['Party Name', 'IN', geometry, 'PA']
with pytest.raises(ValidationError) as e:
validators.validate_row(
headers, row, config
)
assert e.value.message == "No 'location_type' column found."
def test_validate_geometry(self):
config = {
'party_name_field': 'party_name',
'party_type_field': 'party_type',
'geometry_field': 'location_geometry',
'type': 'csv'
}
geometry = 'SRID=4326;POINT (30 10, Z)'
headers = [
'party_name', 'party_type', 'location_geometry']
row = ['Party Name', 'IN', geometry]
with pytest.raises(ValidationError) as e:
validators.validate_row(
headers, row, config)
assert e.value.message == "Invalid geometry."
def test_validate_empty_geometry(self):
config = {
'party_type_field': 'party_type',
'geometry_field': 'location_geometry',
'type': 'csv'
}
geometry = 'POLYGON EMPTY'
headers = ['party_type', 'location_geometry']
row = ['IN', geometry]
_, _, geo, _, _ = validators.validate_row(headers, row, config)
assert geo.empty is True
def test_validate_location_type_choice(self):
config = {
'party_name_field': 'party_name',
'party_type_field': 'party_type',
'geometry_field': 'location_geometry',
'type': 'csv',
'location_type_field': 'location_type',
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
geometry = 'SRID=4326;POINT (30 10)'
headers = [
'party_name', 'party_type', 'location_geometry',
'location_type']
row = ['Party Name', 'IN', geometry, 'WRONG']
with pytest.raises(ValidationError) as e:
validators.validate_row(
headers, row, config
)
assert e.value.message == "Invalid location_type: 'WRONG'."
def test_validate_tenure_type_choice(self):
config = {
'party_name_field': 'party_name',
'party_type_field': 'party_type',
'geometry_field': 'location_geometry',
'type': 'csv',
'location_type_field': 'location_type',
'project': ProjectFactory.create(),
'allowed_tenure_types': [t[0] for t in TENURE_RELATIONSHIP_TYPES],
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
geometry = 'SRID=4326;POINT (30 10)'
headers = [
'party_name', 'party_type', 'location_geometry',
'location_type', 'tenure_type']
row = ['Party Name', 'IN', geometry, 'PA', 'WRONG']
with pytest.raises(ValidationError) as e:
validators.validate_row(
headers, row, config
)
assert e.value.message == "Invalid tenure_type: 'WRONG'."
@pytest.mark.usefixtures('clear_temp')
class CSVImportTest(UserTestCase, FileStorageTestCase, TestCase):
def setUp(self):
super().setUp()
self.valid_csv = '/organization/tests/files/test.csv'
self.geoshape_csv = '/organization/tests/files/test_geoshape.csv'
self.geotrace_csv = '/organization/tests/files/test_geotrace.csv'
self.test_wkt = '/organization/tests/files/test_wkt.csv'
self.test_wkb = '/organization/tests/files/test_wkb.csv'
self.project = ProjectFactory.create(name='Test CSV Import')
xlscontent = self.get_file(
'/organization/tests/files/uttaran_test.xlsx', 'rb')
form = self.storage.save('xls-forms/uttaran_test.xlsx',
xlscontent.read())
xlscontent.close()
Questionnaire.objects.create_from_form(
xls_form=form,
project=self.project
)
# test for expected schema and attribute creation
assert 3 == Schema.objects.all().count()
assert 42 == Attribute.objects.all().count()
self.party_attributes = [
'party::educational_qualification', 'party::name_mouza',
'party::j_l', 'party::name_father_hus', 'party::village_name',
'party::mobile_no', 'party::occupation_hh', 'party::class_hh'
]
self.party_gr_attributes = [
'party::test_group_attr'
]
self.location_attributes = [
'spatialunit::deed_of_land', 'spatialunit::amount_othersland',
'spatialunit::land_calculation', 'spatialunit::how_aquire_landwh',
'spatialunit::female_member', 'spaitalunit::mutation_of_land',
'spatialunit::amount_agriland', 'spatialunit::nid_number',
'spatialunit::how_aquire_landt', 'spatialunit::boundary_conflict',
'spatialunit::dakhal_on_land', 'spatialunit::how_aquire_landp',
'spatialunit::how_aquire_landd', 'spatialunit::ownership_conflict',
'spatialunit::others_conflict', 'spatialunit::how_aquire_landm',
'spatialunit::khatain_of_land', 'spatialunit::male_member',
'spatialunit::how_aquire_landw', 'spatialunit::everything',
'spatialunit::location_problems'
]
self.tenure_attributes = [
'tenurerelationship::tenure_name',
'tenurerelationship::tenure_notes'
]
self.attributes = (
self.party_attributes + self.location_attributes +
self.tenure_attributes
)
def test_get_schema_attrs(self):
importer = csv.CSVImporter(
project=self.project, path=self.path + self.valid_csv)
attrs = importer.get_schema_attrs()
su_attrs = attrs['spatial.spatialunit']['DEFAULT']
pty_attrs_in = attrs['party.party']['IN']
pty_attrs_gr = attrs['party.party']['GR']
pty_attrs_co = attrs['party.party']['CO']
tenure_attrs = attrs['party.tenurerelationship']['DEFAULT']
assert len(su_attrs) == 29
assert len(pty_attrs_in) == 11
assert len(pty_attrs_gr) == 11
assert len(pty_attrs_co) == 11
assert len(tenure_attrs) == 2
def test_get_attribute_map(self):
importer = csv.CSVImporter(
project=self.project, path=self.path + self.valid_csv)
entity_types = ['PT', 'SU']
attr_map, extra_attrs, extra_headers = importer.get_attribute_map(
'csv', entity_types
)
assert len(attr_map.keys()) == 3
assert len(extra_attrs) == 11
assert len(extra_headers) == 10
# assert correct length of conditional selectors
assert len(attr_map['party.party']) == 3
assert len(attr_map['party.party']['CO']) == 8
assert len(attr_map['party.party']['GR']) == 8
assert len(attr_map['party.party']['IN']) == 8
# assert correct length of default selectors
assert len(attr_map['spatial.spatialunit']) == 1
assert len(attr_map['spatial.spatialunit']['DEFAULT']) == 21
assert len(attr_map['party.tenurerelationship']['DEFAULT']) == 2
# assert correctness of attribute map contents
(attr, content_type, label) = attr_map['party.party'][
'CO']['name_mouza']
assert isinstance(attr, Attribute)
assert attr.name == 'name_mouza'
assert content_type == 'party.party'
assert label == 'Party'
def test_get_attribute_map_parties_only(self):
importer = csv.CSVImporter(
project=self.project, path=self.path + self.valid_csv)
entity_types = ['PT']
attr_map, extra_attrs, extra_headers = importer.get_attribute_map(
'csv', entity_types
)
assert len(attr_map.keys()) == 1
assert len(extra_attrs) == 3
assert len(extra_headers) == 10
# assert correct length of conditional selectors
assert len(attr_map['party.party']) == 3
assert len(attr_map['party.party']['CO']) == 8
assert len(attr_map['party.party']['GR']) == 8
assert len(attr_map['party.party']['IN']) == 8
# assert correctness of attribute map contents
(attr, content_type, label) = attr_map['party.party'][
'CO']['name_mouza']
assert isinstance(attr, Attribute)
assert attr.name == 'name_mouza'
assert content_type == 'party.party'
assert label == 'Party'
def test_get_flattened_attribute_map(self):
importer = csv.CSVImporter(
project=self.project, path=self.path + self.valid_csv)
entity_types = ['PT', 'SU']
attr_map, extra_attrs, extra_headers = importer.get_attribute_map(
'csv', entity_types, flatten=True
)
party_attrs = attr_map['party']
su_attrs = attr_map['spatialunit']
tenure_attrs = attr_map['tenurerelationship']
assert len(party_attrs) == 8
assert len(su_attrs) == 21
assert len(tenure_attrs) == 2
def test_import_data(self):
importer = csv.CSVImporter(
project=self.project, path=self.path + self.valid_csv)
config = {
'file': self.path + self.valid_csv,
'entity_types': ['PT', 'SU'],
'party_name_field': 'name_of_hh',
'party_type_field': 'party_type',
'location_type_field': 'location_type',
'geometry_field': 'location_geometry',
'attributes': self.attributes,
'project': self.project,
'allowed_tenure_types': [t[0] for t in TENURE_RELATIONSHIP_TYPES],
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
importer.import_data(config)
assert Party.objects.all().count() == 10
assert SpatialUnit.objects.all().count() == 10
assert TenureRelationship.objects.all().count() == 10
for su in SpatialUnit.objects.filter(project_id=self.project.pk).all():
if su.geometry is not None:
assert type(su.geometry) is Point
# test spatial unit attribute creation
sus = SpatialUnit.objects.filter(
attributes__contains={'nid_number': '3913647224045'})
assert len(sus) == 1
su = sus[0]
assert su.type == 'PA'
assert len(su.attributes) == 20
assert 'how_aquire_landwh' not in su.attributes.keys()
assert 'how_aquire_landw' in su.attributes.keys()
assert su.attributes[
'others_conflict'] == ('কিছু বেখলে অাছে জোর করে দখল করে ভোগ করে '
'অন্য জমির মালক।')
assert su.attributes['female_member'] == 4
assert su.attributes['location_problems'] == [
'conflict', 'risk_of_eviction'
]
# test party attribute creation
parties = Party.objects.filter(
attributes__contains={'Mobile_No': '০১৭৭২৫৬০১৯১'})
assert len(parties) == 1
assert parties[0].type == 'IN'
pty_attrs = parties[0].attributes
assert len(pty_attrs) == 8
assert pty_attrs['name_father_hus'] == 'মৃত কুব্বাত মন্ডল'
# test tenure relationship attribute creation
tenure_relationships = TenureRelationship.objects.filter(
party=parties[0])
tr_attrs = {'tenure_name': 'Customary', 'tenure_notes': 'a few notes'}
assert len(tenure_relationships) == 1
assert len(tenure_relationships[0].attributes) == 2
assert tenure_relationships[0].attributes == tr_attrs
def test_import_parties_only(self):
importer = csv.CSVImporter(
project=self.project, path=self.path + self.valid_csv)
config = {
'file': self.path + self.valid_csv,
'entity_types': ['PT'],
'party_name_field': 'name_of_hh',
'party_type_field': 'party_type',
'attributes': self.party_attributes,
'project': self.project
}
importer.import_data(config)
assert Party.objects.all().count() == 10
assert SpatialUnit.objects.all().count() == 0
assert TenureRelationship.objects.all().count() == 0
# test party attribute creation
parties = Party.objects.filter(
attributes__contains={'Mobile_No': '০১৭৭২৫৬০১৯১'})
assert len(parties) == 1
pty_attrs = parties[0].attributes
assert len(pty_attrs) == 8
assert pty_attrs['name_father_hus'] == 'মৃত কুব্বাত মন্ডল'
def test_import_locations_only(self):
importer = csv.CSVImporter(
project=self.project, path=self.path + self.valid_csv)
config = {
'file': self.path + self.valid_csv,
'entity_types': ['SU'],
'location_type_field': 'location_type',
'geometry_field': 'location_geometry',
'attributes': self.location_attributes,
'project': self.project,
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
importer.import_data(config)
assert Party.objects.all().count() == 0
assert SpatialUnit.objects.all().count() == 10
assert TenureRelationship.objects.all().count() == 0
for su in SpatialUnit.objects.filter(project_id=self.project.pk).all():
if su.geometry is not None:
assert type(su.geometry) is Point
# test spatial unit attribute creation
sus = SpatialUnit.objects.filter(
attributes__contains={'nid_number': '3913647224045'})
assert len(sus) == 1
su = sus[0]
assert su.type == 'PA'
assert len(su.attributes) == 20
assert 'how_aquire_landwh' not in su.attributes.keys()
assert 'how_aquire_landw' in su.attributes.keys()
assert su.attributes[
'others_conflict'] == ('কিছু বেখলে অাছে জোর করে দখল করে ভোগ করে '
'অন্য জমির মালক।')
assert su.attributes['female_member'] == 4
assert su.attributes['location_problems'] == [
'conflict', 'risk_of_eviction'
]
def test_import_with_geoshape(self):
importer = csv.CSVImporter(
project=self.project, path=self.path + self.geoshape_csv)
config = {
'file': self.path + self.geoshape_csv,
'entity_types': ['SU', 'PT'],
'party_name_field': 'name_of_hh',
'party_type_field': 'party_type',
'location_type_field': 'location_type',
'geometry_field': 'location_geoshape',
'attributes': self.attributes,
'project': self.project,
'allowed_tenure_types': [t[0] for t in TENURE_RELATIONSHIP_TYPES],
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
importer.import_data(config)
assert Party.objects.all().count() == 10
assert SpatialUnit.objects.all().count() == 10
assert TenureRelationship.objects.all().count() == 10
for su in SpatialUnit.objects.filter(project_id=self.project.pk).all():
if su.geometry is not None:
assert type(su.geometry) is Polygon
def test_import_with_geotrace(self):
importer = csv.CSVImporter(
project=self.project, path=self.path + self.geotrace_csv)
config = {
'file': self.path + self.geotrace_csv,
'entity_types': ['SU', 'PT'],
'party_name_field': 'name_of_hh',
'party_type_field': 'party_type',
'location_type_field': 'location_type',
'geometry_field': 'location_geotrace',
'attributes': self.attributes,
'project': self.project,
'allowed_tenure_types': [t[0] for t in TENURE_RELATIONSHIP_TYPES],
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
importer.import_data(config)
assert Party.objects.all().count() == 10
assert SpatialUnit.objects.all().count() == 10
assert TenureRelationship.objects.all().count() == 10
for su in SpatialUnit.objects.filter(project_id=self.project.pk).all():
if su.geometry is not None:
assert type(su.geometry) is LineString
def _run_import_test(self, filename):
importer = csv.CSVImporter(
project=self.project, path=self.path + filename)
config = {
'file': self.path + filename,
'entity_types': ['SU', 'PT'],
'party_name_field': 'name_of_hh',
'party_type_field': 'party_type',
'location_type_field': 'location_type',
'geometry_field': 'location_geometry',
'attributes': self.attributes,
'project': self.project,
'allowed_tenure_types': [t[0] for t in TENURE_RELATIONSHIP_TYPES],
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
importer.import_data(config)
assert Party.objects.all().count() == 10
assert SpatialUnit.objects.all().count() == 10
assert TenureRelationship.objects.all().count() == 10
su1 = SpatialUnit.objects.filter(
attributes__contains={'nid_number': '3913647224045'}).first()
su2 = SpatialUnit.objects.filter(
attributes__contains={'nid_number': '3913647224033'}).first()
su3 = SpatialUnit.objects.filter(
attributes__contains={'nid_number': '3913647225965'}).first()
su4 = SpatialUnit.objects.filter(
attributes__contains={'nid_number': '3913647224043'}).first()
su5 = SpatialUnit.objects.filter(
attributes__contains={'nid_number': '3913647224044'}).first()
su6 = SpatialUnit.objects.filter(
attributes__contains={'nid_number': '3913647224185'}).first()
assert su1.geometry.geom_type == 'Point'
assert su2.geometry.geom_type == 'LineString'
assert su3.geometry.geom_type == 'Polygon'
assert su4.geometry.geom_type == 'MultiPoint'
assert su5.geometry.geom_type == 'MultiLineString'
assert su6.geometry.geom_type == 'MultiPolygon'
def test_import_with_wkt(self):
self._run_import_test(self.test_wkt)
def test_import_with_wkb(self):
self._run_import_test(self.test_wkb)
class XLSImportTest(UserTestCase, FileStorageTestCase, TestCase):
def setUp(self):
super().setUp()
self.valid_xls = '/organization/tests/files/test_download.xlsx'
self.one_to_many_xls = (
'/organization/tests/files/test_one_to_many.xlsx')
self.project = ProjectFactory.create(name='Test CSV Import')
xlscontent = self.get_file(
'/organization/tests/files/uttaran_test.xlsx', 'rb')
form = self.storage.save('xls-forms/uttaran_test.xlsx',
xlscontent.read())
xlscontent.close()
Questionnaire.objects.create_from_form(
xls_form=form,
project=self.project
)
# test for expected schema and attribute creation
assert 3 == Schema.objects.all().count()
assert 42 == Attribute.objects.all().count()
self.party_attributes = [
'party::educational_qualification', 'party::name_mouza',
'party::j_l', 'party::name_father_hus', 'party::village_name',
'party::mobile_no', 'party::occupation_hh', 'party::class_hh'
]
self.location_attributes = [
'spatialunit::deed_of_land', 'spatialunit::amount_othersland',
'spatialunit::land_calculation', 'spatialunit::how_aquire_landwh',
'spatialunit::female_member', 'spaitalunit::mutation_of_land',
'spatialunit::amount_agriland', 'spatialunit::nid_number',
'spatialunit::how_aquire_landt', 'spatialunit::boundary_conflict',
'spatialunit::dakhal_on_land', 'spatialunit::how_aquire_landp',
'spatialunit::how_aquire_landd', 'spatialunit::ownership_conflict',
'spatialunit::others_conflict', 'spatialunit::how_aquire_landm',
'spatialunit::khatain_of_land', 'spatialunit::male_member',
'spatialunit::how_aquire_landw', 'spatialunit::everything',
'spatialunit::location_problems',
'spatialunit::multiple_landowners'
]
self.tenure_attributes = [
'tenurerelationship::tenure_name',
'tenurerelationship::tenure_notes'
]
self.attributes = (
self.party_attributes + self.location_attributes +
self.tenure_attributes
)
def test_import_data(self):
importer = xls.XLSImporter(
project=self.project, path=self.path + self.valid_xls)
config = {
'file': self.path + self.valid_xls,
'type': 'xls',
'entity_types': ['SU', 'PT'],
'party_name_field': 'name',
'party_type_field': 'type',
'location_type_field': 'type',
'geometry_field': 'geometry.ewkt',
'attributes': self.attributes,
'project': self.project,
'allowed_tenure_types': [t[0] for t in TENURE_RELATIONSHIP_TYPES],
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
importer.import_data(config)
assert Party.objects.all().count() == 10
assert SpatialUnit.objects.all().count() == 10
assert TenureRelationship.objects.all().count() == 10
for su in SpatialUnit.objects.filter(project_id=self.project.pk).all():
if su.geometry is not None:
assert type(su.geometry) is Point
# test spatial unit attribute creation
sus = SpatialUnit.objects.filter(
attributes__contains={'nid_number': '3913647224045'})
assert len(sus) == 1
su = sus[0]
assert su.type == 'PA'
assert len(su.attributes) == 20
assert 'how_aquire_landwh' not in su.attributes.keys()
assert 'how_aquire_landw' in su.attributes.keys()
assert su.attributes[
'others_conflict'] == ('কিছু বেখলে অাছে জোর করে দখল করে ভোগ করে '
'অন্য জমির মালক।')
assert su.attributes['female_member'] == 4
assert su.attributes['location_problems'] == [
'conflict', 'risk_of_eviction'
]
# test party attribute creation
parties = Party.objects.filter(
attributes__contains={'Mobile_No': '০১৭৭২৫৬০১৯১'})
assert len(parties) == 1
assert parties[0].type == 'IN'
pty_attrs = parties[0].attributes
assert len(pty_attrs) == 8
assert pty_attrs['name_father_hus'] == 'মৃত কুব্বাত মন্ডল'
# test tenure relationship attribute creation
tenure_relationships = TenureRelationship.objects.filter(
party=parties[0])
tr_attrs = {'tenure_name': 'Customary', 'tenure_notes': 'a few notes'}
assert len(tenure_relationships) == 1
assert len(tenure_relationships[0].attributes) == 2
assert tenure_relationships[0].attributes == tr_attrs
def test_import_locations_only(self):
importer = xls.XLSImporter(
project=self.project, path=self.path + self.valid_xls)
config = {
'file': self.path + self.valid_xls,
'type': 'xls',
'entity_types': ['SU'],
'location_type_field': 'type',
'geometry_field': 'geometry.ewkt',
'attributes': self.location_attributes,
'project': self.project,
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
importer.import_data(config)
assert Party.objects.all().count() == 0
assert SpatialUnit.objects.all().count() == 10
assert TenureRelationship.objects.all().count() == 0
for su in SpatialUnit.objects.filter(project_id=self.project.pk).all():
if su.geometry is not None:
assert type(su.geometry) is Point
# test spatial unit attribute creation
sus = SpatialUnit.objects.filter(
attributes__contains={'nid_number': '3913647224045'})
assert len(sus) == 1
su = sus[0]
assert len(su.attributes) == 20
assert 'how_aquire_landwh' not in su.attributes.keys()
assert 'how_aquire_landw' in su.attributes.keys()
assert su.attributes[
'others_conflict'] == ('কিছু বেখলে অাছে জোর করে দখল করে ভোগ করে '
'অন্য জমির মালক।')
assert su.attributes['female_member'] == 4
assert su.attributes['location_problems'] == [
'conflict', 'risk_of_eviction'
]
def test_import_parties_only(self):
importer = xls.XLSImporter(
project=self.project, path=self.path + self.valid_xls)
config = {
'file': self.path + self.valid_xls,
'type': 'xls',
'entity_types': ['PT'],
'party_name_field': 'name',
'party_type_field': 'type',
'attributes': self.party_attributes,
'project': self.project
}
importer.import_data(config)
assert Party.objects.all().count() == 10
assert SpatialUnit.objects.all().count() == 0
assert TenureRelationship.objects.all().count() == 0
# test party attribute creation
parties = Party.objects.filter(
attributes__contains={'Mobile_No': '০১৭৭২৫৬০১৯১'})
assert len(parties) == 1
assert parties[0].type == 'IN'
pty_attrs = parties[0].attributes
assert len(pty_attrs) == 8
assert pty_attrs['name_father_hus'] == 'মৃত কুব্বাত মন্ডল'
def test_one_to_many_relationships(self):
importer = xls.XLSImporter(
project=self.project, path=self.path + self.one_to_many_xls)
config = {
'file': self.path + self.one_to_many_xls,
'type': 'xls',
'entity_types': ['SU', 'PT'],
'party_name_field': 'name',
'party_type_field': 'type',
'location_type_field': 'type',
'geometry_field': 'geometry.ewkt',
'attributes': self.attributes,
'project': self.project,
'allowed_tenure_types': [t[0] for t in TENURE_RELATIONSHIP_TYPES],
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
importer.import_data(config)
assert Party.objects.all().count() == 10
assert SpatialUnit.objects.all().count() == 9
assert TenureRelationship.objects.all().count() == 6
# test one-to-many location-party
sus = SpatialUnit.objects.filter(
attributes__contains={'multiple_landowners': 'yes'})
assert sus.count() == 1
su = sus[0]
assert len(su.tenure_relationships.all()) == 2
# test one-to-many party-location
parties = Party.objects.filter(name='অাব্দুল জলিল মন্ডল')
assert parties.count() == 1
party = parties[0]
assert party.tenure_relationships.all().count() == 3
def test_missing_relationship_tab(self):
df = pd.read_excel(self.path + self.valid_xls, sheetname=None)
del df['relationships']
entity_types = ['SU', 'PT']
with pytest.raises(exceptions.DataImportError) as e:
xls.get_csv_from_dataframe(df, entity_types)
assert e is not None
assert str(e.value) == (
"Error importing file: Missing 'relationships' worksheet."
)
def test_empty_party_data(self):
df = pd.read_excel(self.path + self.valid_xls, sheetname=None)
empty_parties = pd.DataFrame()
df['parties'] = empty_parties
entity_types = ['SU', 'PT']
with pytest.raises(exceptions.DataImportError) as e:
xls.get_csv_from_dataframe(df, entity_types)
assert e is not None
assert str(e.value) == (
'Error importing file: Empty worksheet.'
)
def test_invalid_entity_type(self):
df = pd.read_excel(self.path + self.valid_xls, sheetname=None)
entity_types = ['INVALID']
with pytest.raises(exceptions.DataImportError) as e:
xls.get_csv_from_dataframe(df, entity_types)
assert e is not None
assert str(e.value) == (
'Error importing file: Unsupported import format.'
)
class ImportConditionalAttributesTest(UserTestCase, FileStorageTestCase,
TestCase):
def setUp(self):
super().setUp()
self.conditionals_csv = (
'/organization/tests/files/test_conditionals.csv')
self.conditionals_xls = (
'/organization/tests/files/test_conditionals.xlsx')
self.project = ProjectFactory.create(current_questionnaire='123abc')
q_factories.QuestionnaireFactory.create(id='123abc',
project=self.project)
question = q_factories.QuestionFactory.create(
questionnaire_id='123abc',
type='S1',
name='tenure_type',
label={'en': 'Type', 'de': 'Typ'})
q_factories.QuestionOptionFactory.create(question=question, name='CU')
q_factories.QuestionOptionFactory.create(question=question, name='WR')
q_factories.QuestionOptionFactory.create(question=question, name='LH')
q_factories.QuestionOptionFactory.create(question=question, name='FH')
self.spatial_content_type = ContentType.objects.get(
app_label='spatial', model='spatialunit'
)
sp_schema = Schema.objects.create(
content_type=self.spatial_content_type,
selectors=(
self.project.organization.id, self.project.id, '123abc', ))
attr_type = AttributeType.objects.get(name='text')
Attribute.objects.create(
schema=sp_schema,
name='su_attr', long_name='Test field',
attr_type=attr_type, index=0,
required=False, omit=False
)
self.party_content_type = ContentType.objects.get(
app_label='party', model='party'
)
pt_schema = Schema.objects.create(
content_type=self.party_content_type,
selectors=(
self.project.organization.id, self.project.id, '123abc', ))
pt_schema_in = Schema.objects.create(
content_type=self.party_content_type,
selectors=(
self.project.organization.id, self.project.id, '123abc', 'IN'))
pt_schema_gr = Schema.objects.create(
content_type=self.party_content_type,
selectors=(
self.project.organization.id, self.project.id, '123abc', 'GR'))
attr_type_txt = AttributeType.objects.get(name='text')
attr_type_int = AttributeType.objects.get(name='integer')
attr_type_dec = AttributeType.objects.get(name='decimal')
Attribute.objects.create(
schema=pt_schema,
name='default_attr', long_name='Test field',
attr_type=attr_type_txt, index=0,
required=False, omit=False
)
Attribute.objects.create(
schema=pt_schema,
name='default_int_attr', long_name='Test integer field',
attr_type=attr_type_int, index=1,
required=False, omit=False
)
Attribute.objects.create(
schema=pt_schema_in,
name='party_in', long_name='Test IN field',
attr_type=attr_type_txt, index=0,
required=False, omit=False
)
Attribute.objects.create(
schema=pt_schema_gr,
name='party_gr', long_name='Test GR field',
attr_type=attr_type_txt, index=0,
required=False, omit=False
)
Attribute.objects.create(
schema=pt_schema_gr,
name='party_gr_dec', long_name='Test GR dec field',
attr_type=attr_type_dec, index=1,
required=False, omit=False
)
self.party_default_attributes = [
'party::default_attr', 'party::default_int_attr'
]
self.party_in_attributes = [
'party::party_in'
]
self.party_gr_attributes = [
'party::party_gr', 'party::party_gr_dec'
]
self.party_attributes = (
self.party_default_attributes + self.party_in_attributes +
self.party_gr_attributes
)
self.location_attributes = ['spatialunit::su_attr']
self.attributes = (
self.location_attributes + self.party_attributes
)
def test_get_flattened_attribute_map(self):
importer = csv.CSVImporter(
project=self.project, path=self.path + self.conditionals_csv)
entity_types = ['PT', 'SU']
attr_map, extra_attrs, extra_headers = importer.get_attribute_map(
'csv', entity_types, flatten=True
)
party_attrs = attr_map['party']
assert len(party_attrs) == 5
def test_import_csv(self):
importer = csv.CSVImporter(
project=self.project, path=self.path + self.conditionals_csv)
config = {
'file': self.path + self.conditionals_csv,
'entity_types': ['PT', 'SU'],
'party_name_field': 'party_name',
'party_type_field': 'party_type',
'location_type_field': 'location_type',
'geometry_field': 'location_geometry',
'attributes': self.attributes,
'project': self.project,
'allowed_tenure_types': [t[0] for t in TENURE_RELATIONSHIP_TYPES],
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
importer.import_data(config)
assert Party.objects.all().count() == 2
assert SpatialUnit.objects.all().count() == 2
assert TenureRelationship.objects.all().count() == 2
for su in SpatialUnit.objects.filter(project_id=self.project.pk).all():
if su.geometry is not None:
assert type(su.geometry) is Point
# test spatial unit attribute creation
sus = SpatialUnit.objects.filter(
attributes__contains={'su_attr': 'some value'})
assert len(sus) == 1
su = sus[0]
assert su.type == 'PA'
assert len(su.attributes) == 1
# test party attribute creation
party_in = Party.objects.filter(type='IN').first()
assert party_in.name == 'Test Party'
pty_attrs = party_in.attributes
assert len(pty_attrs) == 3
assert pty_attrs['default_attr'] == 'test default attr in'
assert pty_attrs['default_int_attr'] == 10
assert pty_attrs['party_in'] == 'some individual value'
party_gr = Party.objects.filter(type='GR').first()
assert party_gr.name == 'Another Test Party'
pty_attrs = party_gr.attributes
assert len(pty_attrs) == 4
assert pty_attrs['default_attr'] == 'test default attr gr'
assert pty_attrs['default_int_attr'] == 20
assert pty_attrs['party_gr'] == 'some group value'
assert pty_attrs['party_gr_dec'] == 2.00
# test tenure relationship creation
tenure_relationships = TenureRelationship.objects.all()
assert len(tenure_relationships) == 2
def test_import_xlsx(self):
importer = xls.XLSImporter(
project=self.project, path=self.path + self.conditionals_xls)
config = {
'file': self.path + self.conditionals_xls,
'type': 'xls',
'entity_types': ['SU', 'PT'],
'party_name_field': 'name',
'party_type_field': 'type',
'location_type_field': 'type',
'geometry_field': 'geometry.ewkt',
'attributes': self.attributes,
'project': self.project,
'allowed_tenure_types': [t[0] for t in TENURE_RELATIONSHIP_TYPES],
'allowed_location_types': [choice[0] for choice in TYPE_CHOICES]
}
importer.import_data(config)
assert Party.objects.all().count() == 2
assert SpatialUnit.objects.all().count() == 2
assert TenureRelationship.objects.all().count() == 2
for su in SpatialUnit.objects.filter(project_id=self.project.pk).all():
if su.geometry is not None:
assert type(su.geometry) is Point
# test spatial unit attribute creation
sus = SpatialUnit.objects.filter(
attributes__contains={'su_attr': 'some value'})
assert len(sus) == 1
su = sus[0]
assert su.type == 'PA'
assert len(su.attributes) == 1
# test party attribute creation
party_in = Party.objects.filter(type='IN').first()
assert party_in.name == 'Another Test Party'
pty_attrs = party_in.attributes
assert len(pty_attrs) == 3
assert pty_attrs['default_attr'] == 'test default attr in'
assert pty_attrs['default_int_attr'] == 10
assert pty_attrs['party_in'] == 'some individual value'
party_gr = Party.objects.filter(type='GR').first()
assert party_gr.name == 'Test Party'
pty_attrs = party_gr.attributes
assert len(pty_attrs) == 4
assert pty_attrs['default_attr'] == 'test default attr gr'
assert pty_attrs['default_int_attr'] == 20
assert pty_attrs['party_gr'] == 'some group value'
assert pty_attrs['party_gr_dec'] == 2.0
# test tenure relationship creation
tenure_relationships = TenureRelationship.objects.all()
assert len(tenure_relationships) == 2
| agpl-3.0 |
louisdijkstra/gonl-sv | back-up/version3/sample.py | 1 | 23439 | #!/usr/bin/env python
"""
Copyright (C) 2015 Louis Dijkstra
This file is part of gonl-sv
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import operator
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import pyplot
from random import choice
import scipy as sp
import scipy.stats
# Global variables
LD_THRES = .8 # R^2 threshold for a deletion and SNP to be considered 'related'
D_TSS_THRES = 1000
D_SNP_DEL_THRES = 100000
AF_THRES = 0.05
N_SAMPLES = 1000 # number of samples
def snpsSimilar (hit_snp, nonhit_snp, af_thres = AF_THRES, d_tss_thres = D_TSS_THRES):
if hit_snp['type'] == nonhit_snp['type'] and abs(hit_snp['d_tss'] - nonhit_snp['d_tss']) <= d_tss_thres:
if abs(hit_snp['af'] - nonhit_snp['af']) <= af_thres or abs(hit_snp['af'] - (1 - nonhit_snp['af'])) <= af_thres:
return True
return False
def returnControlIndices (hit_snps, nonhit_snps, af_thres = AF_THRES, d_tss_thres = D_TSS_THRES):
control_indices = [] # will contain lists of indices. Each list contains indices for potential control snps
for hit_snp in hit_snps:
similar_nonhitsnps = []
for i in range(len(nonhit_snps)):
if snpsSimilar(hit_snp, nonhit_snps[i], af_thres, d_tss_thres):
similar_nonhitsnps.append(i)
if len(similar_nonhitsnps) == 0:
print 'WARNING: no matching non-hit-SNP found. Constraints too stringent'
control_indices.append(similar_nonhitsnps)
return control_indices
def snpsSimilar3 (hit_snp, nonhit_snp, af_thres = AF_THRES, d_tss_thres = D_TSS_THRES):
if hit_snp['type'] == nonhit_snp['type'] and abs(hit_snp['d_tss'] - nonhit_snp['d_tss']) <= d_tss_thres:
if abs(hit_snp['af'] - nonhit_snp['af']) <= af_thres or abs(hit_snp['af'] - (1 - nonhit_snp['af'])) <= af_thres:
if abs(hit_snp['d_snp_del'] - nonhit_snp['d_snp_del']) <= D_SNP_DEL_THRES:
return True
return False
def returnControlIndices3 (hit_snps, nonhit_snps, af_thres = AF_THRES, d_tss_thres = D_TSS_THRES):
control_indices = [] # will contain lists of indices. Each list contains indices for potential control snps
for hit_snp in hit_snps:
similar_nonhitsnps = []
for i in range(len(nonhit_snps)):
if snpsSimilar3(hit_snp, nonhit_snps[i], af_thres, d_tss_thres):
similar_nonhitsnps.append(i)
if len(similar_nonhitsnps) == 0:
print 'WARNING: no matching non-hit-SNP found. Constraints too stringent'
control_indices.append(similar_nonhitsnps)
return control_indices
def sampleChromosome3 (chromosome, ld_thres = LD_THRES, d_tss_thres = D_TSS_THRES, af_thres = AF_THRES, n_samples = N_SAMPLES):
[hit_snps, nonhit_snps] = readSNPData(chromosome)
n_hit_snps = len(hit_snps)
# control_indices contain lists of indices. Each list contains indices for potential control snps
control_indices = returnControlIndices (hit_snps, nonhit_snps, af_thres, d_tss_thres)
sample_results = [0.0] * n_samples
for sample in range(n_samples):
for i in range(n_hit_snps):
if len(control_indices[i]) != 0:
hit_snp = hit_snps[i]
nonhit_snp = nonhit_snps[choice(control_indices[i])] # randomly select a similar nonhit SNP
if abs(hit_snp['af'] - nonhit_snp['af']) <= abs(hit_snp['af'] - (1 - nonhit_snp['af'])):
if nonhit_snp['r'] >= .84:
sample_results[sample] = sample_results[sample] + 1
else:
if -1 * nonhit_snp['r'] >= .84:
sample_results[sample] = sample_results[sample] + 1
case = 0.0
for hit_snp in hit_snps:
if hit_snp['r'] >= .84:
case = case + 1
return [n_hit_snps, case, sample_results]
def plotSampleResults3 (case, control, n_samples = N_SAMPLES):
bins = np.linspace(min(control),max(control),50)
w = np.ones_like(control) / len(control)
plt.hist(control, bins, weights=w, facecolor='blue', alpha=1)
plt.axvline(x=case, color='r', linewidth=4)
plt.xlabel(r'Percentage of SNPs with $R^2 > \ 0.7$')
plt.ylabel('Probability')
plt.show()
def sample3(ld_thres = LD_THRES, d_tss_thres = D_TSS_THRES, af_thres = AF_THRES, n_samples = N_SAMPLES):
sample_results = [0.0] * n_samples
case = 0.0
n_hit_snps = 0.0
for chromosome in range(1,23):
print 'Processing chromosome', chromosome
[hits, case_chr, sample_chr] = sampleChromosome3(chromosome, ld_thres, d_tss_thres, af_thres, n_samples)
case = case + case_chr
n_hit_snps = n_hit_snps + hits
sample_results = map(operator.add, sample_results, sample_chr)
print 'Chr ' + str(chromosome) + '\t', hits, case_chr / hits
for i in range(n_samples):
sample_results[i] = sample_results[i] / n_hit_snps
print '\nOVERALL: ', n_hit_snps, case / n_hit_snps, sample_results
plotSampleResults3 (case / n_hit_snps, sample_results)
def sampleChromosome2 (chromosome, ld_thres = LD_THRES, d_tss_thres = D_TSS_THRES, af_thres = AF_THRES, n_samples = N_SAMPLES):
[hit_snps, nonhit_snps] = readSNPData(chromosome)
n_hit_snps = len(hit_snps)
# control_indices contain lists of indices. Each list contains indices for potential control snps
control_indices = returnControlIndices (hit_snps, nonhit_snps, af_thres, d_tss_thres)
sample_results = [0.0] * n_samples
for sample in range(n_samples):
for i in range(n_hit_snps):
if len(control_indices[i]) != 0:
hit_snp = hit_snps[i]
nonhit_snp = nonhit_snps[choice(control_indices[i])] # randomly select a similar nonhit SNP
if abs(hit_snp['af'] - nonhit_snp['af']) <= abs(hit_snp['af'] - (1 - nonhit_snp['af'])):
if nonhit_snp['r'] >= 0:
sample_results[sample] = sample_results[sample] + 1
else:
if -1 * nonhit_snp['r'] >= 0:
sample_results[sample] = sample_results[sample] + 1
case = 0.0
for hit_snp in hit_snps:
if hit_snp['r'] >= 0:
case = case + 1
return [n_hit_snps, case, sample_results]
def sample2(ld_thres = LD_THRES, d_tss_thres = D_TSS_THRES, af_thres = AF_THRES, n_samples = N_SAMPLES):
sample_results = [0.0] * n_samples
case = 0.0
n_hit_snps = 0.0
for chromosome in range(1,23):
print 'Processing chromosome', chromosome
[hits, case_chr, sample_chr] = sampleChromosome2(chromosome, ld_thres, d_tss_thres, af_thres, n_samples)
case = case + case_chr
n_hit_snps = n_hit_snps + hits
sample_results = map(operator.add, sample_results, sample_chr)
print 'Chr ' + str(chromosome) + '\t', hits, case_chr / hits
for i in range(n_samples):
sample_results[i] = sample_results[i] / n_hit_snps
print '\nOVERALL: ', n_hit_snps, case / n_hit_snps, sample_results
plotSampleResults (case / n_hit_snps, sample_results)
def sampleChromosome (chromosome, ld_thres = LD_THRES, d_tss_thres = D_TSS_THRES, af_thres = AF_THRES, n_samples = N_SAMPLES):
[hit_snps, nonhit_snps] = readSNPData(chromosome)
n_hit_snps = len(hit_snps)
# control_indices contain lists of indices. Each list contains indices for potential control snps
control_indices = returnControlIndices (hit_snps, nonhit_snps, af_thres, d_tss_thres)
sample_results = [0.0] * n_samples
for sample in range(n_samples):
for i in range(n_hit_snps):
if len(control_indices[i]) != 0:
nonhit_snp = nonhit_snps[choice(control_indices[i])] # randomly select a similar nonhit SNP
if nonhit_snp['r']**2 >= ld_thres:
sample_results[sample] = sample_results[sample] + 1
case = 0.0
for hit_snp in hit_snps:
if hit_snp['r']**2 >= ld_thres:
case = case + 1
return [n_hit_snps, case, sample_results]
def plotSampleResults (case, control, n_samples = N_SAMPLES):
bins = np.linspace(min(control),max(control),50)
w = np.ones_like(control) / len(control)
plt.hist(control, bins, weights=w, facecolor='blue', alpha=1)
plt.axvline(x=case, color='r', linewidth=4)
plt.xlabel(r'Percentage of SNPs with $R > 0$')
plt.ylabel('Probability')
plt.show()
def sample(ld_thres = LD_THRES, d_tss_thres = D_TSS_THRES, af_thres = AF_THRES, n_samples = N_SAMPLES):
sample_results = [0.0] * n_samples
case = 0.0
n_hit_snps = 0.0
for chromosome in range(1,23):
print 'Processing chromosome', chromosome
[hits, case_chr, sample_chr] = sampleChromosome(chromosome, ld_thres, d_tss_thres, af_thres, n_samples)
case = case + case_chr
n_hit_snps = n_hit_snps + hits
sample_results = map(operator.add, sample_results, sample_chr)
print 'Chr ' + str(chromosome) + '\t', hits, case_chr / hits
for i in range(n_samples):
sample_results[i] = sample_results[i] / n_hit_snps
print '\nOVERALL: ', n_hit_snps, case / n_hit_snps, sample_results
plotSampleResults (case / n_hit_snps, sample_results)
def returnHitSNPPositions (chromosome):
file_snps = open('/ufs/dijkstra/Projects/SNPs_LD_deletions/SNP_max_r2_chr' + str(chromosome) + '.txt', 'r')
positions = [] # hit-SNP positions are stored here
for snp in file_snps.readlines():
data = [value for value in snp.split()]
if int(data[1]) == 1:
positions.append(int(data[2]))
else:
break
file_snps.close()
return positions
# Writes arrays to the prescribed output file
def writeToOutputFile(output, chr, individuals):
filename = None
if 'children' in individuals:
filename = 'chr' + str(chr) + '_children_metrics.txt'
else:
filename = 'chr' + str(chr) + '_parents_metrics.txt'
output_file = open(filename, 'a')
for line in output:
for item in line:
output_file.write(str(item) + '\t')
output_file.write('\n')
output_file.close()
# Returns the distance between the SNP and the deletion (CNV)
# Returns 0 when the SNP lies within the deletion
def distanceSNP_DEL (snp_pos, del_start, del_length):
if snp_pos < del_start:
return del_start - snp_pos
elif snp_pos > del_start + del_length:
		return snp_pos - (del_start + del_length)
else: # snp overlaps with the deletion
return 0
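# Quick sanity check of the distance convention above (hypothetical coordinates,
# not taken from the data): a deletion starting at 100 with length 20 ends at
# 120, so a SNP at position 150 lies 30 bp away, while a SNP at 110 falls
# inside the deletion:
# assert distanceSNP_DEL(150, 100, 20) == 30
# assert distanceSNP_DEL(110, 100, 20) == 0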
def readSNPDeletionPairs(chromosome, individuals):
hitsnp_del_data = [] # output is stored here
nonhitsnp_del_data = []
file_snps = None
if 'children' in individuals:
file_snps = open('chr' + str(chromosome) + '_children.txt', 'r')
else:
		file_snps = open('chr' + str(chromosome) + '_parents.txt', 'r')
for line in file_snps.readlines():
l = [value for value in line.split()]
snp_del = dict([('pos', int(l[1])), ('type', int(l[2])), ('dist_tss', int(l[3])), ('del_pos', int(l[4])), ('del_length', int(l[5])), ('a', int(l[6])), ('b', int(l[7])), ('c', int(l[8]))])
if l[0] == '0':
nonhitsnp_del_data.append(snp_del)
else:
hitsnp_del_data.append(snp_del)
return [hitsnp_del_data, nonhitsnp_del_data]
def determineGamma_N(a,b,c,N):
a = float(a)
b = float(b)
c = float(c)
gamma = a / (a + c) - (a + b) / N
if gamma <= 0:
return gamma * N / (a + b)
elif a+c <= a + b:
return gamma / (1 - (a+b)/N)
else:
return gamma / ((a + b)/(a+c) - (a+b)/N)
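# Note (added sketch, not from the original author): for a 2x2 table with cells
# a, b, c and d = N - a - b - c, determineGamma_N starts from the raw association
# gamma = a/(a+c) - (a+b)/N, i.e. the difference between the conditional
# frequency a/(a+c) and the marginal frequency (a+b)/N; the branches above
# appear to rescale that difference by its attainable extreme given the table
# margins, so the returned value lies in [-1, 1].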
def determineR(a,b,c,N):
	d = N - a - b - c
return float((a * d - b * c)) / math.sqrt((a+b)*(a+c)*(b+d)*(c+d))
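# Worked example for determineR (hypothetical counts, for illustration only):
# it returns the phi coefficient of the 2x2 table with cells a, b, c and
# d = N - a - b - c, i.e. the Pearson correlation of two binary variables.
# For a=30, b=20, c=20, N=100 (so d=30):
# (30*30 - 20*20) / sqrt(50*50*50*50) = 500 / 2500 = 0.2
# assert abs(determineR(30, 20, 20, 100) - 0.2) < 1e-12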
def fisher(a,b,c,N):
table = [[a,c], [b, N - a - b - c]]
[odds_ratio, p2] = sp.stats.fisher_exact(table, alternative='two-sided')
[odds_ratio, pg] = sp.stats.fisher_exact(table, alternative='greater')
[odds_ratio, pl] = sp.stats.fisher_exact(table, alternative='less')
return [p2,pg,pl]
def compute(chromosome, individuals):
N = 470
if 'parents' in individuals:
N = None # TODO!
[hitsnp_del, nonhitsnp_del] = readSNPDeletionPairs(chromosome, individuals)
output = []
for snp in hitsnp_del:
dist_snp_del = distanceSNP_DEL(snp['pos'], snp['del_pos'], snp['del_length'])
r = determineR(snp['a'], snp['b'], snp['c'], N)
gamma_n = determineGamma_N(snp['a'], snp['b'], snp['c'], N)
[p2,pg,pl] = fisher(snp['a'], snp['b'], snp['c'], N)
output.append([1, snp['type'], snp['dist_tss'], dist_snp_del, r, gamma_n, p2,pg,pl])
#print [1, snp['type'], snp['dist_tss'], dist_snp_del, r, gamma_n, p2,pg,pl]
for snp in nonhitsnp_del:
dist_snp_del = distanceSNP_DEL(snp['pos'], snp['del_pos'], snp['del_length'])
r = determineR(snp['a'], snp['b'], snp['c'], N)
gamma_n = determineGamma_N(snp['a'], snp['b'], snp['c'], N)
[p2,pg, pl] = fisher(snp['a'], snp['b'], snp['c'], N)
output.append([0, snp['type'], snp['dist_tss'], dist_snp_del, r, gamma_n, p2,pg,pl])
#print [0, snp['type'], snp['dist_tss'], dist_snp_del, r, gamma_n, p2,pg,pl]
writeToOutputFile(output, chromosome, individuals)
def readSNPData (chromosome):
hit_snp_positions = returnHitSNPPositions (chromosome)
file_snps = open('chr' + str(chromosome) + '.txt', 'r')
hit_snps = [] # hit-snp data is stored here
nonhit_snps = [] # non hit-snp data is stored here
# Read through all lines
for line in file_snps.readlines():
l = [value for value in line.split()]
snp = [('hit', int(l[1])),('pos', int(l[2])), ('type', l[3]), ('af', float(l[4]))]
snp.append(('del_pos', int(l[5])))
snp.append(('del_length', int(l[6])))
snp.append(('del_af', float(l[7])))
snp.append(('r', float(l[8])))
snp.append(('p', float(l[9])))
snp.append(('d_snp_del', int(l[10])))
snp.append(('d_tss', int(l[11])))
snp.append(('min_dist_snp_del', int(l[12])))
snp.append(('g', float(l[13])))
snp.append(('g_n', float(l[14])))
snp.append(('a', int(l[15])))
snp.append(('b', int(l[16])))
snp.append(('c', int(l[17])))
snp.append(('d', int(l[18])))
if int(l[2]) in hit_snp_positions:
hit_snps.append(dict(snp))
else:
nonhit_snps.append(dict(snp))
file_snps.close()
return [hit_snps, nonhit_snps]
def plotRDistribution_nonhit ():
nonhit_r = []
for chromosome in range(1,23):
print 'Processing chromosome', chromosome
[hit_snps, nonhit_snps] = readSNPData(chromosome)
for snp in nonhit_snps:
nonhit_r.append(snp['r'])
#print snp['r']
bins = np.linspace(-1.0,1.0,100)
w = np.ones_like(nonhit_r) / len(nonhit_r)
plt.hist(nonhit_r, bins, weights=w, facecolor='blue', alpha=1)
plt.xlabel(r'$R$')
plt.ylabel('Probability')
plt.title(r'non-GWAS SNP - Deletion combinations')
plt.savefig('R_nonhit.png')
plt.show()
def readSNPData_Androniki ():
hit_snp_positions = returnHitSNPPositions (20)
file_snps = open('androniki_r_chr20.phased', 'r')
hit_snps = [] # hit-snp data is stored here
nonhit_snps = [] # non hit-snp data is stored here
# Read through all lines
for line in file_snps.readlines():
l = [value for value in line.split()]
snp = [('hit', int(l[1])),('pos', int(l[2])), ('type', l[3]), ('af', float(l[4]))]
snp.append(('del_pos', int(l[5])))
snp.append(('del_length', int(l[6])))
snp.append(('del_af', float(l[7])))
snp.append(('r', float(l[8])))
snp.append(('p', float(l[9])))
snp.append(('d_snp_del', int(l[10])))
snp.append(('d_tss', int(l[11])))
if int(l[2]) in hit_snp_positions:
hit_snps.append(dict(snp))
else:
nonhit_snps.append(dict(snp))
file_snps.close()
return [hit_snps, nonhit_snps]
def plotRDistribution_hit_androniki ():
hit_r = []
[hit_snps, nonhit_snps] = readSNPData_Androniki()
for snp in hit_snps:
hit_r.append(snp['r'])
bins = np.linspace(-1.0,1.0,20)
w = np.ones_like(hit_r) / len(hit_r)
plt.hist(hit_r, bins, weights=w, facecolor='blue', alpha=1)
plt.xlabel(r'$R$')
plt.ylabel('Probability')
plt.title(r'GWAS SNP - Deletion combinations on chromosome 20')
plt.savefig('R_chr20.png')
plt.show()
def plotRDistribution_hit ():
hit_r = []
for chromosome in range(1,23):
print 'Processing chromosome', chromosome
[hit_snps, nonhit_snps] = readSNPData(chromosome)
for snp in hit_snps:
hit_r.append(snp['r'])
#print snp['r']
bins = np.linspace(-1.0,1.0,100)
w = np.ones_like(hit_r) / len(hit_r)
plt.hist(hit_r, bins, weights=w, facecolor='blue', alpha=1)
plt.xlabel(r'$R$')
plt.ylabel('Probability')
plt.title(r'GWAS SNP - Deletion combinations')
plt.savefig('R.png')
plt.show()
def plotGamma_n ():
hit, nonhit = [], []
for chromosome in range(22,23):
print 'Processing chromosome', chromosome
[hit_snps, nonhit_snps] = readSNPData(chromosome)
for snp in hit_snps:
hit.append(snp['g_n'])
for snp in nonhit_snps:
nonhit.append(snp['g_n'])
bins = np.linspace(-1.0,1.0,100)
w = np.ones_like(hit) / len(hit)
plot1 = plt.hist(hit, bins, weights=w, facecolor='red', alpha=0.5, label='GWAS SNPs')
w = np.ones_like(nonhit) / len(nonhit)
plot2 = plt.hist(nonhit, bins, weights=w, facecolor='blue', alpha=0.5, label='non GWAS SNPs')
plt.xlabel(r'$\gamma_n$')
plt.ylabel('Probability')
plt.title('SNP-Deletion Association')
plt.legend()
plt.show()
def plotMaxR2 ():
hit, nonhit = [], []
for chromosome in range(1,23):
print 'Processing chromosome', chromosome
[hit_snps, nonhit_snps] = readSNPData(chromosome)
for snp in hit_snps:
hit.append(snp['r']**2)
for snp in nonhit_snps:
nonhit.append(snp['r']**2)
bins = np.linspace(0.0,1.0,100)
w = np.ones_like(hit) / len(hit)
plot1 = plt.hist(hit, bins, weights=w, facecolor='red', alpha=0.5, label='GWAS SNPs')
w = np.ones_like(nonhit) / len(nonhit)
plot2 = plt.hist(nonhit, bins, weights=w, facecolor='blue', alpha=0.5, label='non GWAS SNPs')
plt.xlabel(r'$R^2$')
plt.ylabel('Probability')
plt.title('SNP-Deletion Association')
plt.legend()
plt.show()
def plotECDF ():
hit, nonhit = [], []
for chromosome in range(1,23):
print 'Processing chromosome', chromosome
snps = readSNPData(chromosome)
for snp in snps:
if snp['hit'] == 1:
hit.append(snp['r2'])
else:
nonhit.append(snp['r2'])
num_bins = 100
counts, bin_edges = np.histogram(nonhit, bins=num_bins, normed=True)
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf)
plt.show()
# Writes arrays to the prescribed output file
def writeSNPsToFile(snps, filename):
output_file = open(filename, 'a')
output_file.write('pos\tsnp_af\ttype\tR\tp\tdel_pos\tdel_length\tdel_af\td_tss\n')
for snp in snps:
output_file.write(str(snp['pos']) + '\t')
output_file.write(str(snp['af']) + '\t')
output_file.write(str(snp['type']) + '\t')
output_file.write(str(snp['r']) + '\t')
output_file.write(str(snp['p']) + '\t')
output_file.write(str(snp['del_pos']) + '\t')
output_file.write(str(snp['del_length']) + '\t')
output_file.write(str(snp['del_af']) + '\t')
output_file.write(str(snp['d_tss']) + '\n')
output_file.close()
#plotRDistribution_nonhit()
#plotMaxR2()
#sample3()
#[hit_snps, nonhit_snps] = readSNPData(20)
#writeSNPsToFile(hit_snps, 'hitSNPsChromosome20.txt')
#for c in range(1,22):
# compute(c, ('children'))
# Read in the data for one chromosome and returns the data for the hit- and nonhit-SNPs and deletion pairs
def readData(chromosome, individuals):
file_snps = None
if 'children' in individuals:
file_snps = open('chr' + str(chromosome) + '_children_last.txt', 'r')
else:
file_snps = open('chr' + str(chromosome) + '_parents_last.txt', 'r')
hit_snps, nonhit_snps = [], []
# Read through all lines
for line in file_snps.readlines():
l = [value for value in line.split()]
snp = [('pos', int(l[1])), ('del_pos', int(l[2])), ('type', int(l[3])), ('d_tss', int(l[4])), ('d_del', int(l[5])), ('r', float(l[6])), ('p', float(l[7])), ('a', int(l[8])), ('b', int(l[9])), ('c', int(l[10]))]
if int(l[0]) == 1:
hit_snps.append(dict(snp))
else:
nonhit_snps.append(dict(snp))
file_snps.close()
return [hit_snps, nonhit_snps]
# Determines whether two SNP-Deletion pairs are similar
def snpsSimilarP (hit_snp, nonhit_snp, d_tss_thres = D_TSS_THRES, d_snp_del_thres = D_SNP_DEL_THRES):
if hit_snp['type'] == nonhit_snp['type'] and abs(hit_snp['d_tss'] - nonhit_snp['d_tss']) <= d_tss_thres and abs(hit_snp['d_del'] - nonhit_snp['d_del']) <= d_snp_del_thres:
		af = (hit_snp['a'] + hit_snp['b']) / 470.0 # TODO: change for parents!
n_af = (nonhit_snp['a'] + nonhit_snp['b']) / 470.0
if abs(af - n_af) <= 0.05 or abs(af - (1.0 - n_af)) <= 0.05:
return True
else:
return False
else:
return False
# Returns a list that contains for every hit snp a list of indices of nonhit-snps that are similar
def returnControlIndicesP (hit_snps, nonhit_snps, d_tss_thres = D_TSS_THRES, d_snp_del_thres = D_SNP_DEL_THRES):
control_indices = [] # will contain lists of indices. Each list contains indices for potential control snps
n_no_similar_nonhitsnps = 0
for hit_snp in hit_snps:
similar_nonhitsnps = []
for i in range(len(nonhit_snps)):
if snpsSimilarP(hit_snp, nonhit_snps[i], d_tss_thres, d_snp_del_thres):
similar_nonhitsnps.append(i)
if len(similar_nonhitsnps) == 0:
n_no_similar_nonhitsnps = n_no_similar_nonhitsnps + 1
control_indices.append(similar_nonhitsnps)
print '# of hit SNPs with no similar non-hit SNPs:', n_no_similar_nonhitsnps
return control_indices
def fdr (p_values, q, m):
p_values.sort()
for k in range(1,m+1):
if p_values[k-1] > (k / float(m)) * q:
return k-1
return m
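# Note (added sketch, not from the original author): fdr() walks the sorted
# p-values from smallest to largest and stops at the first one exceeding its
# Benjamini-Hochberg threshold (k/m)*q, returning the count of p-values before
# that point; this differs from the classical BH step-up rule, which takes the
# largest k with p_(k) <= (k/m)*q. It also sorts the input list in place.
# Small hypothetical example with q=0.05, m=3 (thresholds 0.0167, 0.0333, 0.05):
# assert fdr([0.01, 0.02, 0.30], 0.05, 3) == 2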
def sampleP_final(chromosomes, d_tss_thres = D_TSS_THRES, d_snp_del_thres = D_SNP_DEL_THRES, n_samples = N_SAMPLES, individuals=('children')):
sample_p = [[]] * n_samples # p-values are stored here for every sample
hit_p = [] # p-values for hit snps are stored here
n_hit_snps = 0.0
for chromosome in chromosomes:
print 'Chromosome', chromosome
[hit_snps, nonhit_snps] = readData(chromosome, individuals)
n_hit_snps = n_hit_snps + len(hit_snps)
control_indices = returnControlIndicesP (hit_snps, nonhit_snps, d_tss_thres, d_snp_del_thres)
for hit_snp in hit_snps:
hit_p.append(hit_snp['p'])
for sample in range(n_samples):
for i in range(len(hit_snps)):
if len(control_indices[i]) != 0:
p = nonhit_snps[choice(control_indices[i])]['p']
sample_p[sample] = sample_p[sample] + [p]
else:
p = choice(nonhit_snps)['p']
sample_p[sample] = sample_p[sample] + [p]
for sample in range(n_samples):
sample_p[sample] = fdr(sample_p[sample], .05, int(n_hit_snps))
case = fdr(hit_p, .05, int(n_hit_snps))
return [n_hit_snps, case, sample_p]
def sampleP (chromosome, d_tss_thres = D_TSS_THRES, d_snp_del_thres = D_SNP_DEL_THRES, n_samples = N_SAMPLES, individuals=('children')):
[hit_snps, nonhit_snps] = readData(chromosome, individuals)
n_hit_snps = len(hit_snps)
print n_hit_snps
# control_indices contain lists of indices. Each list contains indices for potential control snps
control_indices = returnControlIndicesP (hit_snps, nonhit_snps, d_tss_thres, d_snp_del_thres)
print len(control_indices)
sample_results = [None] * n_samples
for sample in range(n_samples):
p_values = []
for i in range(n_hit_snps):
if len(control_indices[i]) != 0:
p_values.append(nonhit_snps[choice(control_indices[i])]['p'])
else:
p_values.append(choice(nonhit_snps)['p'])
sample_results[sample] = fdr(p_values, .10, n_hit_snps)
print sample_results[sample]
p_values_hit = []
for hit_snp in hit_snps:
p_values_hit.append(hit_snp['p'])
case = fdr(p_values_hit, .10, n_hit_snps)
return [n_hit_snps, case, sample_results]
print sampleP_final(range(1,23), d_tss_thres = 10000, d_snp_del_thres = 25000, n_samples = 10000, individuals=('parents'))
| gpl-3.0 |
GuessWhoSamFoo/pandas | pandas/tests/util/test_safe_import.py | 2 | 1062 | # -*- coding: utf-8 -*-
import sys
import types
import pytest
import pandas.util._test_decorators as td
@pytest.mark.parametrize("name", ["foo", "hello123"])
def test_safe_import_non_existent(name):
assert not td.safe_import(name)
def test_safe_import_exists():
assert td.safe_import("pandas")
@pytest.mark.parametrize("min_version,valid", [
("0.0.0", True),
("99.99.99", False)
])
def test_safe_import_versions(min_version, valid):
result = td.safe_import("pandas", min_version=min_version)
result = result if valid else not result
assert result
@pytest.mark.parametrize("min_version,valid", [
(None, False),
("1.0", True),
("2.0", False)
])
def test_safe_import_dummy(monkeypatch, min_version, valid):
mod_name = "hello123"
mod = types.ModuleType(mod_name)
mod.__version__ = "1.5"
if min_version is not None:
monkeypatch.setitem(sys.modules, mod_name, mod)
result = td.safe_import(mod_name, min_version=min_version)
result = result if valid else not result
assert result
| bsd-3-clause |
yinpatt/thinkstats | code/scatter.py | 69 | 4281 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
import numpy as np
import math
import brfss
import thinkplot
import thinkstats2
def GetHeightWeight(df, hjitter=0.0, wjitter=0.0):
"""Get sequences of height and weight.
df: DataFrame with htm3 and wtkg2
hjitter: float magnitude of random noise added to heights
wjitter: float magnitude of random noise added to weights
returns: tuple of sequences (heights, weights)
"""
heights = df.htm3
if hjitter:
heights = thinkstats2.Jitter(heights, hjitter)
weights = df.wtkg2
if wjitter:
weights = thinkstats2.Jitter(weights, wjitter)
return heights, weights
def ScatterPlot(heights, weights, alpha=1.0):
"""Make a scatter plot and save it.
heights: sequence of float
weights: sequence of float
alpha: float
"""
thinkplot.Scatter(heights, weights, alpha=alpha)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def HexBin(heights, weights, bins=None):
"""Make a hexbin plot and save it.
heights: sequence of float
weights: sequence of float
bins: 'log' or None for linear
"""
thinkplot.HexBin(heights, weights, bins=bins)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def MakeFigures(df):
"""Make scatterplots.
"""
sample = thinkstats2.SampleRows(df, 5000)
# simple scatter plot
thinkplot.PrePlot(cols=2)
heights, weights = GetHeightWeight(sample)
ScatterPlot(heights, weights)
# scatter plot with jitter
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(sample, hjitter=1.3, wjitter=0.5)
ScatterPlot(heights, weights)
thinkplot.Save(root='scatter1')
# with jitter and transparency
thinkplot.PrePlot(cols=2)
ScatterPlot(heights, weights, alpha=0.1)
# hexbin plot
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(df, hjitter=1.3, wjitter=0.5)
HexBin(heights, weights)
thinkplot.Save(root='scatter2')
def BinnedPercentiles(df):
"""Bin the data by height and plot percentiles of weight for eachbin.
df: DataFrame
"""
cdf = thinkstats2.Cdf(df.htm3)
print('Fraction between 140 and 200 cm', cdf[200] - cdf[140])
bins = np.arange(135, 210, 5)
indices = np.digitize(df.htm3, bins)
groups = df.groupby(indices)
heights = [group.htm3.mean() for i, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.wtkg2) for i, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
weights = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(heights, weights, label=label)
thinkplot.Save(root='scatter3',
xlabel='height (cm)',
ylabel='weight (kg)')
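# Illustrative sketch of the binning used in BinnedPercentiles (values are
# hypothetical): np.digitize assigns each height to the index of the 5 cm bin
# it falls into, and df.groupby(indices) then collects the rows per bin, e.g.
# bins = np.arange(135, 210, 5) # 135, 140, ..., 205
# np.digitize([137.0, 152.5, 208.0], bins) # -> [1, 4, 15]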
def Correlations(df):
print('pandas cov', df.htm3.cov(df.wtkg2))
#print('NumPy cov', np.cov(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Cov', thinkstats2.Cov(df.htm3, df.wtkg2))
print()
print('pandas corr', df.htm3.corr(df.wtkg2))
#print('NumPy corrcoef', np.corrcoef(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Corr', thinkstats2.Corr(df.htm3, df.wtkg2))
print()
print('pandas corr spearman', df.htm3.corr(df.wtkg2, method='spearman'))
print('thinkstats2 SpearmanCorr',
thinkstats2.SpearmanCorr(df.htm3, df.wtkg2))
print('thinkstats2 SpearmanCorr log wtkg3',
thinkstats2.SpearmanCorr(df.htm3, np.log(df.wtkg2)))
print()
print('thinkstats2 Corr log wtkg3',
thinkstats2.Corr(df.htm3, np.log(df.wtkg2)))
print()
def main(script):
thinkstats2.RandomSeed(17)
df = brfss.ReadBrfss(nrows=None)
df = df.dropna(subset=['htm3', 'wtkg2'])
Correlations(df)
return
MakeFigures(df)
BinnedPercentiles(df)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
pllim/ginga | mkapp/setup.py | 3 | 2723 | # -*- coding: iso-8859-1 -*-
"""
Build a standalone application for Mac OS X and MS Windows platforms
Usage (Mac OS X):
python setup.py py2app
Usage (Windows):
python setup.py py2exe
"""
import sys
from setuptools import setup
info_plist_template = u"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleName</key>
<string>Ginga</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Copyright © 2010-2016, Eric Jeschke ([email protected])</string>
<key>CFBundleIconFile</key>
<string>Ginga.icns</string>
<!-- Version number - appears in About box -->
<key>CFBundleShortVersionString</key>
<string>%(version)s</string>
<!-- Build number - appears in About box -->
<key>CFBundleVersion</key>
<string>%(build)s</string>
  <!-- Copyright notice - appears in About box -->
<key>NSHumanReadableCopyright</key>
<string>Copyright © 2010-2016, Eric Jeschke ([email protected])</string>
<!-- Globally unique identifier -->
<key>CFBundleIdentifier</key>
<string>org.naoj.Ginga</string>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleExecutable</key>
<string>Ginga</string>
<key>CFBundleDisplayName</key>
<string>Ginga</string>
</dict>
</plist>
"""
from ginga import __version__
d = dict(version=__version__, build=__version__.replace('.', ''))
plist = info_plist_template % d
with open('Info.plist', 'w') as out_f:
out_f.write(plist)
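# Note: the Info.plist body is produced with plain %-formatting of the template
# above; e.g. a hypothetical ginga __version__ of '2.6.0' would yield
# CFBundleShortVersionString '2.6.0' and CFBundleVersion '260'.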
APP = ['Ginga.py']
DATA_FILES = []
OPTIONS = {'argv_emulation': True,
'compressed': True,
#'packages': 'ginga,scipy,numpy,kapteyn,astropy,PIL,matplotlib',
'packages': 'ginga,scipy,numpy,astropy,PIL,matplotlib',
'includes': ['sip', 'PyQt4._qt',],
# currently creating some problems with the app build on mac os x
# so exclude
'excludes': ['cv2',],
'matplotlib_backends': 'Qt4Agg',
}
if sys.platform == 'darwin':
# mac-specific options
OPTIONS['plist'] = 'Info.plist'
OPTIONS['iconfile'] = 'Ginga.icns'
extra_options = dict(
setup_requires=['py2app'],
options={'py2app': OPTIONS},
)
elif sys.platform == 'win32':
extra_options = dict(
setup_requires=['py2exe'],
options={'py2exe': OPTIONS},
)
else:
extra_options = dict(
# Normally unix-like platforms will use "setup.py install"
# and install the main script as such
scripts=["ginga"],
)
setup(
name="Ginga",
app=APP,
data_files=DATA_FILES,
**extra_options
)
| bsd-3-clause |
mlperf/training_results_v0.7 | NVIDIA/benchmarks/ssd/implementations/pytorch/visualize.py | 5 | 5886 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import numpy as np
from time import time
import os
import random
import time
import io
import json
import torch
from PIL import Image
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from argparse import ArgumentParser
from utils import DefaultBoxes, Encoder, COCODetection
from utils import SSDTransformer
from ssd300 import SSD300
from train import load_checkpoint, dboxes300_coco
def parse_args():
parser = ArgumentParser(description="Visualize models predictions on image")
parser.add_argument('--images', '-i', nargs='*', type=str,
help='path to jpg image')
parser.add_argument('--model', '-m', type=str, default='iter_240000.pt',
help='path to trained model')
parser.add_argument('--threshold', '-t', type=float, default=0.10,
help='threshold for predictions probabilities')
parser.add_argument('--annotations', '-a', type=str,
default='/coco/annotations/instances_val2017.json',
help='path to json with annotations')
return parser.parse_args()
def print_image(image, model, encoder, inv_map, name_map, category_id_to_color, threshold):
# Open image for printing
im = Image.open(image)
W, H = im.size
# Prepare tensor input for model
tmp = im.copy()
tmp = tmp.resize((300, 300))
img = transforms.ToTensor()(tmp)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
img = normalize(img).unsqueeze(dim = 0)
# Find predictions
with torch.no_grad():
ploc, plabel = model(img)
ploc, plabel = ploc.float(), plabel.float()
ret = []
for idx in range(ploc.shape[0]):
# ease-of-use for specific predictions
ploc_i = ploc[idx, :, :].unsqueeze(0)
plabel_i = plabel[idx, :, :].unsqueeze(0)
try:
result = encoder.decode_batch(ploc_i, plabel_i, 0.50, 200)[0]
except:
print("No object detected in image {}".format(image))
continue
htot, wtot = (H, W)
loc, label, prob = [r.cpu().numpy() for r in result]
for loc_, label_, prob_ in zip(loc, label, prob):
ret.append([0, loc_[0]*wtot, \
loc_[1]*htot,
(loc_[2] - loc_[0])*wtot,
(loc_[3] - loc_[1])*htot,
prob_,
inv_map[label_]])
ret = np.array(ret).astype(np.float32)
# Choose bounding boxes for printing
bboxes = []
for re in ret:
if re[5] > threshold:
bboxes.append(re)
print("Bounding boxes detected in image {}:".format(image))
print(bboxes)
# Prepare image for plotting
img = transforms.ToTensor()(im)
img = img.permute(1, 2, 0)
H = img.shape[0]
W = img.shape[1]
fig,ax = plt.subplots(1)
ax.imshow(img)
# Add bboxes with labels
used = set()
for bbox in bboxes:
if (bbox[6] in used):
rect = patches.Rectangle((bbox[1], bbox[2]), bbox[3], bbox[4],
edgecolor=category_id_to_color[bbox[6]],
linewidth=2, facecolor='none')
else:
rect = patches.Rectangle((bbox[1], bbox[2]), bbox[3], bbox[4],
label = name_map[bbox[6]],
edgecolor=category_id_to_color[bbox[6]],
linewidth=2, facecolor='none')
used.add(bbox[6])
ax.add_patch(rect)
# Show image
plt.legend(ncol=1, bbox_to_anchor=(1.04,1), loc="upper left")
plt.show()
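# Note (added for clarity, not from the original authors): each row appended to
# `ret` in print_image is [image_id, x_min, y_min, width, height, confidence,
# coco_category_id], with the box already rescaled from the model's normalized
# 300x300 space to pixel coordinates of the original image; `bboxes` keeps only
# the rows whose confidence exceeds the --threshold argument.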
def main():
# Parse arguments
args = parse_args()
# Get categories names
with open(args.annotations,'r') as anno:
js = json.loads(anno.read())
coco_names = js['categories']
# Prepare map of COCO labels to COCO names
name_map = {}
for name in coco_names:
name_map[name['id']] = name['name']
# Prepare map of SSD to COCO labels
deleted = [12, 26, 29, 30, 45, 66, 68, 69, 71, 83]
inv_map = {}
cnt = 0
for i in range(1, 81):
while i + cnt in deleted:
cnt += 1
inv_map[i] = i + cnt
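    # Note: COCO category ids run 1-90 with gaps (the ids in `deleted` are
    # unused), while the SSD head predicts 80 contiguous labels; inv_map
    # converts a contiguous SSD label back to the original COCO category id so
    # that name_map and category_id_to_color can be indexed.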
# Prepare colors for categories
category_id_to_color = dict([(cat_id, [random.uniform(0, 1) ,random.uniform(0, 1), random.uniform(0, 1)]) for cat_id in range(1, 91)])
    # Set the matplotlib figure size
plt.rcParams["figure.figsize"] = (12, 8)
# Build and load SSD model
ssd300 = SSD300(81, backbone="resnet34", model_path=None, dilation=None)
load_checkpoint(ssd300, args.model)
ssd300.eval()
# Prepare encoder
dboxes = dboxes300_coco()
encoder = Encoder(dboxes)
# Print images
for image in args.images:
print_image(image, ssd300, encoder, inv_map, name_map, category_id_to_color, args.threshold)
if __name__ == "__main__":
main()
| apache-2.0 |
ran5515/DeepDecision | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 71 | 12923 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
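  # Note: the tests below typically run each case twice -- once with a plain
  # numpy input and once with the same data wrapped by _wrap_dict into a
  # {'1': data, '2': data} dict -- so both the array and the dict input paths
  # of DataFeeder are exercised.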
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for k, v in list(feeder.input_dtype.items()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.test_session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for k, v in list(inp.items()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.test_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.test_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
def test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      self._assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
| apache-2.0 |
vladpopovici/WSItk | WSItk/segm/tissue.py | 1 | 5399 | # -*- coding: utf-8 -*-
"""
SEGM.TISSUE: try to segment the tissue regions from a pathology slide.
@author: vlad
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
__version__ = 0.01
__author__ = 'Vlad Popovici'
__all__ = ['tissue_region_from_rgb', 'tissue_fat', 'tissue_chromatin', 'tissue_connective',
'tissue_components', 'superpixels']
import numpy as np
import skimage.morphology as skm
from skimage.segmentation import slic
from skimage.util import img_as_bool
from sklearn.cluster import MiniBatchKMeans
import mahotas as mh
from util.intensity import _R, _G, _B
from stain.he import rgb2he2
def tissue_region_from_rgb(_img, _min_area=150, _g_th=None):
"""
TISSUE_REGION_FROM_RGB detects the region(s) of the image containing the
tissue. The original image is supposed to represent a haematoxylin-eosin
-stained pathology slide.
The main purpose of this function is to detect the parts of a large image
which most probably contain tissue material, and to discard the background.
Usage:
tissue_mask = tissue_from_rgb(img, _min_area=150, _g_th=None)
Args:
img (numpy.ndarray): the original image in RGB color space
_min_area (int, default: 150): any object with an area smaller than
the indicated value, will be discarded
_g_th (int, default: None): the processing is done on the GREEN channel
and all pixels below _g_th are considered candidates for "tissue
pixels". If no value is given to _g_th, one is computed by K-Means
clustering (K=2), and is returned.
Returns:
numpy.ndarray: a binary image containing the mask of the regions
considered to represent tissue fragments
int: threshold used for GREEN channel
"""
if _g_th is None:
# Apply vector quantization to remove the "white" background - work in the
# green channel:
vq = MiniBatchKMeans(n_clusters=2)
_g_th = int(np.round(0.95 * np.max(vq.fit(_G(_img).reshape((-1,1)))
.cluster_centers_.squeeze())))
mask = _G(_img) < _g_th
skm.binary_closing(mask, skm.disk(3), out=mask)
mask = img_as_bool(mask)
mask = skm.remove_small_objects(mask, min_size=_min_area, in_place=True)
# Some hand-picked rules:
# -at least 5% H and E
# -at most 25% background
# for a region to be considered tissue
h, e, b = rgb2he2(_img)
mask &= (h > np.percentile(h, 5)) | (e > np.percentile(e, 5))
mask &= (b < np.percentile(b, 50)) # at most at 50% of "other components"
mask = mh.close_holes(mask)
return img_as_bool(mask), _g_th
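# Minimal usage sketch for tissue_region_from_rgb (illustrative only, not part of
# the original module). It assumes `img` is an RGB slide region already loaded as
# a numpy.ndarray, e.g. via skimage.io.imread; all names below are placeholders.
#
#   from skimage.io import imread
#   img = imread('slide_region.png')                       # H x W x 3, RGB
#   mask, g_th = tissue_region_from_rgb(img, _min_area=150)
#   # `mask` is a boolean image of candidate tissue regions; `g_th` is the
#   # GREEN-channel threshold used (estimated by K-means when not provided).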
def tissue_fat(_img, _clf):
"""
Segment fat regions from a slide.
Args:
_img
_clf
Returns:
"""
p = _clf.predict_proba(_img.reshape((-1,3)))[:,1]
p = p.reshape(_img.shape[:-1])
return p
def tissue_chromatin(_img, _clf):
"""
:param _img:
:param _clf:
:return:
"""
p = _clf.predict_proba(_img.reshape((-1,3)))[:,1]
p = p.reshape(_img.shape[:-1])
return p
def tissue_connective(_img, _clf):
"""
:param _img:
:param _clf:
:return:
"""
p = _clf.predict_proba(_img.reshape((-1,3)))[:,1]
p = p.reshape(_img.shape[:-1])
return p
def tissue_components(_img, _models, _min_prob=0.4999999999):
w, h, _ = _img.shape
n = w * h
# "background": if no class has a posterior of at least 0.5
# the pixel is considered "background"
p_bkg = np.zeros((n, ))
p_bkg.fill(_min_prob)
p_chrm = tissue_chromatin(_img, _models['chromatin']).reshape((-1,))
p_conn = tissue_connective(_img, _models['connective']).reshape((-1,))
p_fat = tissue_fat(_img, _models['fat']).reshape((-1,))
prbs = np.array([p_bkg, p_chrm, p_conn, p_fat])
    comp_map = np.argmax(prbs, axis=0)  # 0 = background, 1 = chromatin, 2 = connective, 3 = fat
comp_map = comp_map.reshape((w, h))
return comp_map
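# Illustrative sketch of the inputs expected by tissue_components (not part of the
# original module). `_models` is a dict with 'chromatin', 'connective' and 'fat'
# keys, each mapping to a trained classifier exposing predict_proba() over
# per-pixel RGB values (see tissue_chromatin / tissue_connective / tissue_fat
# above). The classifier objects and their training are assumptions of this sketch.
#
#   models = {'chromatin': chromatin_clf, 'connective': connective_clf, 'fat': fat_clf}
#   comp_map = tissue_components(img, models)
#   # comp_map[i, j] in {0: background, 1: chromatin, 2: connective, 3: fat}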
def superpixels(img, slide_magnif='x40'):
"""
SUPERPIXELS: produces a super-pixel representation of the image, with the new
super-pixels being the average (separate by channel) of the pixels in the
original image falling in the same "cell".
:param img: numpy.ndarray
RGB image
:param slide_magnif: string
Indicates the microscope magnification at which the image was acquired.
It is used to set some parameters, depending on the magnification.
:return: numpy.ndarray
The RGB super-pixel image.
"""
params = dict([('x40', dict([('n_segments', int(10*np.log2(img.size/3))), ('compactness', 50), ('sigma', 2.0)])),
('x20', dict([('n_segments', int(100*np.log2(img.size/3))), ('compactness', 50), ('sigma', 1.5)]))])
p = params[slide_magnif]
sp = slic(img, n_segments=p['n_segments'], compactness=p['compactness'], sigma=p['sigma'],
multichannel=True, convert2lab=True)
n_sp = sp.max() + 1
img_res = np.ndarray(img.shape, dtype=img.dtype)
for i in np.arange(n_sp):
img_res[sp == i, 0] = int(np.mean(img[sp == i, 0]))
img_res[sp == i, 1] = int(np.mean(img[sp == i, 1]))
img_res[sp == i, 2] = int(np.mean(img[sp == i, 2]))
return img_res
| mit |
thorwhalen/ut | sound/others/audio_tools.py | 1 | 158402 | """Audio Utils"""
# Collected from various sources:
# License: BSD 3-clause
# Authors: Kyle Kastner
# LTSD routine from jfsantos (Joao Felipe Santos)
# Harvest, Cheaptrick, D4C, WORLD routines based on MATLAB code from M. Morise
# http://ml.cs.yamanashi.ac.jp/world/english/
# MGC code based on r9y9 (Ryuichi Yamamoto) MelGeneralizedCepstrums.jl
# Pieces also adapted from SPTK
import numpy as np
import scipy as sp
from numpy.lib.stride_tricks import as_strided
import scipy.signal as sg
from scipy.interpolate import interp1d
import wave
from scipy.cluster.vq import vq
from scipy import linalg, fftpack
from numpy.testing import assert_almost_equal
from scipy.linalg import svd
from scipy.io import wavfile
from scipy.signal import firwin
import zipfile
import tarfile
import os
import copy
import multiprocessing
from multiprocessing import Pool
import functools
import time
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib2 as urllib
def download(url, server_fname, local_fname=None, progress_update_percentage=5,
bypass_certificate_check=False):
"""
An internet download utility modified from
http://stackoverflow.com/questions/22676/
how-do-i-download-a-file-over-http-using-python/22776#22776
"""
if bypass_certificate_check:
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
        u = urllib.urlopen(url, context=ctx)  # `urllib` is aliased to urllib.request / urllib2 above
    else:
        u = urllib.urlopen(url)
if local_fname is None:
local_fname = server_fname
full_path = local_fname
meta = u.info()
with open(full_path, 'wb') as f:
try:
file_size = int(meta.get("Content-Length"))
except TypeError:
print("WARNING: Cannot get file size, displaying bytes instead!")
file_size = 100
print(("Downloading: %s Bytes: %s" % (server_fname, file_size)))
file_size_dl = 0
block_sz = int(1E7)
p = 0
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if (file_size_dl * 100. / file_size) > p:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl *
100. / file_size)
print(status)
p += progress_update_percentage
def fetch_sample_speech_tapestry():
url = "https://www.dropbox.com/s/qte66a7haqspq2g/tapestry.wav?dl=1"
wav_path = "tapestry.wav"
if not os.path.exists(wav_path):
download(url, wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo? - just choose one channel
return fs, d
def fetch_sample_file(wav_path):
if not os.path.exists(wav_path):
raise ValueError("Unable to find file at path %s" % wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo - just choose one channel
if len(d.shape) > 1:
d = d[:, 0]
return fs, d
def fetch_sample_music():
url = "http://www.music.helsinki.fi/tmt/opetus/uusmedia/esim/"
url += "a2002011001-e02-16kHz.wav"
wav_path = "test.wav"
if not os.path.exists(wav_path):
download(url, wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo - just choose one channel
d = d[:, 0]
return fs, d
def fetch_sample_speech_fruit(n_samples=None):
url = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz'
wav_path = "audio.tar.gz"
if not os.path.exists(wav_path):
download(url, wav_path)
tf = tarfile.open(wav_path)
wav_names = [fname for fname in tf.getnames()
if ".wav" in fname.split(os.sep)[-1]]
speech = []
print("Loading speech files...")
for wav_name in wav_names[:n_samples]:
f = tf.extractfile(wav_name)
fs, d = wavfile.read(f)
d = d.astype('float32') / (2 ** 15)
speech.append(d)
return fs, speech
def fetch_sample_speech_eustace(n_samples=None):
"""
http://www.cstr.ed.ac.uk/projects/eustace/download.html
"""
# data
url = "http://www.cstr.ed.ac.uk/projects/eustace/down/eustace_wav.zip"
wav_path = "eustace_wav.zip"
if not os.path.exists(wav_path):
download(url, wav_path)
# labels
url = "http://www.cstr.ed.ac.uk/projects/eustace/down/eustace_labels.zip"
labels_path = "eustace_labels.zip"
if not os.path.exists(labels_path):
download(url, labels_path)
# Read wavfiles
# 16 kHz wav
zf = zipfile.ZipFile(wav_path, 'r')
wav_names = [fname for fname in zf.namelist()
if ".wav" in fname.split(os.sep)[-1]]
fs = 16000
speech = []
print("Loading speech files...")
for wav_name in wav_names[:n_samples]:
wav_str = zf.read(wav_name)
d = np.frombuffer(wav_str, dtype=np.int16)
d = d.astype('float32') / (2 ** 15)
speech.append(d)
zf = zipfile.ZipFile(labels_path, 'r')
label_names = [fname for fname in zf.namelist()
if ".lab" in fname.split(os.sep)[-1]]
labels = []
print("Loading label files...")
for label_name in label_names[:n_samples]:
label_file_str = zf.read(label_name)
labels.append(label_file_str)
return fs, speech
def stft(X, fftsize=128, step="half", mean_normalize=True, real=False,
compute_onesided=True):
"""
Compute STFT for 1D real valued input X
"""
if real:
local_fft = fftpack.rfft
cut = -1
else:
local_fft = fftpack.fft
cut = None
if compute_onesided:
cut = fftsize // 2 + 1
if mean_normalize:
X -= X.mean()
if step == "half":
X = halfoverlap(X, fftsize)
else:
X = overlap(X, fftsize, step)
size = fftsize
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(size) / (size - 1))
X = X * win[None]
X = local_fft(X)[:, :cut]
return X
def istft(X, fftsize=128, step="half", wsola=False, mean_normalize=True,
real=False, compute_onesided=True):
"""
Compute ISTFT for STFT transformed X
"""
if real:
local_ifft = fftpack.irfft
X_pad = np.zeros((X.shape[0], X.shape[1] + 1)) + 0j
X_pad[:, :-1] = X
X = X_pad
else:
local_ifft = fftpack.ifft
if compute_onesided:
X_pad = np.zeros((X.shape[0], 2 * X.shape[1])) + 0j
X_pad[:, :fftsize // 2 + 1] = X
X_pad[:, fftsize // 2 + 1:] = 0
X = X_pad
X = local_ifft(X).astype("float64")
if step == "half":
X = invert_halfoverlap(X)
else:
X = overlap_add(X, step, wsola=wsola)
if mean_normalize:
X -= np.mean(X)
return X
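# A small, self-contained sketch of the stft/istft pair above (not part of the
# original module): compute a half-overlapped, one-sided STFT of a synthetic tone
# and map it back to the time domain. All names are local to this example.
def _stft_example():
    fs = 8000
    t = np.arange(fs) / float(fs)
    x = np.sin(2 * np.pi * 440 * t)        # one second of a 440 Hz tone
    S = stft(x, fftsize=256)               # complex, shape (n_frames, 256 // 2 + 1)
    mag = np.abs(S)                        # magnitude spectrogram
    x_rec = istft(S, fftsize=256)          # approximate time-domain reconstruction
    return mag, x_rec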
def mdct_slow(X, dctsize=128):
M = dctsize
N = 2 * dctsize
N_0 = (M + 1) / 2
X = halfoverlap(X, N)
X = sine_window(X)
n, k = np.meshgrid(np.arange(N), np.arange(M))
# Use transpose due to "samples as rows" convention
tf = np.cos(np.pi * (n + N_0) * (k + 0.5) / M).T
return np.dot(X, tf)
def imdct_slow(X, dctsize=128):
M = dctsize
N = 2 * dctsize
N_0 = (M + 1) / 2
N_4 = N / 4
n, k = np.meshgrid(np.arange(N), np.arange(M))
# inverse *is not* transposed
tf = np.cos(np.pi * (n + N_0) * (k + 0.5) / M)
X_r = np.dot(X, tf) / N_4
X_r = sine_window(X_r)
X = invert_halfoverlap(X_r)
return X
def nsgcwin(fmin, fmax, n_bins, fs, signal_len, gamma):
"""
Nonstationary Gabor window calculation
References
----------
Velasco G. A., Holighaus N., Dorfler M., Grill T.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
Holighaus N., Dorfler M., Velasco G. A. and Grill T.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
# use a hanning window
# no fractional shifts
fftres = fs / signal_len
fmin = float(fmin)
fmax = float(fmax)
gamma = float(gamma)
nyq = fs / 2.
b = np.floor(n_bins * np.log2(fmax / fmin))
fbas = fmin * 2 ** (np.arange(b + 1) / float(n_bins))
Q = 2 ** (1. / n_bins) - 2 ** (-1. / n_bins)
cqtbw = Q * fbas + gamma
cqtbw = cqtbw.ravel()
maxidx = np.where(fbas + cqtbw / 2. > nyq)[0]
if len(maxidx) > 0:
# replicate bug in MATLAB version...
# or is it a feature
if sum(maxidx) == 0:
first = len(cqtbw) - 1
else:
first = maxidx[0]
fbas = fbas[:first]
cqtbw = cqtbw[:first]
minidx = np.where(fbas - cqtbw / 2. < 0)[0]
if len(minidx) > 0:
fbas = fbas[minidx[-1] + 1:]
cqtbw = cqtbw[minidx[-1] + 1:]
fbas_len = len(fbas)
fbas_new = np.zeros((2 * (len(fbas) + 1)))
fbas_new[1:len(fbas) + 1] = fbas
fbas = fbas_new
fbas[fbas_len + 1] = nyq
fbas[fbas_len + 2:] = fs - fbas[1:fbas_len + 1][::-1]
bw = np.zeros_like(fbas)
bw[0] = 2 * fmin
bw[1:len(cqtbw) + 1] = cqtbw
bw[len(cqtbw) + 1] = fbas[fbas_len + 2] - fbas[fbas_len]
bw[-len(cqtbw):] = cqtbw[::-1]
bw = bw / fftres
fbas = fbas / fftres
posit = np.zeros_like(fbas)
posit[:fbas_len + 2] = np.floor(fbas[:fbas_len + 2])
posit[fbas_len + 2:] = np.ceil(fbas[fbas_len + 2:])
base_shift = -posit[-1] % signal_len
shift = np.zeros_like(posit).astype("int32")
shift[1:] = (posit[1:] - posit[:-1]).astype("int32")
shift[0] = base_shift
bw = np.round(bw)
bwfac = 1
M = bw
min_win = 4
for ii in range(len(bw)):
if bw[ii] < min_win:
bw[ii] = min_win
M[ii] = bw[ii]
def _win(numel):
if numel % 2 == 0:
s1 = np.arange(0, .5, 1. / numel)
if len(s1) != numel // 2:
# edge case with small floating point numbers...
s1 = s1[:-1]
s2 = np.arange(-.5, 0, 1. / numel)
if len(s2) != numel // 2:
# edge case with small floating point numbers...
s2 = s2[:-1]
x = np.concatenate((s1, s2))
else:
s1 = np.arange(0, .5, 1. / numel)
s2 = np.arange(-.5 + .5 / numel, 0, 1. / numel)
if len(s2) != numel // 2: # assume integer truncate 27 // 2 = 13
s2 = s2[:-1]
x = np.concatenate((s1, s2))
assert len(x) == numel
g = .5 + .5 * np.cos(2 * np.pi * x)
return g
multiscale = [_win(bi) for bi in bw]
bw = bwfac * np.ceil(M / bwfac)
for kk in [0, fbas_len + 1]:
if M[kk] > M[kk + 1]:
multiscale[kk] = np.ones(M[kk]).astype(multiscale[0].dtype)
i1 = np.floor(M[kk] / 2) - np.floor(M[kk + 1] / 2)
i2 = np.floor(M[kk] / 2) + np.ceil(M[kk + 1] / 2)
# Very rarely, gets an off by 1 error? Seems to be at the end...
# for now, slice
multiscale[kk][i1:i2] = _win(M[kk + 1])
multiscale[kk] = multiscale[kk] / np.sqrt(M[kk])
return multiscale, shift, M
def nsgtf_real(X, multiscale, shift, window_lens):
"""
Nonstationary Gabor Transform for real values
References
----------
Velasco G. A., Holighaus N., Dorfler M., Grill T.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
Holighaus N., Dorfler M., Velasco G. A. and Grill T.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
# This will break with multchannel input
signal_len = len(X)
N = len(shift)
X_fft = np.fft.fft(X)
fill = np.sum(shift) - signal_len
if fill > 0:
        X_fft_tmp = np.zeros((signal_len + fill))
X_fft_tmp[:len(X_fft)] = X_fft
X_fft = X_fft_tmp
posit = np.cumsum(shift) - shift[0]
scale_lens = np.array([len(m) for m in multiscale])
N = np.where(posit - np.floor(scale_lens) <= (signal_len + fill) / 2)[0][-1]
c = []
# c[0] is almost exact
for ii in range(N):
idx_l = np.arange(np.ceil(scale_lens[ii] / 2), scale_lens[ii])
idx_r = np.arange(np.ceil(scale_lens[ii] / 2))
idx = np.concatenate((idx_l, idx_r))
idx = idx.astype("int32")
subwin_range = posit[ii] + np.arange(-np.floor(scale_lens[ii] / 2),
np.ceil(scale_lens[ii] / 2))
win_range = subwin_range % (signal_len + fill)
win_range = win_range.astype("int32")
if window_lens[ii] < scale_lens[ii]:
raise ValueError("Not handling 'not enough channels' case")
else:
temp = np.zeros((window_lens[ii],)).astype(X_fft.dtype)
temp_idx_l = np.arange(len(temp) - np.floor(scale_lens[ii] / 2),
len(temp))
temp_idx_r = np.arange(np.ceil(scale_lens[ii] / 2))
temp_idx = np.concatenate((temp_idx_l, temp_idx_r))
temp_idx = temp_idx.astype("int32")
temp[temp_idx] = X_fft[win_range] * multiscale[ii][idx]
fs_new_bins = window_lens[ii]
fk_bins = posit[ii]
displace = fk_bins - np.floor(fk_bins / fs_new_bins) * fs_new_bins
displace = displace.astype("int32")
temp = np.roll(temp, displace)
c.append(np.fft.ifft(temp))
if 0:
# cell2mat concatenation
c = np.concatenate(c)
return c
def nsdual(multiscale, shift, window_lens):
"""
Calculation of nonstationary inverse gabor filters
References
----------
Velasco G. A., Holighaus N., Dorfler M., Grill T.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
Holighaus N., Dorfler M., Velasco G. A. and Grill T.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
N = len(shift)
posit = np.cumsum(shift)
seq_len = posit[-1]
posit = posit - shift[0]
diagonal = np.zeros((seq_len,))
win_range = []
for ii in range(N):
filt_len = len(multiscale[ii])
idx = np.arange(-np.floor(filt_len / 2), np.ceil(filt_len / 2))
win_range.append((posit[ii] + idx) % seq_len)
subdiag = window_lens[ii] * np.fft.fftshift(multiscale[ii]) ** 2
ind = win_range[ii].astype(np.int)
diagonal[ind] = diagonal[ind] + subdiag
dual_multiscale = multiscale
for ii in range(N):
ind = win_range[ii].astype(np.int)
dual_multiscale[ii] = np.fft.ifftshift(
np.fft.fftshift(dual_multiscale[ii]) / diagonal[ind])
return dual_multiscale
def nsgitf_real(c, c_dc, c_nyq, multiscale, shift):
"""
Nonstationary Inverse Gabor Transform on real valued signal
References
----------
Velasco G. A., Holighaus N., Dorfler M., Grill T.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
Holighaus N., Dorfler M., Velasco G. A. and Grill T.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
c_l = []
c_l.append(c_dc)
c_l.extend([ci for ci in c])
c_l.append(c_nyq)
posit = np.cumsum(shift)
seq_len = posit[-1]
posit -= shift[0]
out = np.zeros((seq_len,)).astype(c_l[1].dtype)
for ii in range(len(c_l)):
filt_len = len(multiscale[ii])
win_range = posit[ii] + np.arange(-np.floor(filt_len / 2),
np.ceil(filt_len / 2))
win_range = (win_range % seq_len).astype(np.int)
temp = np.fft.fft(c_l[ii]) * len(c_l[ii])
fs_new_bins = len(c_l[ii])
fk_bins = posit[ii]
displace = int(fk_bins - np.floor(fk_bins / fs_new_bins) * fs_new_bins)
temp = np.roll(temp, -displace)
l = np.arange(len(temp) - np.floor(filt_len / 2), len(temp))
r = np.arange(np.ceil(filt_len / 2))
temp_idx = (np.concatenate((l, r)) % len(temp)).astype(np.int)
temp = temp[temp_idx]
lf = np.arange(filt_len - np.floor(filt_len / 2), filt_len)
rf = np.arange(np.ceil(filt_len / 2))
filt_idx = np.concatenate((lf, rf)).astype(np.int)
m = multiscale[ii][filt_idx]
out[win_range] = out[win_range] + m * temp
nyq_bin = np.floor(seq_len / 2) + 1
out_idx = np.arange(
nyq_bin - np.abs(1 - seq_len % 2) - 1, 0, -1).astype(np.int)
out[nyq_bin:] = np.conj(out[out_idx])
t_out = np.real(np.fft.ifft(out)).astype(np.float64)
return t_out
def cqt(X, fs, n_bins=48, fmin=27.5, fmax="nyq", gamma=20):
"""
Constant Q Transform
References
----------
Velasco G. A., Holighaus N., Dorfler M., Grill T.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
Holighaus N., Dorfler M., Velasco G. A. and Grill T.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
if fmax == "nyq":
fmax = fs / 2.
multiscale, shift, window_lens = nsgcwin(fmin, fmax, n_bins, fs,
len(X), gamma)
fbas = fs * np.cumsum(shift[1:]) / len(X)
fbas = fbas[:len(window_lens) // 2 - 1]
bins = window_lens.shape[0] // 2 - 1
window_lens[1:bins + 1] = window_lens[bins + 2]
window_lens[bins + 2:] = window_lens[1:bins + 1][::-1]
norm = 2. * window_lens[:bins + 2] / float(len(X))
norm = np.concatenate((norm, norm[1:-1][::-1]))
multiscale = [norm[ii] * multiscale[ii] for ii in range(2 * (bins + 1))]
c = nsgtf_real(X, multiscale, shift, window_lens)
c_dc = c[0]
c_nyq = c[-1]
c_sub = c[1:-1]
c = np.vstack(c_sub)
return c, c_dc, c_nyq, multiscale, shift, window_lens
def icqt(X_cq, c_dc, c_nyq, multiscale, shift, window_lens):
"""
Inverse constant Q Transform
References
----------
Velasco G. A., Holighaus N., Dorfler M., Grill T.
Constructing an invertible constant-Q transform with nonstationary Gabor
frames, Proceedings of the 14th International Conference on Digital
Audio Effects (DAFx 11), Paris, France, 2011
Holighaus N., Dorfler M., Velasco G. A. and Grill T.
A framework for invertible, real-time constant-Q transforms, submitted.
Original matlab code copyright follows:
AUTHOR(s) : Monika Dorfler, Gino Angelo Velasco, Nicki Holighaus, 2010-2011
COPYRIGHT : (c) NUHAG, Dept.Math., University of Vienna, AUSTRIA
http://nuhag.eu/
Permission is granted to modify and re-distribute this
code in any manner as long as this notice is preserved.
All standard disclaimers apply.
"""
new_multiscale = nsdual(multiscale, shift, window_lens)
X = nsgitf_real(X_cq, c_dc, c_nyq, new_multiscale, shift)
return X
def rolling_mean(X, window_size):
w = 1.0 / window_size * np.ones((window_size))
return np.correlate(X, w, 'valid')
def rolling_window(X, window_size):
# for 1d data
shape = X.shape[:-1] + (X.shape[-1] - window_size + 1, window_size)
strides = X.strides + (X.strides[-1],)
return np.lib.stride_tricks.as_strided(X, shape=shape, strides=strides)
def voiced_unvoiced(X, window_size=256, window_step=128, copy=True):
"""
Voiced unvoiced detection from a raw signal
Based on code from:
https://www.clear.rice.edu/elec532/PROJECTS96/lpc/code.html
Other references:
http://www.seas.ucla.edu/spapl/code/harmfreq_MOLRT_VAD.m
Parameters
----------
X : ndarray
Raw input signal
window_size : int, optional (default=256)
The window size to use, in samples.
window_step : int, optional (default=128)
How far the window steps after each calculation, in samples.
copy : bool, optional (default=True)
Whether to make a copy of the input array or allow in place changes.
"""
X = np.array(X, copy=copy)
if len(X.shape) < 2:
X = X[None]
n_points = X.shape[1]
n_windows = n_points // window_step
# Padding
pad_sizes = [(window_size - window_step) // 2,
window_size - window_step // 2]
# TODO: Handling for odd window sizes / steps
X = np.hstack((np.zeros((X.shape[0], pad_sizes[0])), X,
np.zeros((X.shape[0], pad_sizes[1]))))
clipping_factor = 0.6
b, a = sg.butter(10, np.pi * 9 / 40)
voiced_unvoiced = np.zeros((n_windows, 1))
period = np.zeros((n_windows, 1))
for window in range(max(n_windows - 1, 1)):
XX = X.ravel()[window * window_step + np.arange(window_size)]
XX *= sg.hamming(len(XX))
XX = sg.lfilter(b, a, XX)
left_max = np.max(np.abs(XX[:len(XX) // 3]))
right_max = np.max(np.abs(XX[-len(XX) // 3:]))
clip_value = clipping_factor * np.min([left_max, right_max])
XX_clip = np.clip(XX, clip_value, -clip_value)
XX_corr = np.correlate(XX_clip, XX_clip, mode='full')
center = np.argmax(XX_corr)
right_XX_corr = XX_corr[center:]
prev_window = max([window - 1, 0])
if voiced_unvoiced[prev_window] > 0:
# Want it to be harder to turn off than turn on
strength_factor = .29
else:
strength_factor = .3
start = np.where(right_XX_corr < .3 * XX_corr[center])[0]
# 20 is hardcoded but should depend on samplerate?
try:
start = np.max([20, start[0]])
except IndexError:
start = 20
search_corr = right_XX_corr[start:]
index = np.argmax(search_corr)
second_max = search_corr[index]
if (second_max > strength_factor * XX_corr[center]):
voiced_unvoiced[window] = 1
period[window] = start + index - 1
else:
voiced_unvoiced[window] = 0
period[window] = 0
return np.array(voiced_unvoiced), np.array(period)
def lpc_analysis(X, order=8, window_step=128, window_size=2 * 128,
emphasis=0.9, voiced_start_threshold=.9,
voiced_stop_threshold=.6, truncate=False, copy=True):
"""
Extract LPC coefficients from a signal
Based on code from:
http://labrosa.ee.columbia.edu/matlab/sws/
    Parameters
----------
X : ndarray
Signals to extract LPC coefficients from
order : int, optional (default=8)
Order of the LPC coefficients. For speech, use the general rule that the
order is two times the expected number of formants plus 2.
This can be formulated as 2 + 2 * (fs // 2000). For approx. signals
with fs = 7000, this is 8 coefficients - 2 + 2 * (7000 // 2000).
window_step : int, optional (default=128)
The size (in samples) of the space between each window
window_size : int, optional (default=2 * 128)
The size of each window (in samples) to extract coefficients over
emphasis : float, optional (default=0.9)
The emphasis coefficient to use for filtering
voiced_start_threshold : float, optional (default=0.9)
Upper power threshold for estimating when speech has started
voiced_stop_threshold : float, optional (default=0.6)
Lower power threshold for estimating when speech has stopped
truncate : bool, optional (default=False)
Whether to cut the data at the last window or do zero padding.
copy : bool, optional (default=True)
Whether to copy the input X or modify in place
Returns
-------
lp_coefficients : ndarray
lp coefficients to describe the frame
per_frame_gain : ndarray
calculated gain for each frame
residual_excitation : ndarray
leftover energy which is not described by lp coefficents and gain
voiced_frames : ndarray
array of [0, 1] values which holds voiced/unvoiced decision for each
frame.
References
----------
D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
X = np.array(X, copy=copy)
if len(X.shape) < 2:
X = X[None]
n_points = X.shape[1]
n_windows = int(n_points // window_step)
if not truncate:
pad_sizes = [(window_size - window_step) // 2,
window_size - window_step // 2]
# TODO: Handling for odd window sizes / steps
X = np.hstack((np.zeros((X.shape[0], int(pad_sizes[0]))), X,
np.zeros((X.shape[0], int(pad_sizes[1])))))
else:
pad_sizes = [0, 0]
X = X[0, :n_windows * window_step]
lp_coefficients = np.zeros((n_windows, order + 1))
per_frame_gain = np.zeros((n_windows, 1))
residual_excitation = np.zeros(
int(((n_windows - 1) * window_step + window_size)))
# Pre-emphasis high-pass filter
X = sg.lfilter([1, -emphasis], 1, X)
# stride_tricks.as_strided?
autocorr_X = np.zeros((n_windows, int(2 * window_size - 1)))
for window in range(max(n_windows - 1, 1)):
wtws = int(window * window_step)
XX = X.ravel()[wtws + np.arange(window_size, dtype="int32")]
WXX = XX * sg.hanning(window_size)
autocorr_X[window] = np.correlate(WXX, WXX, mode='full')
center = np.argmax(autocorr_X[window])
RXX = autocorr_X[window,
np.arange(center, window_size + order, dtype="int32")]
R = linalg.toeplitz(RXX[:-1])
solved_R = linalg.pinv(R).dot(RXX[1:])
filter_coefs = np.hstack((1, -solved_R))
residual_signal = sg.lfilter(filter_coefs, 1, WXX)
gain = np.sqrt(np.mean(residual_signal ** 2))
lp_coefficients[window] = filter_coefs
per_frame_gain[window] = gain
assign_range = wtws + np.arange(window_size, dtype="int32")
residual_excitation[assign_range] += residual_signal / gain
# Throw away first part in overlap mode for proper synthesis
residual_excitation = residual_excitation[int(pad_sizes[0]):]
return lp_coefficients, per_frame_gain, residual_excitation
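# Worked example of the order rule in the docstring above: for fs = 8000 Hz,
# order = 2 + 2 * (8000 // 2000) = 10. Minimal usage sketch (illustrative only;
# assumes `x` is a mono speech signal, as a 1D float array, sampled at `fs`):
#
#   a, g, e = lpc_analysis(x, order=2 + 2 * (fs // 2000),
#                          window_step=128, window_size=256)
#   # a: per-window LPC coefficients, g: per-window gains, e: residual excitation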
def lpc_to_frequency(lp_coefficients, per_frame_gain):
"""
Extract resonant frequencies and magnitudes from LPC coefficients and gains.
Parameters
----------
lp_coefficients : ndarray
LPC coefficients, such as those calculated by ``lpc_analysis``
per_frame_gain : ndarray
Gain calculated for each frame, such as those calculated
by ``lpc_analysis``
Returns
-------
frequencies : ndarray
Resonant frequencies calculated from LPC coefficients and gain. Returned
frequencies are from 0 to 2 * pi
magnitudes : ndarray
Magnitudes of resonant frequencies
References
----------
D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
n_windows, order = lp_coefficients.shape
frame_frequencies = np.zeros((n_windows, (order - 1) // 2))
frame_magnitudes = np.zeros_like(frame_frequencies)
for window in range(n_windows):
w_coefs = lp_coefficients[window]
g_coefs = per_frame_gain[window]
roots = np.roots(np.hstack(([1], w_coefs[1:])))
# Roots doesn't return the same thing as MATLAB... agh
frequencies, index = np.unique(
np.abs(np.angle(roots)), return_index=True)
# Make sure 0 doesn't show up...
gtz = np.where(frequencies > 0)[0]
frequencies = frequencies[gtz]
index = index[gtz]
magnitudes = g_coefs / (1. - np.abs(roots))
sort_index = np.argsort(frequencies)
frame_frequencies[window, :len(sort_index)] = frequencies[sort_index]
frame_magnitudes[window, :len(sort_index)] = magnitudes[sort_index]
return frame_frequencies, frame_magnitudes
def lpc_to_lsf(all_lpc):
if len(all_lpc.shape) < 2:
all_lpc = all_lpc[None]
order = all_lpc.shape[1] - 1
all_lsf = np.zeros((len(all_lpc), order))
for i in range(len(all_lpc)):
lpc = all_lpc[i]
lpc1 = np.append(lpc, 0)
lpc2 = lpc1[::-1]
sum_filt = lpc1 + lpc2
diff_filt = lpc1 - lpc2
if order % 2 != 0:
deconv_diff, _ = sg.deconvolve(diff_filt, [1, 0, -1])
deconv_sum = sum_filt
else:
deconv_diff, _ = sg.deconvolve(diff_filt, [1, -1])
deconv_sum, _ = sg.deconvolve(sum_filt, [1, 1])
roots_diff = np.roots(deconv_diff)
roots_sum = np.roots(deconv_sum)
angle_diff = np.angle(roots_diff[::2])
angle_sum = np.angle(roots_sum[::2])
lsf = np.sort(np.hstack((angle_diff, angle_sum)))
if len(lsf) != 0:
all_lsf[i] = lsf
return np.squeeze(all_lsf)
def lsf_to_lpc(all_lsf):
if len(all_lsf.shape) < 2:
all_lsf = all_lsf[None]
order = all_lsf.shape[1]
all_lpc = np.zeros((len(all_lsf), order + 1))
for i in range(len(all_lsf)):
lsf = all_lsf[i]
zeros = np.exp(1j * lsf)
sum_zeros = zeros[::2]
diff_zeros = zeros[1::2]
sum_zeros = np.hstack((sum_zeros, np.conj(sum_zeros)))
diff_zeros = np.hstack((diff_zeros, np.conj(diff_zeros)))
sum_filt = np.poly(sum_zeros)
diff_filt = np.poly(diff_zeros)
if order % 2 != 0:
deconv_diff = sg.convolve(diff_filt, [1, 0, -1])
deconv_sum = sum_filt
else:
deconv_diff = sg.convolve(diff_filt, [1, -1])
deconv_sum = sg.convolve(sum_filt, [1, 1])
lpc = .5 * (deconv_sum + deconv_diff)
# Last coefficient is 0 and not returned
all_lpc[i] = lpc[:-1]
return np.squeeze(all_lpc)
def lpc_synthesis(lp_coefficients, per_frame_gain, residual_excitation=None,
voiced_frames=None, window_step=128, emphasis=0.9):
"""
Synthesize a signal from LPC coefficients
Based on code from:
http://labrosa.ee.columbia.edu/matlab/sws/
http://web.uvic.ca/~tyoon/resource/auditorytoolbox/auditorytoolbox/synlpc.html
Parameters
----------
lp_coefficients : ndarray
Linear prediction coefficients
per_frame_gain : ndarray
Gain coefficients
residual_excitation : ndarray or None, optional (default=None)
Residual excitations. If None, this will be synthesized with white noise
voiced_frames : ndarray or None, optional (default=None)
Voiced frames. If None, all frames assumed to be voiced.
window_step : int, optional (default=128)
The size (in samples) of the space between each window
emphasis : float, optional (default=0.9)
The emphasis coefficient to use for filtering
Returns
-------
synthesized : ndarray
Sound vector synthesized from input arguments
References
----------
D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
# TODO: Incorporate better synthesis from
# http://eecs.oregonstate.edu/education/docs/ece352/CompleteManual.pdf
window_size = 2 * window_step
[n_windows, order] = lp_coefficients.shape
n_points = (n_windows + 1) * window_step
n_excitation_points = n_points + window_step + window_step // 2
random_state = np.random.RandomState(1999)
if residual_excitation is None:
# Need to generate excitation
if voiced_frames is None:
# No voiced/unvoiced info
voiced_frames = np.ones((lp_coefficients.shape[0], 1))
residual_excitation = np.zeros((n_excitation_points))
f, m = lpc_to_frequency(lp_coefficients, per_frame_gain)
t = np.linspace(0, 1, window_size, endpoint=False)
hanning = sg.hanning(window_size)
for window in range(n_windows):
window_base = window * window_step
index = window_base + np.arange(window_size)
if voiced_frames[window]:
sig = np.zeros_like(t)
cycles = np.cumsum(f[window][0] * t)
sig += sg.sawtooth(cycles, 0.001)
residual_excitation[index] += hanning * sig
residual_excitation[index] += hanning * 0.01 * random_state.randn(
window_size)
else:
n_excitation_points = residual_excitation.shape[0]
n_points = n_excitation_points + window_step + window_step // 2
residual_excitation = np.hstack((residual_excitation,
np.zeros(window_size)))
if voiced_frames is None:
voiced_frames = np.ones_like(per_frame_gain)
synthesized = np.zeros((n_points))
for window in range(n_windows):
window_base = window * window_step
oldbit = synthesized[window_base + np.arange(window_step)]
w_coefs = lp_coefficients[window]
if not np.all(w_coefs):
# Hack to make lfilter avoid
# ValueError: BUG: filter coefficient a[0] == 0 not supported yet
# when all coeffs are 0
w_coefs = [1]
g_coefs = voiced_frames[window] * per_frame_gain[window]
index = window_base + np.arange(window_size)
newbit = g_coefs * sg.lfilter([1], w_coefs,
residual_excitation[index])
synthesized[index] = np.hstack((oldbit, np.zeros(
(window_size - window_step))))
synthesized[index] += sg.hanning(window_size) * newbit
synthesized = sg.lfilter([1], [1, -emphasis], synthesized)
return synthesized
def soundsc(X, gain_scale=.9, copy=True):
"""
Approximate implementation of soundsc from MATLAB without the audio playing.
Parameters
----------
X : ndarray
Signal to be rescaled
gain_scale : float
        Gain multiplier, default .9 (90% of maximum representation)
copy : bool, optional (default=True)
Whether to make a copy of input signal or operate in place.
Returns
-------
X_sc : ndarray
(-32767, 32767) scaled version of X as int16, suitable for writing
with scipy.io.wavfile
"""
X = np.array(X, copy=copy)
X = (X - X.min()) / (X.max() - X.min())
X = 2 * X - 1
X = gain_scale * X
X = X * 2 ** 15
return X.astype('int16')
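# Quick sketch of soundsc (not part of the original module): rescale a quiet tone
# to roughly 90% of the int16 range so it can be written with scipy.io.wavfile.
def _soundsc_example():
    fs = 16000
    t = np.arange(fs) / float(fs)
    x = 0.1 * np.sin(2 * np.pi * 220 * t)   # quiet 220 Hz tone, float in [-0.1, 0.1]
    x_int16 = soundsc(x)                     # int16, spanning ~90% of full scale
    # To write it out (hypothetical filename), scipy.io.wavfile is imported above:
    # wavfile.write("tone_220hz.wav", fs, x_int16)
    return x_int16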
def _wav2array(nchannels, sampwidth, data):
# wavio.py
# Author: Warren Weckesser
# License: BSD 3-Clause (http://opensource.org/licenses/BSD-3-Clause)
"""data must be the string containing the bytes from the wav file."""
num_samples, remainder = divmod(len(data), sampwidth * nchannels)
if remainder > 0:
raise ValueError('The length of data is not a multiple of '
'sampwidth * num_channels.')
if sampwidth > 4:
raise ValueError("sampwidth must not be greater than 4.")
if sampwidth == 3:
a = np.empty((num_samples, nchannels, 4), dtype=np.uint8)
raw_bytes = np.fromstring(data, dtype=np.uint8)
a[:, :, :sampwidth] = raw_bytes.reshape(-1, nchannels, sampwidth)
a[:, :, sampwidth:] = (a[:, :, sampwidth - 1:sampwidth] >> 7) * 255
result = a.view('<i4').reshape(a.shape[:-1])
else:
# 8 bit samples are stored as unsigned ints; others as signed ints.
dt_char = 'u' if sampwidth == 1 else 'i'
a = np.fromstring(data, dtype='<%s%d' % (dt_char, sampwidth))
result = a.reshape(-1, nchannels)
return result
def readwav(file):
# wavio.py
# Author: Warren Weckesser
# License: BSD 3-Clause (http://opensource.org/licenses/BSD-3-Clause)
"""
Read a wav file.
Returns the frame rate, sample width (in bytes) and a numpy array
containing the data.
This function does not read compressed wav files.
"""
wav = wave.open(file)
rate = wav.getframerate()
nchannels = wav.getnchannels()
sampwidth = wav.getsampwidth()
nframes = wav.getnframes()
data = wav.readframes(nframes)
wav.close()
array = _wav2array(nchannels, sampwidth, data)
return rate, sampwidth, array
def csvd(arr):
"""
Do the complex SVD of a 2D array, returning real valued U, S, VT
http://stemblab.github.io/complex-svd/
"""
C_r = arr.real
C_i = arr.imag
block_x = C_r.shape[0]
block_y = C_r.shape[1]
K = np.zeros((2 * block_x, 2 * block_y))
# Upper left
K[:block_x, :block_y] = C_r
# Lower left
K[:block_x, block_y:] = C_i
# Upper right
K[block_x:, :block_y] = -C_i
# Lower right
K[block_x:, block_y:] = C_r
return svd(K, full_matrices=False)
def icsvd(U, S, VT):
"""
Invert back to complex values from the output of csvd
U, S, VT = csvd(X)
X_rec = inv_csvd(U, S, VT)
"""
K = U.dot(np.diag(S)).dot(VT)
block_x = U.shape[0] // 2
block_y = U.shape[1] // 2
arr_rec = np.zeros((block_x, block_y)) + 0j
arr_rec.real = K[:block_x, :block_y]
arr_rec.imag = K[:block_x, block_y:]
return arr_rec
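# Round-trip sketch for csvd/icsvd (not part of the original module): embed a
# complex matrix into a real one, take the SVD, and reconstruct the original.
def _csvd_roundtrip_example():
    rng = np.random.RandomState(0)
    arr = rng.randn(4, 3) + 1j * rng.randn(4, 3)
    U, S, VT = csvd(arr)
    arr_rec = icsvd(U, S, VT)
    return np.allclose(arr, arr_rec)        # expected True up to float error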
def sinusoid_analysis(X, input_sample_rate, resample_block=128, copy=True):
"""
    Construct a sinusoidal model for the input signal.
Parameters
----------
X : ndarray
Input signal to model
input_sample_rate : int
The sample rate of the input signal
resample_block : int, optional (default=128)
Controls the step size of the sinusoidal model
Returns
-------
frequencies_hz : ndarray
Frequencies for the sinusoids, in Hz.
magnitudes : ndarray
Magnitudes of sinusoids returned in ``frequencies``
References
----------
D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
X = np.array(X, copy=copy)
resample_to = 8000
if input_sample_rate != resample_to:
if input_sample_rate % resample_to != 0:
raise ValueError("Input sample rate must be a multiple of 8k!")
# Should be able to use resample... ?
# resampled_count = round(len(X) * resample_to / input_sample_rate)
# X = sg.resample(X, resampled_count, window=sg.hanning(len(X)))
X = sg.decimate(X, input_sample_rate // resample_to, zero_phase=True)
step_size = 2 * round(resample_block / input_sample_rate * resample_to / 2.)
a, g, e = lpc_analysis(X, order=8, window_step=step_size,
window_size=2 * step_size)
f, m = lpc_to_frequency(a, g)
f_hz = f * resample_to / (2 * np.pi)
return f_hz, m
def slinterp(X, factor, copy=True):
"""
Slow-ish linear interpolation of a 1D numpy array. There must be some
better function to do this in numpy.
Parameters
----------
X : ndarray
1D input array to interpolate
factor : int
Integer factor to interpolate by
Return
------
X_r : ndarray
"""
sz = np.product(X.shape)
X = np.array(X, copy=copy)
X_s = np.hstack((X[1:], [0]))
X_r = np.zeros((factor, sz))
for i in range(factor):
X_r[i, :] = (factor - i) / float(factor) * X + (i / float(factor)) * X_s
return X_r.T.ravel()[:(sz - 1) * factor + 1]
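# Tiny worked example for slinterp (not part of the original module):
# interpolating [0, 1, 2] by a factor of 2 gives [0, 0.5, 1, 1.5, 2].
def _slinterp_example():
    x = np.array([0., 1., 2.])
    return slinterp(x, 2)                   # array([0. , 0.5, 1. , 1.5, 2. ])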
def sinusoid_synthesis(frequencies_hz, magnitudes, input_sample_rate=16000,
resample_block=128):
"""
Create a time series based on input frequencies and magnitudes.
Parameters
----------
frequencies_hz : ndarray
Input signal to model
magnitudes : int
The sample rate of the input signal
input_sample_rate : int, optional (default=16000)
The sample rate parameter that the sinusoid analysis was run with
resample_block : int, optional (default=128)
Controls the step size of the sinusoidal model
Returns
-------
synthesized : ndarray
Sound vector synthesized from input arguments
References
----------
D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
rows, cols = frequencies_hz.shape
synthesized = np.zeros((1 + ((rows - 1) * resample_block),))
for col in range(cols):
mags = slinterp(magnitudes[:, col], resample_block)
freqs = slinterp(frequencies_hz[:, col], resample_block)
cycles = np.cumsum(2 * np.pi * freqs / float(input_sample_rate))
sines = mags * np.cos(cycles)
synthesized += sines
return synthesized
def dct_compress(X, n_components, window_size=128):
"""
Compress using the DCT
Parameters
----------
X : ndarray, shape=(n_samples,)
The input signal to compress. Should be 1-dimensional
n_components : int
The number of DCT components to keep. Setting n_components to about
.5 * window_size can give compression with fairly good reconstruction.
window_size : int
The input X is broken into windows of window_size, each of which are
then compressed with the DCT.
Returns
-------
X_compressed : ndarray, shape=(num_windows, window_size)
A 2D array of non-overlapping DCT coefficients. For use with uncompress
Reference
---------
http://nbviewer.ipython.org/github/craffel/crucialpython/blob/master/week3/stride_tricks.ipynb
"""
if len(X) % window_size != 0:
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
num_frames = len(X) // window_size
X_strided = X.reshape((num_frames, window_size))
X_dct = fftpack.dct(X_strided, norm='ortho')
if n_components is not None:
X_dct = X_dct[:, :n_components]
return X_dct
def dct_uncompress(X_compressed, window_size=128):
"""
    Uncompress a DCT compressed signal (such as returned by ``dct_compress``).
Parameters
----------
X_compressed : ndarray, shape=(n_samples, n_features)
Windowed and compressed array.
window_size : int, optional (default=128)
        Size of the window used when ``dct_compress`` was called.
Returns
-------
X_reconstructed : ndarray, shape=(n_samples)
Reconstructed version of X.
"""
if X_compressed.shape[1] % window_size != 0:
append = np.zeros((X_compressed.shape[0],
window_size - X_compressed.shape[1] % window_size))
X_compressed = np.hstack((X_compressed, append))
X_r = fftpack.idct(X_compressed, norm='ortho')
return X_r.ravel()
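# Sketch of the DCT compress/uncompress pair above (not part of the original
# module): keep half of the coefficients per 128-sample window, then reconstruct.
def _dct_compress_example():
    rng = np.random.RandomState(0)
    x = rng.randn(1024)
    x_c = dct_compress(x, n_components=64, window_size=128)   # shape (8, 64)
    x_r = dct_uncompress(x_c, window_size=128)                 # lossy, length 1024
    return x_c.shape, x_r.shape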
def sine_window(X):
"""
Apply a sinusoid window to X.
Parameters
----------
X : ndarray, shape=(n_samples, n_features)
Input array of samples
Returns
-------
X_windowed : ndarray, shape=(n_samples, n_features)
Windowed version of X.
"""
i = np.arange(X.shape[1])
win = np.sin(np.pi * (i + 0.5) / X.shape[1])
row_stride = 0
col_stride = win.itemsize
strided_win = as_strided(win, shape=X.shape,
strides=(row_stride, col_stride))
return X * strided_win
def kaiserbessel_window(X, alpha=6.5):
"""
Apply a Kaiser-Bessel window to X.
Parameters
----------
X : ndarray, shape=(n_samples, n_features)
Input array of samples
alpha : float, optional (default=6.5)
Tuning parameter for Kaiser-Bessel function. alpha=6.5 should make
perfect reconstruction possible for DCT.
Returns
-------
X_windowed : ndarray, shape=(n_samples, n_features)
Windowed version of X.
"""
beta = np.pi * alpha
win = sg.kaiser(X.shape[1], beta)
row_stride = 0
col_stride = win.itemsize
strided_win = as_strided(win, shape=X.shape,
strides=(row_stride, col_stride))
return X * strided_win
def overlap(X, window_size, window_step):
"""
Create an overlapped version of X
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to window and overlap
window_size : int
Size of windows to take
window_step : int
Step size between windows
Returns
-------
X_strided : shape=(n_windows, window_size)
2D array of overlapped X
"""
if window_size % 2 != 0:
raise ValueError("Window size must be even!")
# Make sure there are an even number of windows before stridetricks
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
overlap_sz = window_size - window_step
new_shape = X.shape[:-1] + ((X.shape[-1] - overlap_sz) // window_step, window_size)
new_strides = X.strides[:-1] + (window_step * X.strides[-1],) + X.strides[-1:]
X_strided = as_strided(X, shape=new_shape, strides=new_strides)
return X_strided
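# Shape sketch for overlap (not part of the original module): a 16-sample signal
# windowed with window_size=8 and window_step=4 yields overlapping 8-sample frames.
def _overlap_example():
    x = np.arange(16).astype("float64")
    frames = overlap(x, window_size=8, window_step=4)
    return frames.shape                     # (5, 8) after zero padding at the end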
def halfoverlap(X, window_size):
"""
Create an overlapped version of X using 50% of window_size as overlap.
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to window and overlap
window_size : int
Size of windows to take
Returns
-------
X_strided : shape=(n_windows, window_size)
2D array of overlapped X
"""
if window_size % 2 != 0:
raise ValueError("Window size must be even!")
window_step = window_size // 2
# Make sure there are an even number of windows before stridetricks
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
num_frames = len(X) // window_step - 1
row_stride = X.itemsize * window_step
col_stride = X.itemsize
X_strided = as_strided(X, shape=(num_frames, window_size),
strides=(row_stride, col_stride))
return X_strided
def invert_halfoverlap(X_strided):
"""
Invert ``halfoverlap`` function to reconstruct X
Parameters
----------
X_strided : ndarray, shape=(n_windows, window_size)
X as overlapped windows
Returns
-------
X : ndarray, shape=(n_samples,)
Reconstructed version of X
"""
# Hardcoded 50% overlap! Can generalize later...
n_rows, n_cols = X_strided.shape
X = np.zeros((((int(n_rows // 2) + 1) * n_cols),)).astype(X_strided.dtype)
start_index = 0
end_index = n_cols
window_step = n_cols // 2
for row in range(X_strided.shape[0]):
X[start_index:end_index] += X_strided[row]
start_index += window_step
end_index += window_step
return X
def overlap_add(X_strided, window_step, wsola=False):
"""
overlap add to reconstruct X
Parameters
----------
X_strided : ndarray, shape=(n_windows, window_size)
X as overlapped windows
    window_step : int
        step size for overlap add
    wsola : bool, optional (default=False)
        if True, align each frame with a cross-correlation offset (WSOLA-style)
        before adding, instead of plain overlap-add
Returns
-------
X : ndarray, shape=(n_samples,)
Reconstructed version of X
"""
n_rows, window_size = X_strided.shape
# Start with largest size (no overlap) then truncate after we finish
# +2 for one window on each side
X = np.zeros(((n_rows + 2) * window_size,)).astype(X_strided.dtype)
start_index = 0
total_windowing_sum = np.zeros((X.shape[0]))
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(window_size) / (
window_size - 1))
for i in range(n_rows):
end_index = start_index + window_size
if wsola:
offset_size = window_size - window_step
offset = xcorr_offset(X[start_index:start_index + offset_size],
X_strided[i, :offset_size])
ss = start_index - offset
st = end_index - offset
if start_index - offset < 0:
ss = 0
st = 0 + (end_index - start_index)
X[ss:st] += X_strided[i]
total_windowing_sum[ss:st] += win
start_index = start_index + window_step
else:
X[start_index:end_index] += X_strided[i]
total_windowing_sum[start_index:end_index] += win
start_index += window_step
# Not using this right now
# X = np.real(X) / (total_windowing_sum + 1)
X = X[:end_index]
return X
def overlap_dct_compress(X, n_components, window_size):
"""
Overlap (at 50% of window_size) and compress X.
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to compress
n_components : int
number of DCT components to keep
window_size : int
Size of windows to take
Returns
-------
X_dct : ndarray, shape=(n_windows, n_components)
Windowed and compressed version of X
"""
X_strided = halfoverlap(X, window_size)
X_dct = fftpack.dct(X_strided, norm='ortho')
if n_components is not None:
X_dct = X_dct[:, :n_components]
return X_dct
# Evil voice is caused by adding double the zeros before inverse DCT...
# Very cool bug but makes sense
def overlap_dct_uncompress(X_compressed, window_size):
"""
    Uncompress X as returned from ``overlap_dct_compress``.
Parameters
----------
X_compressed : ndarray, shape=(n_windows, n_components)
Windowed and compressed version of X
window_size : int
Size of windows originally used when compressing X
Returns
-------
X_reconstructed : ndarray, shape=(n_samples,)
Reconstructed version of X
"""
if X_compressed.shape[1] % window_size != 0:
append = np.zeros((X_compressed.shape[0], window_size -
X_compressed.shape[1] % window_size))
X_compressed = np.hstack((X_compressed, append))
X_r = fftpack.idct(X_compressed, norm='ortho')
return invert_halfoverlap(X_r)
def herz_to_mel(freqs):
"""
Based on code by Dan Ellis
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
f_0 = 0 # 133.33333
f_sp = 200 / 3. # 66.66667
bark_freq = 1000.
bark_pt = (bark_freq - f_0) / f_sp
# The magic 1.0711703 which is the ratio needed to get from 1000 Hz
# to 6400 Hz in 27 steps, and is *almost* the ratio between 1000 Hz
# and the preceding linear filter center at 933.33333 Hz
# (actually 1000/933.33333 = 1.07142857142857 and
# exp(log(6.4)/27) = 1.07117028749447)
if not isinstance(freqs, np.ndarray):
freqs = np.array(freqs)[None]
log_step = np.exp(np.log(6.4) / 27)
lin_pts = (freqs < bark_freq)
mel = 0. * freqs
mel[lin_pts] = (freqs[lin_pts] - f_0) / f_sp
mel[~lin_pts] = bark_pt + np.log(freqs[~lin_pts] / bark_freq) / np.log(
log_step)
return mel
def mel_to_herz(mel):
"""
Based on code by Dan Ellis
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
f_0 = 0 # 133.33333
f_sp = 200 / 3. # 66.66667
bark_freq = 1000.
bark_pt = (bark_freq - f_0) / f_sp
# The magic 1.0711703 which is the ratio needed to get from 1000 Hz
# to 6400 Hz in 27 steps, and is *almost* the ratio between 1000 Hz
# and the preceding linear filter center at 933.33333 Hz
# (actually 1000/933.33333 = 1.07142857142857 and
# exp(log(6.4)/27) = 1.07117028749447)
if not isinstance(mel, np.ndarray):
mel = np.array(mel)[None]
log_step = np.exp(np.log(6.4) / 27)
lin_pts = (mel < bark_pt)
freqs = 0. * mel
freqs[lin_pts] = f_0 + f_sp * mel[lin_pts]
freqs[~lin_pts] = bark_freq * np.exp(np.log(log_step) * (
mel[~lin_pts] - bark_pt))
return freqs
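# Hedged sanity-check sketch (illustrative helper, not part of the original code):
# herz_to_mel and mel_to_herz should invert each other up to floating point error.
def _example_mel_roundtrip():
    freqs = np.array([200., 1000., 4000.])
    mels = herz_to_mel(freqs)
    freqs_back = mel_to_herz(mels)
    return np.max(np.abs(freqs - freqs_back))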
def mel_freq_weights(n_fft, fs, n_filts=None, width=None):
"""
Based on code by Dan Ellis
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
min_freq = 0
max_freq = fs // 2
if width is None:
width = 1.
if n_filts is None:
n_filts = int(herz_to_mel(max_freq) / 2) + 1
else:
n_filts = int(n_filts)
assert n_filts > 0
weights = np.zeros((n_filts, n_fft))
fft_freqs = np.arange(n_fft // 2) / n_fft * fs
min_mel = herz_to_mel(min_freq)
max_mel = herz_to_mel(max_freq)
partial = np.arange(n_filts + 2) / (n_filts + 1.) * (max_mel - min_mel)
bin_freqs = mel_to_herz(min_mel + partial)
bin_bin = np.round(bin_freqs / fs * (n_fft - 1))
for i in range(n_filts):
fs_i = bin_freqs[i + np.arange(3)]
fs_i = fs_i[1] + width * (fs_i - fs_i[1])
lo_slope = (fft_freqs - fs_i[0]) / float(fs_i[1] - fs_i[0])
hi_slope = (fs_i[2] - fft_freqs) / float(fs_i[2] - fs_i[1])
weights[i, :n_fft // 2] = np.maximum(
0, np.minimum(lo_slope, hi_slope))
# Constant amplitude multiplier
weights = np.diag(2. / (bin_freqs[2:n_filts + 2]
- bin_freqs[:n_filts])).dot(weights)
weights[:, n_fft // 2:] = 0
return weights
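# Hedged usage sketch (illustrative helper; assumes `x` is a 1D waveform at sample rate
# `fs`): build a mel filterbank and apply it to a magnitude STFT, mirroring how
# time_attack_agc below slices the weights to the one-sided spectrum of n_fft // 2 + 1 bins.
def _example_mel_spectrogram(x, fs, n_fft=512, n_filts=40):
    weights = mel_freq_weights(n_fft, fs, n_filts=n_filts)
    weights = weights[:, :n_fft // 2 + 1]
    X_freq = stft(x, n_fft, mean_normalize=False)
    return np.abs(X_freq).dot(weights.T)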
def time_attack_agc(X, fs, t_scale=0.5, f_scale=1.):
"""
AGC based on code by Dan Ellis
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
# 32 ms grid for FFT
n_fft = 2 ** int(np.log(0.032 * fs) / np.log(2))
f_scale = float(f_scale)
window_size = n_fft
window_step = window_size // 2
X_freq = stft(X, window_size, mean_normalize=False)
fft_fs = fs / window_step
n_bands = max(10, 20 / f_scale)
mel_width = f_scale * n_bands / 10.
f_to_a = mel_freq_weights(n_fft, fs, n_bands, mel_width)
f_to_a = f_to_a[:, :n_fft // 2 + 1]
audiogram = np.abs(X_freq).dot(f_to_a.T)
fbg = np.zeros_like(audiogram)
state = np.zeros((audiogram.shape[1],))
alpha = np.exp(-(1. / fft_fs) / t_scale)
for i in range(len(audiogram)):
state = np.maximum(alpha * state, audiogram[i])
fbg[i] = state
sf_to_a = np.sum(f_to_a, axis=0)
E = np.diag(1. / (sf_to_a + (sf_to_a == 0)))
E = E.dot(f_to_a.T)
E = fbg.dot(E.T)
E[E <= 0] = np.min(E[E > 0])
ts = istft(X_freq / E, window_size, mean_normalize=False)
return ts, X_freq, E
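# Hedged usage sketch (illustrative helper; assumes `x` is a 1D waveform at sample rate
# `fs`): run the time/frequency automatic gain control and keep the normalized signal.
def _example_agc(x, fs):
    x_agc, X_freq, energy = time_attack_agc(x, fs, t_scale=0.5, f_scale=1.)
    return x_agc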
def hebbian_kmeans(X, n_clusters=10, n_epochs=10, W=None, learning_rate=0.01,
batch_size=100, random_state=None, verbose=True):
"""
Modified from existing code from R. Memisevic
See http://www.cs.toronto.edu/~rfm/code/hebbian_kmeans.py
"""
if W is None:
if random_state is None:
random_state = np.random.RandomState()
W = 0.1 * random_state.randn(n_clusters, X.shape[1])
else:
assert n_clusters == W.shape[0]
X2 = (X ** 2).sum(axis=1, keepdims=True)
last_print = 0
for e in range(n_epochs):
for i in range(0, X.shape[0], batch_size):
X_i = X[i: i + batch_size]
X2_i = X2[i: i + batch_size]
D = -2 * np.dot(W, X_i.T)
D += (W ** 2).sum(axis=1, keepdims=True)
D += X2_i.T
S = (D == D.min(axis=0)[None, :]).astype("float").T
W += learning_rate * (
np.dot(S.T, X_i) - S.sum(axis=0)[:, None] * W)
if verbose:
if e == 0 or e > (.05 * n_epochs + last_print):
last_print = e
print(("Epoch %i of %i, cost %.4f" % (
e + 1, n_epochs, D.min(axis=0).sum())))
return W
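# Hedged usage sketch (illustrative helper, synthetic data): learn a small set of
# centroids with the Hebbian k-means rule above.
def _example_hebbian_kmeans():
    rng = np.random.RandomState(0)
    X = rng.randn(500, 16)
    W = hebbian_kmeans(X, n_clusters=8, n_epochs=5, batch_size=100,
                       random_state=rng, verbose=False)
    return W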
def complex_to_real_view(arr_c):
# Inplace view from complex to r, i as separate columns
assert arr_c.dtype in [np.complex64, np.complex128]
shp = arr_c.shape
dtype = np.float64 if arr_c.dtype == np.complex128 else np.float32
arr_r = arr_c.ravel().view(dtype=dtype).reshape(shp[0], 2 * shp[1])
return arr_r
def real_to_complex_view(arr_r):
# Inplace view from real, image as columns to complex
assert arr_r.dtype not in [np.complex64, np.complex128]
shp = arr_r.shape
dtype = np.complex128 if arr_r.dtype == np.float64 else np.complex64
arr_c = arr_r.ravel().view(dtype=dtype).reshape(shp[0], shp[1] // 2)
return arr_c
def complex_to_abs(arr_c):
return np.abs(arr_c)
def complex_to_angle(arr_c):
return np.angle(arr_c)
def abs_and_angle_to_complex(arr_abs, arr_angle):
# abs(f_c2 - f_c) < 1E-15
return arr_abs * np.exp(1j * arr_angle)
def angle_to_sin_cos(arr_angle):
return np.hstack((np.sin(arr_angle), np.cos(arr_angle)))
def sin_cos_to_angle(arr_sin, arr_cos):
return np.arctan2(arr_sin, arr_cos)
def polyphase_core(x, m, f):
# x = input data
# m = decimation rate
# f = filter
# Hack job - append zeros to match decimation rate
if x.shape[0] % m != 0:
x = np.append(x, np.zeros((m - x.shape[0] % m,)))
if f.shape[0] % m != 0:
f = np.append(f, np.zeros((m - f.shape[0] % m,)))
    # integer division so the shape stays an int under Python 3
    polyphase = p = np.zeros((m, (x.shape[0] + f.shape[0]) // m), dtype=x.dtype)
p[0, :-1] = np.convolve(x[::m], f[::m])
# Invert the x values when applying filters
for i in range(1, m):
p[i, 1:] = np.convolve(x[m - i::m], f[i::m])
return p
def polyphase_single_filter(x, m, f):
return np.sum(polyphase_core(x, m, f), axis=0)
def polyphase_lowpass(arr, downsample=2, n_taps=50, filter_pad=1.1):
filt = firwin(downsample * n_taps, 1 / (downsample * filter_pad))
filtered = polyphase_single_filter(arr, downsample, filt)
return filtered
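# Hedged usage sketch (illustrative helper; assumes `x` is a 1D waveform): low-pass
# filter and decimate by 2 with the polyphase structure above - the summed branches
# already come out at the reduced rate, so no further slicing is needed.
def _example_polyphase_decimate(x):
    return polyphase_lowpass(x, downsample=2, n_taps=50)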
def window(arr, window_size, window_step=1, axis=0):
"""
Directly taken from Erik Rigtorp's post to numpy-discussion.
<http://www.mail-archive.com/[email protected]/msg29450.html>
<http://stackoverflow.com/questions/4936620/using-strides-for-an-efficient-moving-average-filter>
"""
if window_size < 1:
raise ValueError("`window_size` must be at least 1.")
if window_size > arr.shape[-1]:
raise ValueError("`window_size` is too long.")
orig = list(range(len(arr.shape)))
trans = list(range(len(arr.shape)))
trans[axis] = orig[-1]
trans[-1] = orig[axis]
arr = arr.transpose(trans)
shape = arr.shape[:-1] + (arr.shape[-1] - window_size + 1, window_size)
strides = arr.strides + (arr.strides[-1],)
strided = as_strided(arr, shape=shape, strides=strides)
if window_step > 1:
strided = strided[..., ::window_step, :]
orig = list(range(len(strided.shape)))
trans = list(range(len(strided.shape)))
trans[-2] = orig[-1]
trans[-1] = orig[-2]
trans = trans[::-1]
strided = strided.transpose(trans)
return strided
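# Hedged usage sketch (illustrative helper): strided windowing of a short 1D array.
# With window_size=4 and window_step=2 this yields overlapping rows
# [0:4], [2:6], [4:8], [6:10].
def _example_window():
    a = np.arange(10).astype("float32")
    return window(a, window_size=4, window_step=2)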
def unwindow(arr, window_size, window_step=1, axis=0):
# undo windows by broadcast
if axis != 0:
raise ValueError("axis != 0 currently unsupported")
shp = arr.shape
unwindowed = np.tile(arr[:, None, ...], (1, window_step, 1, 1))
unwindowed = unwindowed.reshape(shp[0] * window_step, *shp[1:])
return unwindowed.mean(axis=1)
def xcorr_offset(x1, x2):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
D. Griffin and J. Lim. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
x1 = x1 - x1.mean()
x2 = x2 - x2.mean()
frame_size = len(x2)
half = frame_size // 2
corrs = np.convolve(x1.astype('float32'), x2[::-1].astype('float32'))
corrs[:half] = -1E30
corrs[-half:] = -1E30
offset = corrs.argmax() - len(x1)
return offset
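# Hedged usage sketch (illustrative helper): the returned offset is the location of the
# cross-correlation peak relative to len(x1), which is how invert_spectrogram below
# uses it to align successive overlapping frames.
def _example_xcorr_offset():
    rng = np.random.RandomState(0)
    frame = rng.randn(256)
    return xcorr_offset(frame, np.roll(frame, 5))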
def invert_spectrogram(X_s, step, calculate_offset=True, set_zero_phase=True):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
D. Griffin and J. Lim. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
size = int(X_s.shape[1] // 2)
wave = np.zeros((X_s.shape[0] * step + size))
# Getting overflow warnings with 32 bit...
wave = wave.astype('float64')
total_windowing_sum = np.zeros((X_s.shape[0] * step + size))
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(size) / (size - 1))
est_start = int(size // 2) - 1
est_end = est_start + size
for i in range(X_s.shape[0]):
wave_start = int(step * i)
wave_end = wave_start + size
if set_zero_phase:
spectral_slice = X_s[i].real + 0j
else:
# already complex
spectral_slice = X_s[i]
# Don't need fftshift due to different impl.
wave_est = np.real(np.fft.ifft(spectral_slice))[::-1]
if calculate_offset and i > 0:
offset_size = size - step
if offset_size <= 0:
print("WARNING: Large step size >50\% detected! "
"This code works best with high overlap - try "
"with 75% or greater")
offset_size = step
offset = xcorr_offset(wave[wave_start:wave_start + offset_size],
wave_est[est_start:est_start + offset_size])
else:
offset = 0
wave[wave_start:wave_end] += win * wave_est[
est_start - offset:est_end - offset]
total_windowing_sum[wave_start:wave_end] += win
wave = np.real(wave) / (total_windowing_sum + 1E-6)
return wave
def iterate_invert_spectrogram(X_s, fftsize, step, n_iter=10, verbose=False,
complex_input=False):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
D. Griffin and J. Lim. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
reg = np.max(X_s) / 1E8
X_best = copy.deepcopy(X_s)
try:
for i in range(n_iter):
if verbose:
print(("Runnning iter %i" % i))
if i == 0 and not complex_input:
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=True)
else:
# Calculate offset was False in the MATLAB version
# but in mine it massively improves the result
# Possible bug in my impl?
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=False)
est = stft(X_t, fftsize=fftsize, step=step, compute_onesided=False)
phase = est / np.maximum(reg, np.abs(est))
phase = phase[:len(X_s)]
X_s = X_s[:len(phase)]
X_best = X_s * phase
except ValueError:
raise ValueError("The iterate_invert_spectrogram algorithm requires"
" stft(..., compute_onesided=False),",
" be sure you have calculated stft with this argument")
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=False)
return np.real(X_t)
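# Hedged usage sketch (illustrative helper; note iterate_invert_spectrogram requires a
# two-sided spectrogram, i.e. stft(..., compute_onesided=False)): Griffin-Lim style
# recovery of a time signal from a magnitude spectrogram.
def _example_griffin_lim(x, fftsize=512, step=128, n_iter=10):
    X = np.abs(stft(x, fftsize=fftsize, step=step, compute_onesided=False))
    return iterate_invert_spectrogram(X, fftsize, step, n_iter=n_iter)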
def pretty_spectrogram(d, log=True, thresh=5, fft_size=512, step_size=64):
"""
creates a spectrogram
log: take the log of the spectrgram
thresh: threshold minimum power for log spectrogram
"""
specgram = np.abs(stft(d, fftsize=fft_size, step=step_size, real=False,
compute_onesided=True))
if log == True:
specgram /= specgram.max() # volume normalize to max 1
specgram = np.log10(specgram) # take log
specgram[specgram < -thresh] = -thresh # set anything less than the threshold as the threshold
else:
specgram[specgram < thresh] = thresh # set anything less than the threshold as the threshold
return specgram
# Also mostly modified or taken from https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
def invert_pretty_spectrogram(X_s, log=True, fft_size=512, step_size=512 // 4, n_iter=10):
if log == True:
X_s = np.power(10, X_s)
X_s = np.concatenate([X_s, X_s[:, ::-1]], axis=1)
X_t = iterate_invert_spectrogram(X_s, fft_size, step_size, n_iter=n_iter)
return X_t
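# Hedged usage sketch (illustrative helper; assumes `x` is a 1D waveform): log-magnitude
# spectrogram and its approximate inversion. fft_size and step_size must match between
# the two calls.
def _example_pretty_spectrogram_roundtrip(x):
    spec = pretty_spectrogram(x, log=True, thresh=5, fft_size=512, step_size=128)
    x_rec = invert_pretty_spectrogram(spec, log=True, fft_size=512, step_size=128,
                                      n_iter=10)
    return spec, x_rec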
def harvest_get_downsampled_signal(x, fs, target_fs):
decimation_ratio = np.round(fs / target_fs)
offset = np.ceil(140. / decimation_ratio) * decimation_ratio
start_pad = x[0] * np.ones(int(offset), dtype=np.float32)
end_pad = x[-1] * np.ones(int(offset), dtype=np.float32)
x = np.concatenate((start_pad, x, end_pad), axis=0)
if fs < target_fs:
raise ValueError("CASE NOT HANDLED IN harvest_get_downsampled_signal")
else:
try:
y0 = sg.decimate(x, int(decimation_ratio), 3, zero_phase=True)
except:
y0 = sg.decimate(x, int(decimation_ratio), 3)
actual_fs = fs / decimation_ratio
y = y0[int(offset / decimation_ratio):-int(offset / decimation_ratio)]
y = y - np.mean(y)
return y, actual_fs
def harvest_get_raw_f0_candidates(number_of_frames, boundary_f0_list,
y_length, temporal_positions, actual_fs, y_spectrum, f0_floor,
f0_ceil):
raw_f0_candidates = np.zeros((len(boundary_f0_list), number_of_frames), dtype=np.float32)
for i in range(len(boundary_f0_list)):
raw_f0_candidates[i, :] = harvest_get_f0_candidate_from_raw_event(
boundary_f0_list[i], actual_fs, y_spectrum, y_length,
temporal_positions, f0_floor, f0_ceil)
return raw_f0_candidates
def harvest_nuttall(N):
t = np.arange(0, N) * 2 * np.pi / (N - 1)
coefs = np.array([0.355768, -0.487396, 0.144232, -0.012604])
window = np.cos(t[:, None].dot(np.array([0., 1., 2., 3.])[None])).dot(coefs[:, None])
# 1D window...
return window.ravel()
def harvest_get_f0_candidate_from_raw_event(boundary_f0,
fs, y_spectrum, y_length, temporal_positions, f0_floor,
f0_ceil):
filter_length_half = int(np.round(fs / boundary_f0 * 2))
band_pass_filter_base = harvest_nuttall(filter_length_half * 2 + 1)
shifter = np.cos(2 * np.pi * boundary_f0 * np.arange(-filter_length_half, filter_length_half + 1) / float(fs))
band_pass_filter = band_pass_filter_base * shifter
index_bias = filter_length_half
# possible numerical issues if 32 bit
spectrum_low_pass_filter = np.fft.fft(band_pass_filter.astype("float64"), len(y_spectrum))
filtered_signal = np.real(np.fft.ifft(spectrum_low_pass_filter * y_spectrum))
index_bias = filter_length_half + 1
filtered_signal = filtered_signal[index_bias + np.arange(y_length).astype("int32")]
negative_zero_cross = harvest_zero_crossing_engine(filtered_signal, fs)
positive_zero_cross = harvest_zero_crossing_engine(-filtered_signal, fs)
d_filtered_signal = filtered_signal[1:] - filtered_signal[:-1]
peak = harvest_zero_crossing_engine(d_filtered_signal, fs)
dip = harvest_zero_crossing_engine(-d_filtered_signal, fs)
f0_candidate = harvest_get_f0_candidate_contour(negative_zero_cross,
positive_zero_cross, peak, dip, temporal_positions)
f0_candidate[f0_candidate > (boundary_f0 * 1.1)] = 0.
f0_candidate[f0_candidate < (boundary_f0 * .9)] = 0.
f0_candidate[f0_candidate > f0_ceil] = 0.
f0_candidate[f0_candidate < f0_floor] = 0.
return f0_candidate
def harvest_get_f0_candidate_contour(negative_zero_cross_tup,
positive_zero_cross_tup, peak_tup, dip_tup, temporal_positions):
    # 0 is interval locations
# 1 is interval based f0
usable_channel = max(0, len(negative_zero_cross_tup[0]) - 2)
usable_channel *= max(0, len(positive_zero_cross_tup[0]) - 2)
usable_channel *= max(0, len(peak_tup[0]) - 2)
usable_channel *= max(0, len(dip_tup[0]) - 2)
if usable_channel > 0:
interpolated_f0_list = np.zeros((4, len(temporal_positions)))
nz = interp1d(negative_zero_cross_tup[0], negative_zero_cross_tup[1],
kind="linear", bounds_error=False, fill_value="extrapolate")
pz = interp1d(positive_zero_cross_tup[0], positive_zero_cross_tup[1],
kind="linear", bounds_error=False, fill_value="extrapolate")
pkz = interp1d(peak_tup[0], peak_tup[1],
kind="linear", bounds_error=False, fill_value="extrapolate")
dz = interp1d(dip_tup[0], dip_tup[1],
kind="linear", bounds_error=False, fill_value="extrapolate")
interpolated_f0_list[0, :] = nz(temporal_positions)
interpolated_f0_list[1, :] = pz(temporal_positions)
interpolated_f0_list[2, :] = pkz(temporal_positions)
interpolated_f0_list[3, :] = dz(temporal_positions)
f0_candidate = np.mean(interpolated_f0_list, axis=0)
else:
f0_candidate = temporal_positions * 0
return f0_candidate
def harvest_zero_crossing_engine(x, fs, debug=False):
# negative zero crossing, going from positive to negative
x_shift = x.copy()
x_shift[:-1] = x_shift[1:]
x_shift[-1] = x[-1]
# +1 here to avoid edge case at 0
points = np.arange(len(x)) + 1
negative_going_points = points * ((x_shift * x < 0) * (x_shift < x))
edge_list = negative_going_points[negative_going_points > 0]
# -1 to correct index
fine_edge_list = edge_list - x[edge_list - 1] / (x[edge_list] - x[edge_list - 1]).astype("float32")
interval_locations = (fine_edge_list[:-1] + fine_edge_list[1:]) / float(2) / fs
interval_based_f0 = float(fs) / (fine_edge_list[1:] - fine_edge_list[:-1])
return interval_locations, interval_based_f0
def harvest_detect_official_f0_candidates(raw_f0_candidates):
number_of_channels, number_of_frames = raw_f0_candidates.shape
f0_candidates = np.zeros((int(np.round(number_of_channels / 10.)), number_of_frames))
number_of_candidates = 0
threshold = 10
for i in range(number_of_frames):
tmp = raw_f0_candidates[:, i].copy()
tmp[tmp > 0] = 1.
tmp[0] = 0
tmp[-1] = 0
tmp = tmp[1:] - tmp[:-1]
st = np.where(tmp == 1)[0]
ed = np.where(tmp == -1)[0]
count = 0
for j in range(len(st)):
dif = ed[j] - st[j]
if dif >= threshold:
tmp_f0 = raw_f0_candidates[st[j] + 1: ed[j] + 1, i]
f0_candidates[count, i] = np.mean(tmp_f0)
count = count + 1
number_of_candidates = max(number_of_candidates, count)
return f0_candidates, number_of_candidates
def harvest_overlap_f0_candidates(f0_candidates, max_number_of_f0_candidates):
n = 3 # this is the optimized parameter... apparently
number_of_candidates = n * 2 + 1
new_f0_candidates = f0_candidates[number_of_candidates, :].copy()
new_f0_candidates = new_f0_candidates[None]
# hack to bypass magic matlab-isms of allocating when indexing OOB
new_f0_candidates = np.vstack(
[new_f0_candidates] + (new_f0_candidates.shape[-1] - 1) * [np.zeros_like(new_f0_candidates)])
# this indexing is megagross, possible source for bugs!
all_nonzero = []
for i in range(number_of_candidates):
st = max(-(i - n), 0)
ed = min(-(i - n), 0)
f1_b = np.arange(max_number_of_f0_candidates).astype("int32")
f1 = f1_b + int(i * max_number_of_f0_candidates)
all_nonzero = list(set(all_nonzero + list(f1)))
f2 = None if ed == 0 else ed
f3 = -ed
f4 = None if st == 0 else -st
new_f0_candidates[f1, st:f2] = f0_candidates[f1_b, f3:f4]
new_f0_candidates = new_f0_candidates[all_nonzero, :]
return new_f0_candidates
def harvest_refine_candidates(x, fs, temporal_positions, f0_candidates,
f0_floor, f0_ceil):
new_f0_candidates = f0_candidates.copy()
f0_scores = f0_candidates * 0.
for i in range(len(temporal_positions)):
for j in range(len(f0_candidates)):
tmp_f0 = f0_candidates[j, i]
if tmp_f0 == 0:
continue
res = harvest_get_refined_f0(x, fs, temporal_positions[i],
tmp_f0, f0_floor, f0_ceil)
new_f0_candidates[j, i] = res[0]
f0_scores[j, i] = res[1]
return new_f0_candidates, f0_scores
def harvest_get_refined_f0(x, fs, current_time, current_f0, f0_floor,
f0_ceil):
half_window_length = np.ceil(3. * fs / current_f0 / 2.)
window_length_in_time = (2. * half_window_length + 1) / float(fs)
base_time = np.arange(-half_window_length, half_window_length + 1) / float(fs)
fft_size = int(2 ** np.ceil(np.log2((half_window_length * 2 + 1)) + 1))
frequency_axis = np.arange(fft_size) / fft_size * float(fs)
base_index = np.round((current_time + base_time) * fs + 0.001)
index_time = (base_index - 1) / float(fs)
window_time = index_time - current_time
part1 = np.cos(2 * np.pi * window_time / window_length_in_time)
part2 = np.cos(4 * np.pi * window_time / window_length_in_time)
main_window = 0.42 + 0.5 * part1 + 0.08 * part2
ext = np.zeros((len(main_window) + 2))
ext[1:-1] = main_window
diff_window = -((ext[1:-1] - ext[:-2]) + (ext[2:] - ext[1:-1])) / float(2)
safe_index = np.maximum(1, np.minimum(len(x), base_index)).astype("int32") - 1
spectrum = np.fft.fft(x[safe_index] * main_window, fft_size)
diff_spectrum = np.fft.fft(x[safe_index] * diff_window, fft_size)
numerator_i = np.real(spectrum) * np.imag(diff_spectrum) - np.imag(spectrum) * np.real(diff_spectrum)
power_spectrum = np.abs(spectrum) ** 2
instantaneous_frequency = frequency_axis + numerator_i / power_spectrum * float(fs) / 2. / np.pi
number_of_harmonics = int(min(np.floor(float(fs) / 2. / current_f0), 6.))
harmonics_index = np.arange(number_of_harmonics) + 1
index_list = np.round(current_f0 * fft_size / fs * harmonics_index).astype("int32")
instantaneous_frequency_list = instantaneous_frequency[index_list]
amplitude_list = np.sqrt(power_spectrum[index_list])
refined_f0 = np.sum(amplitude_list * instantaneous_frequency_list)
refined_f0 /= np.sum(amplitude_list * harmonics_index.astype("float32"))
variation = np.abs(
((instantaneous_frequency_list / harmonics_index.astype("float32")) - current_f0) / float(current_f0))
refined_score = 1. / (0.000000000001 + np.mean(variation))
if (refined_f0 < f0_floor) or (refined_f0 > f0_ceil) or (refined_score < 2.5):
refined_f0 = 0.
        refined_score = 0.
return refined_f0, refined_score
def harvest_select_best_f0(reference_f0, f0_candidates, allowed_range):
best_f0 = 0
best_error = allowed_range
for i in range(len(f0_candidates)):
tmp = np.abs(reference_f0 - f0_candidates[i]) / reference_f0
if tmp > best_error:
continue
best_f0 = f0_candidates[i]
best_error = tmp
return best_f0, best_error
def harvest_remove_unreliable_candidates(f0_candidates, f0_scores):
new_f0_candidates = f0_candidates.copy()
new_f0_scores = f0_scores.copy()
threshold = 0.05
f0_length = f0_candidates.shape[1]
number_of_candidates = len(f0_candidates)
for i in range(1, f0_length - 1):
for j in range(number_of_candidates):
reference_f0 = f0_candidates[j, i]
if reference_f0 == 0:
continue
_, min_error1 = harvest_select_best_f0(reference_f0, f0_candidates[:, i + 1], 1)
_, min_error2 = harvest_select_best_f0(reference_f0, f0_candidates[:, i - 1], 1)
min_error = min([min_error1, min_error2])
if min_error > threshold:
new_f0_candidates[j, i] = 0
new_f0_scores[j, i] = 0
return new_f0_candidates, new_f0_scores
def harvest_search_f0_base(f0_candidates, f0_scores):
f0_base = f0_candidates[0, :] * 0.
for i in range(len(f0_base)):
max_index = np.argmax(f0_scores[:, i])
f0_base[i] = f0_candidates[max_index, i]
return f0_base
def harvest_fix_step_1(f0_base, allowed_range):
# Step 1: Rapid change of f0 contour is replaced by 0
f0_step1 = f0_base.copy()
f0_step1[0] = 0.
f0_step1[1] = 0.
for i in range(2, len(f0_base)):
if f0_base[i] == 0:
continue
reference_f0 = f0_base[i - 1] * 2 - f0_base[i - 2]
c1 = np.abs((f0_base[i] - reference_f0) / reference_f0) > allowed_range
c2 = np.abs((f0_base[i] - f0_base[i - 1]) / f0_base[i - 1]) > allowed_range
if c1 and c2:
f0_step1[i] = 0.
return f0_step1
def harvest_fix_step_2(f0_step1, voice_range_minimum):
f0_step2 = f0_step1.copy()
boundary_list = harvest_get_boundary_list(f0_step1)
for i in range(1, int(len(boundary_list) / 2.) + 1):
distance = boundary_list[(2 * i) - 1] - boundary_list[(2 * i) - 2]
if distance < voice_range_minimum:
# need one more due to range not including last index
lb = boundary_list[(2 * i) - 2]
ub = boundary_list[(2 * i) - 1] + 1
f0_step2[lb:ub] = 0.
return f0_step2
def harvest_fix_step_3(f0_step2, f0_candidates, allowed_range, f0_scores):
f0_step3 = f0_step2.copy()
boundary_list = harvest_get_boundary_list(f0_step2)
multichannel_f0 = harvest_get_multichannel_f0(f0_step2, boundary_list)
rrange = np.zeros((int(len(boundary_list) / 2), 2))
threshold1 = 100
threshold2 = 2200
count = 0
for i in range(1, int(len(boundary_list) / 2) + 1):
# changed to 2 * i - 2
extended_f0, tmp_range_1 = harvest_extend_f0(multichannel_f0[i - 1, :],
boundary_list[(2 * i) - 1],
min([len(f0_step2) - 1, boundary_list[(2 * i) - 1] + threshold1]),
1, f0_candidates, allowed_range)
tmp_f0_sequence, tmp_range_0 = harvest_extend_f0(extended_f0,
boundary_list[(2 * i) - 2],
max([2, boundary_list[(2 * i) - 2] - threshold1]), -1,
f0_candidates, allowed_range)
mean_f0 = np.mean(tmp_f0_sequence[tmp_range_0: tmp_range_1 + 1])
if threshold2 / mean_f0 < (tmp_range_1 - tmp_range_0):
multichannel_f0[count, :] = tmp_f0_sequence
rrange[count, :] = np.array([tmp_range_0, tmp_range_1])
count = count + 1
if count > 0:
multichannel_f0 = multichannel_f0[:count, :]
rrange = rrange[:count, :]
f0_step3 = harvest_merge_f0(multichannel_f0, rrange, f0_candidates,
f0_scores)
return f0_step3
def harvest_merge_f0(multichannel_f0, rrange, f0_candidates, f0_scores):
number_of_channels = len(multichannel_f0)
sorted_order = np.argsort(rrange[:, 0])
f0 = multichannel_f0[sorted_order[0], :]
for i in range(1, number_of_channels):
if rrange[sorted_order[i], 0] - rrange[sorted_order[0], 1] > 0:
# no overlapping
f0[int(rrange[sorted_order[i], 0]):int(rrange[sorted_order[i], 1])] = multichannel_f0[sorted_order[i],
int(rrange[sorted_order[i], 0]):int(
rrange[sorted_order[i], 1])]
cp = rrange.copy()
rrange[sorted_order[0], 0] = cp[sorted_order[i], 0]
rrange[sorted_order[0], 1] = cp[sorted_order[i], 1]
else:
cp = rrange.copy()
res = harvest_merge_f0_sub(f0, cp[sorted_order[0], 0],
cp[sorted_order[0], 1],
multichannel_f0[sorted_order[i], :],
cp[sorted_order[i], 0],
cp[sorted_order[i], 1], f0_candidates, f0_scores)
f0 = res[0]
rrange[sorted_order[0], 1] = res[1]
return f0
def harvest_merge_f0_sub(f0_1, st1, ed1, f0_2, st2, ed2, f0_candidates,
f0_scores):
merged_f0 = f0_1
if (st1 <= st2) and (ed1 >= ed2):
new_ed = ed1
return merged_f0, new_ed
new_ed = ed2
score1 = 0.
score2 = 0.
for i in range(int(st2), int(ed1) + 1):
score1 = score1 + harvest_serach_score(f0_1[i], f0_candidates[:, i], f0_scores[:, i])
score2 = score2 + harvest_serach_score(f0_2[i], f0_candidates[:, i], f0_scores[:, i])
if score1 > score2:
merged_f0[int(ed1):int(ed2) + 1] = f0_2[int(ed1):int(ed2) + 1]
else:
merged_f0[int(st2):int(ed2) + 1] = f0_2[int(st2):int(ed2) + 1]
return merged_f0, new_ed
def harvest_serach_score(f0, f0_candidates, f0_scores):
score = 0
for i in range(len(f0_candidates)):
if (f0 == f0_candidates[i]) and (score < f0_scores[i]):
score = f0_scores[i]
return score
def harvest_extend_f0(f0, origin, last_point, shift, f0_candidates,
allowed_range):
threshold = 4
extended_f0 = f0.copy()
tmp_f0 = extended_f0[origin]
shifted_origin = origin
count = 0
for i in np.arange(origin, last_point + shift, shift):
# off by 1 issues
if (i + shift) >= f0_candidates.shape[1]:
continue
bf0, bs = harvest_select_best_f0(tmp_f0,
f0_candidates[:, i + shift], allowed_range)
extended_f0[i + shift] = bf0
if extended_f0[i + shift] != 0:
tmp_f0 = extended_f0[i + shift]
count = 0
shifted_origin = i + shift
else:
count = count + 1
if count == threshold:
break
return extended_f0, shifted_origin
def harvest_get_multichannel_f0(f0, boundary_list):
multichannel_f0 = np.zeros((int(len(boundary_list) / 2), len(f0)))
for i in range(1, int(len(boundary_list) / 2) + 1):
sl = boundary_list[(2 * i) - 2]
el = boundary_list[(2 * i) - 1] + 1
multichannel_f0[i - 1, sl:el] = f0[sl:el]
return multichannel_f0
def harvest_get_boundary_list(f0):
vuv = f0.copy()
vuv[vuv != 0] = 1.
vuv[0] = 0
vuv[-1] = 0
diff_vuv = vuv[1:] - vuv[:-1]
boundary_list = np.where(diff_vuv != 0)[0]
boundary_list[::2] = boundary_list[::2] + 1
return boundary_list
def harvest_fix_step_4(f0_step3, threshold):
f0_step4 = f0_step3.copy()
boundary_list = harvest_get_boundary_list(f0_step3)
for i in range(1, int(len(boundary_list) / 2.)):
distance = boundary_list[(2 * i)] - boundary_list[(2 * i) - 1] - 1
if distance >= threshold:
continue
boundary0 = f0_step3[boundary_list[(2 * i) - 1]] + 1
boundary1 = f0_step3[boundary_list[(2 * i)]] - 1
coefficient = (boundary1 - boundary0) / float((distance + 1))
count = 1
st = boundary_list[(2 * i) - 1] + 1
ed = boundary_list[(2 * i)]
for j in range(st, ed):
f0_step4[j] = boundary0 + coefficient * count
count = count + 1
return f0_step4
def harvest_fix_f0_contour(f0_candidates, f0_scores):
f0_base = harvest_search_f0_base(f0_candidates, f0_scores)
f0_step1 = harvest_fix_step_1(f0_base, 0.008) # optimized?
f0_step2 = harvest_fix_step_2(f0_step1, 6) # optimized?
f0_step3 = harvest_fix_step_3(f0_step2, f0_candidates, 0.18, f0_scores) # optimized?
f0 = harvest_fix_step_4(f0_step3, 9) # optimized
vuv = f0.copy()
vuv[vuv != 0] = 1.
return f0, vuv
def harvest_filter_f0_contour(f0, st, ed, b, a):
smoothed_f0 = f0.copy()
smoothed_f0[:st] = smoothed_f0[st]
smoothed_f0[ed + 1:] = smoothed_f0[ed]
aaa = sg.lfilter(b, a, smoothed_f0)
bbb = sg.lfilter(b, a, aaa[::-1])
smoothed_f0 = bbb[::-1].copy()
smoothed_f0[:st] = 0.
smoothed_f0[ed + 1:] = 0.
return smoothed_f0
def harvest_smooth_f0_contour(f0):
b = np.array([0.0078202080334971724, 0.015640416066994345, 0.0078202080334971724])
a = np.array([1.0, -1.7347257688092754, 0.76600660094326412])
smoothed_f0 = np.concatenate([np.zeros(300, ), f0, np.zeros(300, )])
boundary_list = harvest_get_boundary_list(smoothed_f0)
multichannel_f0 = harvest_get_multichannel_f0(smoothed_f0, boundary_list)
for i in range(1, int(len(boundary_list) / 2) + 1):
tmp_f0_contour = harvest_filter_f0_contour(multichannel_f0[i - 1, :],
boundary_list[(2 * i) - 2], boundary_list[(2 * i) - 1], b, a)
st = boundary_list[(2 * i) - 2]
ed = boundary_list[(2 * i) - 1] + 1
smoothed_f0[st:ed] = tmp_f0_contour[st:ed]
smoothed_f0 = smoothed_f0[300:-300]
return smoothed_f0
def _world_get_temporal_positions(x_len, fs):
frame_period = 5
basic_frame_period = 1
basic_temporal_positions = np.arange(0, x_len / float(fs), basic_frame_period / float(1000))
temporal_positions = np.arange(0,
x_len / float(fs),
frame_period / float(1000))
return basic_temporal_positions, temporal_positions
def harvest(x, fs):
f0_floor = 71
f0_ceil = 800
target_fs = 8000
channels_in_octave = 40.
basic_temporal_positions, temporal_positions = _world_get_temporal_positions(len(x), fs)
adjusted_f0_floor = f0_floor * 0.9
adjusted_f0_ceil = f0_ceil * 1.1
boundary_f0_list = np.arange(1, np.ceil(
np.log2(adjusted_f0_ceil / adjusted_f0_floor) * channels_in_octave) + 1) / float(channels_in_octave)
boundary_f0_list = adjusted_f0_floor * 2.0 ** boundary_f0_list
y, actual_fs = harvest_get_downsampled_signal(x, fs, target_fs)
fft_size = 2. ** np.ceil(np.log2(len(y) + np.round(fs / f0_floor * 4) + 1))
y_spectrum = np.fft.fft(y, int(fft_size))
raw_f0_candidates = harvest_get_raw_f0_candidates(
len(basic_temporal_positions),
boundary_f0_list, len(y), basic_temporal_positions, actual_fs,
y_spectrum, f0_floor, f0_ceil)
f0_candidates, number_of_candidates = harvest_detect_official_f0_candidates(raw_f0_candidates)
f0_candidates = harvest_overlap_f0_candidates(f0_candidates, number_of_candidates)
f0_candidates, f0_scores = harvest_refine_candidates(y, actual_fs,
basic_temporal_positions, f0_candidates, f0_floor, f0_ceil)
f0_candidates, f0_scores = harvest_remove_unreliable_candidates(f0_candidates, f0_scores)
connected_f0, vuv = harvest_fix_f0_contour(f0_candidates, f0_scores)
smoothed_f0 = harvest_smooth_f0_contour(connected_f0)
idx = np.minimum(len(smoothed_f0) - 1, np.round(temporal_positions * 1000)).astype("int32")
f0 = smoothed_f0[idx]
vuv = vuv[idx]
f0_candidates = f0_candidates
return temporal_positions, f0, vuv, f0_candidates
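# Hedged usage sketch (illustrative helper; assumes `x` is a mono float waveform at
# sample rate `fs`): estimate an F0 contour and voiced/unvoiced decisions on the 5 ms
# frame grid from _world_get_temporal_positions above.
def _example_harvest(x, fs):
    temporal_positions, f0, vuv, f0_candidates = harvest(x, fs)
    return temporal_positions, f0, vuv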
def cheaptrick_get_windowed_waveform(x, fs, current_f0, current_position):
half_window_length = np.round(1.5 * fs / float(current_f0))
base_index = np.arange(-half_window_length, half_window_length + 1)
index = np.round(current_position * fs + 0.001) + base_index + 1
safe_index = np.minimum(len(x), np.maximum(1, np.round(index))).astype("int32")
safe_index = safe_index - 1
segment = x[safe_index]
time_axis = base_index / float(fs) / 1.5
window1 = 0.5 * np.cos(np.pi * time_axis * float(current_f0)) + 0.5
window1 = window1 / np.sqrt(np.sum(window1 ** 2))
waveform = segment * window1 - window1 * np.mean(segment * window1) / np.mean(window1)
return waveform
def cheaptrick_get_power_spectrum(waveform, fs, fft_size, f0):
power_spectrum = np.abs(np.fft.fft(waveform, fft_size)) ** 2
frequency_axis = np.arange(fft_size) / float(fft_size) * float(fs)
ind = frequency_axis < (f0 + fs / fft_size)
low_frequency_axis = frequency_axis[ind]
low_frequency_replica = interp1d(f0 - low_frequency_axis,
power_spectrum[ind], kind="linear",
fill_value="extrapolate")(low_frequency_axis)
p1 = low_frequency_replica[(frequency_axis < f0)[:len(low_frequency_replica)]]
p2 = power_spectrum[(frequency_axis < f0)[:len(power_spectrum)]]
power_spectrum[frequency_axis < f0] = p1 + p2
lb1 = int(fft_size / 2) + 1
lb2 = 1
ub2 = int(fft_size / 2)
power_spectrum[lb1:] = power_spectrum[lb2:ub2][::-1]
return power_spectrum
def cheaptrick_linear_smoothing(power_spectrum, f0, fs, fft_size):
double_frequency_axis = np.arange(2 * fft_size) / float(fft_size) * fs - fs
double_spectrum = np.concatenate([power_spectrum, power_spectrum])
double_segment = np.cumsum(double_spectrum * (fs / float(fft_size)))
center_frequency = np.arange(int(fft_size / 2) + 1) / float(fft_size) * fs
low_levels = cheaptrick_interp1h(double_frequency_axis + fs / float(fft_size) / 2.,
double_segment, center_frequency - f0 / 3.)
high_levels = cheaptrick_interp1h(double_frequency_axis + fs / float(fft_size) / 2.,
double_segment, center_frequency + f0 / 3.)
smoothed_spectrum = (high_levels - low_levels) * 1.5 / f0
return smoothed_spectrum
def cheaptrick_interp1h(x, y, xi):
delta_x = float(x[1] - x[0])
xi = np.maximum(x[0], np.minimum(x[-1], xi))
xi_base = (np.floor((xi - x[0]) / delta_x)).astype("int32")
xi_fraction = (xi - x[0]) / delta_x - xi_base
delta_y = np.zeros_like(y)
delta_y[:-1] = y[1:] - y[:-1]
yi = y[xi_base] + delta_y[xi_base] * xi_fraction
return yi
def cheaptrick_smoothing_with_recovery(smoothed_spectrum, f0, fs, fft_size, q1):
quefrency_axis = np.arange(fft_size) / float(fs)
# 0 is NaN
smoothing_lifter = np.sin(np.pi * f0 * quefrency_axis) / (np.pi * f0 * quefrency_axis)
p = smoothing_lifter[1:int(fft_size / 2)][::-1].copy()
smoothing_lifter[int(fft_size / 2) + 1:] = p
smoothing_lifter[0] = 1.
compensation_lifter = (1 - 2. * q1) + 2. * q1 * np.cos(2 * np.pi * quefrency_axis * f0)
p = compensation_lifter[1:int(fft_size / 2)][::-1].copy()
compensation_lifter[int(fft_size / 2) + 1:] = p
tandem_cepstrum = np.fft.fft(np.log(smoothed_spectrum))
tmp_spectral_envelope = np.exp(np.real(np.fft.ifft(tandem_cepstrum * smoothing_lifter * compensation_lifter)))
spectral_envelope = tmp_spectral_envelope[:int(fft_size / 2) + 1]
return spectral_envelope
def cheaptrick_estimate_one_slice(x, fs, current_f0,
current_position, fft_size, q1):
waveform = cheaptrick_get_windowed_waveform(x, fs, current_f0,
current_position)
power_spectrum = cheaptrick_get_power_spectrum(waveform, fs, fft_size,
current_f0)
smoothed_spectrum = cheaptrick_linear_smoothing(power_spectrum, current_f0,
fs, fft_size)
comb_spectrum = np.concatenate([smoothed_spectrum, smoothed_spectrum[1:-1][::-1]])
spectral_envelope = cheaptrick_smoothing_with_recovery(comb_spectrum,
current_f0, fs, fft_size, q1)
return spectral_envelope
def cheaptrick(x, fs, temporal_positions, f0_sequence,
vuv, fftlen="auto", q1=-0.15):
f0_sequence = f0_sequence.copy()
f0_low_limit = 71
default_f0 = 500
if fftlen == "auto":
fftlen = int(2 ** np.ceil(np.log2(3. * float(fs) / f0_low_limit + 1)))
# raise ValueError("Only fftlen auto currently supported")
fft_size = fftlen
f0_low_limit = fs * 3.0 / (fft_size - 3.0)
f0_sequence[vuv == 0] = default_f0
spectrogram = np.zeros((int(fft_size / 2.) + 1, len(f0_sequence)))
for i in range(len(f0_sequence)):
if f0_sequence[i] < f0_low_limit:
f0_sequence[i] = default_f0
spectrogram[:, i] = cheaptrick_estimate_one_slice(x, fs, f0_sequence[i],
temporal_positions[i], fft_size, q1)
return temporal_positions, spectrogram.T, fs
def d4c_love_train(x, fs, current_f0, current_position, threshold):
vuv = 0
if current_f0 == 0:
return vuv
lowest_f0 = 40
current_f0 = max([current_f0, lowest_f0])
fft_size = int(2 ** np.ceil(np.log2(3. * fs / lowest_f0 + 1)))
boundary0 = int(np.ceil(100 / (float(fs) / fft_size)))
boundary1 = int(np.ceil(4000 / (float(fs) / fft_size)))
boundary2 = int(np.ceil(7900 / (float(fs) / fft_size)))
waveform = d4c_get_windowed_waveform(x, fs, current_f0, current_position,
1.5, 2)
power_spectrum = np.abs(np.fft.fft(waveform, int(fft_size)) ** 2)
power_spectrum[0:boundary0 + 1] = 0.
cumulative_spectrum = np.cumsum(power_spectrum)
if (cumulative_spectrum[boundary1] / cumulative_spectrum[boundary2]) > threshold:
vuv = 1
return vuv
def d4c_get_windowed_waveform(x, fs, current_f0, current_position, half_length,
window_type):
half_window_length = int(np.round(half_length * fs / current_f0))
base_index = np.arange(-half_window_length, half_window_length + 1)
index = np.round(current_position * fs + 0.001) + base_index + 1
safe_index = np.minimum(len(x), np.maximum(1, np.round(index))).astype("int32") - 1
segment = x[safe_index]
time_axis = base_index / float(fs) / float(half_length)
if window_type == 1:
window1 = 0.5 * np.cos(np.pi * time_axis * current_f0) + 0.5
elif window_type == 2:
window1 = 0.08 * np.cos(np.pi * time_axis * current_f0 * 2)
window1 += 0.5 * np.cos(np.pi * time_axis * current_f0) + 0.42
else:
raise ValueError("Unknown window type")
waveform = segment * window1 - window1 * np.mean(segment * window1) / np.mean(window1)
return waveform
def d4c_get_static_centroid(x, fs, current_f0, current_position, fft_size):
waveform1 = d4c_get_windowed_waveform(x, fs, current_f0,
current_position + 1. / current_f0 / 4., 2, 2)
waveform2 = d4c_get_windowed_waveform(x, fs, current_f0,
current_position - 1. / current_f0 / 4., 2, 2)
centroid1 = d4c_get_centroid(waveform1, fft_size)
centroid2 = d4c_get_centroid(waveform2, fft_size)
centroid = d4c_dc_correction(centroid1 + centroid2, fs, fft_size,
current_f0)
return centroid
def d4c_get_centroid(x, fft_size):
fft_size = int(fft_size)
time_axis = np.arange(1, len(x) + 1)
x = x.copy()
x = x / np.sqrt(np.sum(x ** 2))
spectrum = np.fft.fft(x, fft_size)
weighted_spectrum = np.fft.fft(-x * 1j * time_axis, fft_size)
centroid = -(weighted_spectrum.imag) * spectrum.real + spectrum.imag * weighted_spectrum.real
return centroid
def d4c_dc_correction(signal, fs, fft_size, f0):
fft_size = int(fft_size)
frequency_axis = np.arange(fft_size) / fft_size * fs
low_frequency_axis = frequency_axis[frequency_axis < f0 + fs / fft_size]
low_frequency_replica = interp1d(f0 - low_frequency_axis,
signal[frequency_axis < f0 + fs / fft_size],
kind="linear",
fill_value="extrapolate")(low_frequency_axis)
idx = frequency_axis < f0
signal[idx] = low_frequency_replica[idx[:len(low_frequency_replica)]] + signal[idx]
signal[int(fft_size / 2.) + 1:] = signal[1: int(fft_size / 2.)][::-1]
return signal
def d4c_linear_smoothing(group_delay, fs, fft_size, width):
double_frequency_axis = np.arange(2 * fft_size) / float(fft_size) * fs - fs
double_spectrum = np.concatenate([group_delay, group_delay])
double_segment = np.cumsum(double_spectrum * (fs / float(fft_size)))
center_frequency = np.arange(int(fft_size / 2) + 1) / float(fft_size) * fs
low_levels = cheaptrick_interp1h(double_frequency_axis + fs / float(fft_size) / 2.,
double_segment, center_frequency - width / 2.)
high_levels = cheaptrick_interp1h(double_frequency_axis + fs / float(fft_size) / 2.,
double_segment, center_frequency + width / 2.)
smoothed_spectrum = (high_levels - low_levels) / width
return smoothed_spectrum
def d4c_get_smoothed_power_spectrum(waveform, fs, f0, fft_size):
power_spectrum = np.abs(np.fft.fft(waveform, int(fft_size))) ** 2
spectral_envelope = d4c_dc_correction(power_spectrum, fs, fft_size, f0)
spectral_envelope = d4c_linear_smoothing(spectral_envelope, fs, fft_size, f0)
spectral_envelope = np.concatenate([spectral_envelope,
spectral_envelope[1:-1][::-1]])
return spectral_envelope
def d4c_get_static_group_delay(static_centroid, smoothed_power_spectrum, fs, f0,
fft_size):
group_delay = static_centroid / smoothed_power_spectrum
group_delay = d4c_linear_smoothing(group_delay, fs, fft_size, f0 / 2.)
group_delay = np.concatenate([group_delay, group_delay[1:-1][::-1]])
smoothed_group_delay = d4c_linear_smoothing(group_delay, fs, fft_size, f0)
group_delay = group_delay[:int(fft_size / 2) + 1] - smoothed_group_delay
group_delay = np.concatenate([group_delay, group_delay[1:-1][::-1]])
return group_delay
def d4c_get_coarse_aperiodicity(group_delay, fs, fft_size,
frequency_interval, number_of_aperiodicities, window1):
boundary = np.round(fft_size / len(window1) * 8)
half_window_length = np.floor(len(window1) / 2)
coarse_aperiodicity = np.zeros((number_of_aperiodicities, 1))
for i in range(1, number_of_aperiodicities + 1):
center = np.floor(frequency_interval * i / (fs / float(fft_size)))
segment = group_delay[int(center - half_window_length):int(center + half_window_length + 1)] * window1
power_spectrum = np.abs(np.fft.fft(segment, int(fft_size))) ** 2
cumulative_power_spectrum = np.cumsum(np.sort(power_spectrum[:int(fft_size / 2) + 1]))
coarse_aperiodicity[i - 1] = -10 * np.log10(
cumulative_power_spectrum[int(fft_size / 2 - boundary) - 1] / cumulative_power_spectrum[-1])
return coarse_aperiodicity
def d4c_estimate_one_slice(x, fs, current_f0, frequency_interval,
current_position, fft_size, number_of_aperiodicities, window1):
if current_f0 == 0:
coarse_aperiodicity = np.zeros((number_of_aperiodicities, 1))
return coarse_aperiodicity
static_centroid = d4c_get_static_centroid(x, fs, current_f0,
current_position, fft_size)
waveform = d4c_get_windowed_waveform(x, fs, current_f0, current_position,
2, 1)
smoothed_power_spectrum = d4c_get_smoothed_power_spectrum(waveform, fs,
current_f0, fft_size)
static_group_delay = d4c_get_static_group_delay(static_centroid,
smoothed_power_spectrum, fs, current_f0, fft_size)
coarse_aperiodicity = d4c_get_coarse_aperiodicity(static_group_delay,
fs, fft_size, frequency_interval, number_of_aperiodicities,
window1)
return coarse_aperiodicity
def d4c(x, fs, temporal_positions_h, f0_h, vuv_h, threshold="default",
fft_size="auto"):
f0_low_limit = 47
if fft_size == "auto":
fft_size = 2 ** np.ceil(np.log2(4. * fs / f0_low_limit + 1.))
else:
raise ValueError("Only fft_size auto currently supported")
f0_low_limit_for_spectrum = 71
fft_size_for_spectrum = 2 ** np.ceil(np.log2(3 * fs / f0_low_limit_for_spectrum + 1.))
    if threshold == "default":
        threshold = 0.85
upper_limit = 15000
frequency_interval = 3000
f0 = f0_h.copy()
temporal_positions = temporal_positions_h.copy()
f0[vuv_h == 0] = 0.
number_of_aperiodicities = int(
np.floor(np.min([upper_limit, fs / 2. - frequency_interval]) / float(frequency_interval)))
window_length = np.floor(frequency_interval / (fs / float(fft_size))) * 2 + 1
window1 = harvest_nuttall(window_length)
aperiodicity = np.zeros((int(fft_size_for_spectrum / 2) + 1, len(f0)))
coarse_ap = np.zeros((1, len(f0)))
frequency_axis = np.arange(int(fft_size_for_spectrum / 2) + 1) * float(fs) / fft_size_for_spectrum
coarse_axis = np.arange(number_of_aperiodicities + 2) * frequency_interval
coarse_axis[-1] = fs / 2.
for i in range(len(f0)):
r = d4c_love_train(x, fs, f0[i], temporal_positions_h[i], threshold)
if r == 0:
aperiodicity[:, i] = 1 - 0.000000000001
continue
current_f0 = max([f0_low_limit, f0[i]])
coarse_aperiodicity = d4c_estimate_one_slice(x, fs, current_f0,
frequency_interval, temporal_positions[i], fft_size,
number_of_aperiodicities, window1)
coarse_ap[0, i] = coarse_aperiodicity.ravel()[0]
coarse_aperiodicity = np.maximum(0, coarse_aperiodicity - (current_f0 - 100) * 2. / 100.)
piece = np.concatenate([[-60], -coarse_aperiodicity.ravel(), [-0.000000000001]])
part = interp1d(coarse_axis, piece, kind="linear")(frequency_axis) / 20.
aperiodicity[:, i] = 10 ** part
return temporal_positions_h, f0_h, vuv_h, aperiodicity.T, coarse_ap.squeeze()
def world_synthesis_time_base_generation(temporal_positions, f0, fs, vuv,
time_axis, default_f0):
f0_interpolated_raw = interp1d(temporal_positions, f0, kind="linear",
fill_value="extrapolate")(time_axis)
vuv_interpolated = interp1d(temporal_positions, vuv, kind="linear",
fill_value="extrapolate")(time_axis)
vuv_interpolated = vuv_interpolated > 0.5
f0_interpolated = f0_interpolated_raw * vuv_interpolated.astype("float32")
f0_interpolated[f0_interpolated == 0] = f0_interpolated[f0_interpolated == 0] + default_f0
total_phase = np.cumsum(2 * np.pi * f0_interpolated / float(fs))
core = np.mod(total_phase, 2 * np.pi)
core = np.abs(core[1:] - core[:-1])
# account for diff, avoid deprecation warning with [:-1]
pulse_locations = time_axis[:-1][core > (np.pi / 2.)]
pulse_locations_index = np.round(pulse_locations * fs).astype("int32")
return pulse_locations, pulse_locations_index, vuv_interpolated
def world_synthesis_get_spectral_parameters(temporal_positions,
temporal_position_index, spectrogram, amplitude_periodic,
amplitude_random, pulse_locations):
floor_index = int(np.floor(temporal_position_index) - 1)
assert floor_index >= 0
ceil_index = int(np.ceil(temporal_position_index) - 1)
t1 = temporal_positions[floor_index]
t2 = temporal_positions[ceil_index]
if t1 == t2:
spectrum_slice = spectrogram[:, floor_index]
periodic_slice = amplitude_periodic[:, floor_index]
aperiodic_slice = amplitude_random[:, floor_index]
else:
cs = np.concatenate([spectrogram[:, floor_index][None],
spectrogram[:, ceil_index][None]], axis=0)
mmm = max([t1, min([t2, pulse_locations])])
spectrum_slice = interp1d(np.array([t1, t2]), cs,
kind="linear", axis=0)(mmm.copy())
cp = np.concatenate([amplitude_periodic[:, floor_index][None],
amplitude_periodic[:, ceil_index][None]], axis=0)
periodic_slice = interp1d(np.array([t1, t2]), cp,
kind="linear", axis=0)(mmm.copy())
ca = np.concatenate([amplitude_random[:, floor_index][None],
amplitude_random[:, ceil_index][None]], axis=0)
aperiodic_slice = interp1d(np.array([t1, t2]), ca,
kind="linear", axis=0)(mmm.copy())
return spectrum_slice, periodic_slice, aperiodic_slice
"""
Filter data with an FIR filter using the overlap-add method.
from http://projects.scipy.org/scipy/attachment/ticket/837/fftfilt.py
"""
def nextpow2(x):
"""Return the first integer N such that 2**N >= abs(x)"""
return np.ceil(np.log2(np.abs(x)))
def fftfilt(b, x, *n):
"""Filter the signal x with the FIR filter described by the
coefficients in b using the overlap-add method. If the FFT
length n is not specified, it and the overlap-add block length
are selected so as to minimize the computational cost of
the filtering operation."""
N_x = len(x)
N_b = len(b)
# Determine the FFT length to use:
if len(n):
# Use the specified FFT length (rounded up to the nearest
# power of 2), provided that it is no less than the filter
# length:
n = n[0]
if n != int(n) or n <= 0:
            raise ValueError('n must be a positive integer')
if n < N_b:
n = N_b
N_fft = 2 ** nextpow2(n)
else:
if N_x > N_b:
# When the filter length is smaller than the signal,
# choose the FFT length and block size that minimize the
# FLOPS cost. Since the cost for a length-N FFT is
# (N/2)*log2(N) and the filtering operation of each block
# involves 2 FFT operations and N multiplications, the
# cost of the overlap-add method for 1 length-N block is
# N*(1+log2(N)). For the sake of efficiency, only FFT
# lengths that are powers of 2 are considered:
N = 2 ** np.arange(np.ceil(np.log2(N_b)),
np.floor(np.log2(N_x)))
cost = np.ceil(N_x / (N - N_b + 1)) * N * (np.log2(N) + 1)
N_fft = N[np.argmin(cost)]
else:
# When the filter length is at least as long as the signal,
# filter the signal using a single block:
N_fft = 2 ** nextpow2(N_b + N_x - 1)
N_fft = int(N_fft)
# Compute the block length:
L = int(N_fft - N_b + 1)
# Compute the transform of the filter:
H = np.fft.fft(b, N_fft)
    y = np.zeros(N_x, dtype=np.float32)
    i = 0
    while i < N_x:
        il = min([i + L, N_x])
        k = min([i + N_fft, N_x])
        yt = np.fft.ifft(np.fft.fft(x[i:il], N_fft) * H, N_fft)  # Overlap..
        y[i:k] = y[i:k] + yt[:k - i].real  # and add (keep only the real part)
i += L
return y
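# Hedged usage sketch (illustrative helper; firwin is already used above in
# polyphase_lowpass, so it is assumed to be imported at module level): apply a
# linear-phase FIR low-pass to a long 1D signal with overlap-add.
def _example_fftfilt(x):
    b = firwin(65, 0.25)
    return fftfilt(b, x)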
def world_synthesis(f0_d4c, vuv_d4c, aperiodicity_d4c,
spectrogram_ct, fs_ct, random_seed=1999):
# swap 0 and 1 axis
spectrogram_ct = spectrogram_ct.T
fs = fs_ct
# coarse -> fine aper
if len(aperiodicity_d4c.shape) == 1 or aperiodicity_d4c.shape[1] == 1:
print("Coarse aperiodicity detected - interpolating to full size")
aper = np.zeros_like(spectrogram_ct)
if len(aperiodicity_d4c.shape) == 1:
aperiodicity_d4c = aperiodicity_d4c[None, :]
else:
aperiodicity_d4c = aperiodicity_d4c.T
coarse_aper_d4c = aperiodicity_d4c
frequency_interval = 3000
upper_limit = 15000
number_of_aperiodicities = int(
np.floor(np.min([upper_limit, fs / 2. - frequency_interval]) / float(frequency_interval)))
coarse_axis = np.arange(number_of_aperiodicities + 2) * frequency_interval
coarse_axis[-1] = fs / 2.
f0_low_limit_for_spectrum = 71
fft_size_for_spectrum = 2 ** np.ceil(np.log2(3 * fs / f0_low_limit_for_spectrum + 1.))
frequency_axis = np.arange(int(fft_size_for_spectrum / 2) + 1) * float(fs) / fft_size_for_spectrum
for i in range(len(f0_d4c)):
ca = coarse_aper_d4c[0, i]
cf = f0_d4c[i]
coarse_aperiodicity = np.maximum(0, ca - (cf - 100) * 2. / 100.)
            piece = np.concatenate([[-60], -coarse_aperiodicity.ravel(), [-0.000000000001]])
part = interp1d(coarse_axis, piece, kind="linear")(frequency_axis) / 20.
aper[:, i] = 10 ** part
aperiodicity_d4c = aper
else:
aperiodicity_d4c = aperiodicity_d4c.T
default_f0 = 500.
    random_state = np.random.RandomState(random_seed)
spectrogram = spectrogram_ct
aperiodicity = aperiodicity_d4c
    # max length cap; inputs longer than this will raise an error
max_len = 5000000
_, temporal_positions = _world_get_temporal_positions(max_len, fs)
temporal_positions = temporal_positions[:spectrogram.shape[1]]
# temporal_positions = temporal_positions_d4c
vuv = vuv_d4c
f0 = f0_d4c
time_axis = np.arange(temporal_positions[0], temporal_positions[-1],
1. / fs)
y = 0. * time_axis
r = world_synthesis_time_base_generation(temporal_positions, f0, fs, vuv,
time_axis, default_f0)
pulse_locations, pulse_locations_index, interpolated_vuv = r
fft_size = int((len(spectrogram) - 1) * 2)
base_index = np.arange(-fft_size / 2, fft_size / 2) + 1
y_length = len(y)
tmp_complex_cepstrum = np.zeros((fft_size,), dtype=np.complex128)
latter_index = np.arange(int(fft_size / 2) + 1, fft_size + 1) - 1
temporal_position_index = interp1d(temporal_positions, np.arange(1, len(temporal_positions) + 1), kind="linear",
fill_value="extrapolate")(pulse_locations)
    # clip to the valid 1-based range; get_spectral_parameters converts to 0-based
    temporal_position_index = np.maximum(1, np.minimum(len(temporal_positions),
                                         temporal_position_index))
amplitude_aperiodic = aperiodicity ** 2
amplitude_periodic = np.maximum(0.001, (1. - amplitude_aperiodic))
for i in range(len(pulse_locations_index)):
spectrum_slice, periodic_slice, aperiodic_slice = world_synthesis_get_spectral_parameters(
temporal_positions, temporal_position_index[i], spectrogram,
amplitude_periodic, amplitude_aperiodic, pulse_locations[i])
idx = min(len(pulse_locations_index), i + 2) - 1
noise_size = pulse_locations_index[idx] - pulse_locations_index[i]
output_buffer_index = np.maximum(1, np.minimum(y_length, pulse_locations_index[i] + 1 + base_index)).astype(
"int32") - 1
if interpolated_vuv[pulse_locations_index[i]] >= 0.5:
tmp_periodic_spectrum = spectrum_slice * periodic_slice
# eps in matlab/octave
tmp_periodic_spectrum[tmp_periodic_spectrum == 0] = 2.2204E-16
periodic_spectrum = np.concatenate([tmp_periodic_spectrum,
tmp_periodic_spectrum[1:-1][::-1]])
tmp_cepstrum = np.real(np.fft.fft(np.log(np.abs(periodic_spectrum)) / 2.))
tmp_complex_cepstrum[latter_index] = tmp_cepstrum[latter_index] * 2
tmp_complex_cepstrum[0] = tmp_cepstrum[0]
response = np.fft.fftshift(np.real(np.fft.ifft(np.exp(np.fft.ifft(
tmp_complex_cepstrum)))))
y[output_buffer_index] += response * np.sqrt(
max([1, noise_size]))
tmp_aperiodic_spectrum = spectrum_slice * aperiodic_slice
else:
tmp_aperiodic_spectrum = spectrum_slice
tmp_aperiodic_spectrum[tmp_aperiodic_spectrum == 0] = 2.2204E-16
aperiodic_spectrum = np.concatenate([tmp_aperiodic_spectrum,
tmp_aperiodic_spectrum[1:-1][::-1]])
tmp_cepstrum = np.real(np.fft.fft(np.log(np.abs(aperiodic_spectrum)) / 2.))
tmp_complex_cepstrum[latter_index] = tmp_cepstrum[latter_index] * 2
tmp_complex_cepstrum[0] = tmp_cepstrum[0]
rc = np.fft.ifft(tmp_complex_cepstrum)
erc = np.exp(rc)
response = np.fft.fftshift(np.real(np.fft.ifft(erc)))
noise_input = random_state.randn(max([3, noise_size]), )
y[output_buffer_index] = y[output_buffer_index] + fftfilt(noise_input - np.mean(noise_input), response)
return y
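# Hedged usage sketch (illustrative helper; assumes `x` is a mono float waveform at
# sample rate `fs`): full WORLD-style analysis/synthesis loop using the functions above -
# harvest for F0, cheaptrick for the spectral envelope, d4c for aperiodicity, then
# world_synthesis to resynthesize.
def _example_world_pipeline(x, fs):
    temporal_positions, f0, vuv, _ = harvest(x, fs)
    _, spectrogram_ct, fs_ct = cheaptrick(x, fs, temporal_positions, f0, vuv)
    _, f0_d4c, vuv_d4c, aper_d4c, _ = d4c(x, fs, temporal_positions, f0, vuv)
    return world_synthesis(f0_d4c, vuv_d4c, aper_d4c, spectrogram_ct, fs_ct)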
def _mgc_b2c(wc, c, alpha):
wc_o = np.zeros_like(wc)
desired_order = len(wc) - 1
for i in range(0, len(c))[::-1]:
prev = copy.copy(wc_o)
wc_o[0] = c[i]
if desired_order >= 1:
wc_o[1] = (1. - alpha ** 2) * prev[0] + alpha * prev[1]
for m in range(2, desired_order + 1):
wc_o[m] = prev[m - 1] + alpha * (prev[m] - wc_o[m - 1])
return wc_o
def _mgc_ptrans(p, m, alpha):
d = 0.
o = 0.
d = p[m]
for i in range(1, m)[::-1]:
o = p[i] + alpha * d
d = p[i]
p[i] = o
o = alpha * d
p[0] = (1. - alpha ** 2) * p[0] + 2 * o
def _mgc_qtrans(q, m, alpha):
d = q[1]
for i in range(2, 2 * m + 1):
o = q[i] + alpha * d
d = q[i]
q[i] = o
def _mgc_gain(er, c, m, g):
t = 0.
if g != 0:
for i in range(1, m + 1):
t += er[i] * c[i]
return er[0] + g * t
else:
return er[0]
def _mgc_fill_toeplitz(A, t):
n = len(t)
for i in range(n):
for j in range(n):
A[i, j] = t[i - j] if i - j >= 0 else t[j - i]
def _mgc_fill_hankel(A, t):
n = len(t) // 2 + 1
for i in range(n):
for j in range(n):
A[i, j] = t[i + j]
def _mgc_ignorm(c, gamma):
if gamma == 0.:
c[0] = np.log(c[0])
return c
gain = c[0] ** gamma
c[1:] *= gain
c[0] = (gain - 1.) / gamma
def _mgc_gnorm(c, gamma):
if gamma == 0.:
c[0] = np.exp(c[0])
return c
gain = 1. + gamma * c[0]
c[1:] /= gain
c[0] = gain ** (1. / gamma)
def _mgc_b2mc(mc, alpha):
m = len(mc)
o = 0.
d = mc[m - 1]
for i in range(m - 1)[::-1]:
o = mc[i] + alpha * d
d = mc[i]
mc[i] = o
def _mgc_mc2b(mc, alpha):
itr = list(range(len(mc) - 1))[::-1]
for i in itr:
mc[i] = mc[i] - alpha * mc[i + 1]
def _mgc_gc2gc(src_ceps, src_gamma=0., dst_order=None, dst_gamma=0.):
    if dst_order is None:
dst_order = len(src_ceps) - 1
dst_ceps = np.zeros((dst_order + 1,), dtype=src_ceps.dtype)
dst_order = len(dst_ceps) - 1
m1 = len(src_ceps) - 1
dst_ceps[0] = copy.deepcopy(src_ceps[0])
for m in range(2, dst_order + 2):
ss1 = 0.
ss2 = 0.
min_1 = m1 if (m1 < m - 1) else m - 2
itr = list(range(2, min_1 + 2))
if len(itr) < 1:
if min_1 + 1 == 2:
itr = [2]
else:
itr = []
"""
# old slower version
for k in itr:
assert k >= 1
assert (m - k) >= 0
cc = src_ceps[k - 1] * dst_ceps[m - k]
ss2 += (k - 1) * cc
ss1 += (m - k) * cc
"""
if len(itr) > 0:
itr = np.array(itr)
cc_a = src_ceps[itr - 1] * dst_ceps[m - itr]
ss2 += ((itr - 1) * cc_a).sum()
ss1 += ((m - itr) * cc_a).sum()
if m <= m1 + 1:
dst_ceps[m - 1] = src_ceps[m - 1] + (dst_gamma * ss2 - src_gamma * ss1) / (m - 1.)
else:
dst_ceps[m - 1] = (dst_gamma * ss2 - src_gamma * ss1) / (m - 1.)
return dst_ceps
def _mgc_newton(mgc_stored, periodogram, order, alpha, gamma,
recursion_order, iter_number, y_fft, z_fft, cr, pr, rr, ri,
qr, qi, Tm, Hm, Tm_plus_Hm, b):
# a lot of inplace operations to match the Julia code
cr[1:order + 1] = mgc_stored[1:order + 1]
if alpha != 0:
cr_res = _mgc_b2c(cr[:recursion_order + 1], cr[:order + 1], -alpha)
cr[:recursion_order + 1] = cr_res[:]
    y = sp.fftpack.fft(cr.astype("float64"))
c = mgc_stored
x = periodogram
if gamma != 0.:
gamma_inv = 1. / gamma
else:
gamma_inv = np.inf
if gamma == -1.:
pr[:] = copy.deepcopy(x)
new_pr = copy.deepcopy(pr)
elif gamma == 0.:
pr[:] = copy.deepcopy(x) / np.exp(2 * np.real(y))
new_pr = copy.deepcopy(pr)
else:
tr = 1. + gamma * np.real(y)
ti = -gamma * np.imag(y)
trr = tr * tr
tii = ti * ti
s = trr + tii
t = x * np.power(s, (-gamma_inv))
t /= s
pr[:] = t
rr[:] = tr * t
ri[:] = ti * t
t /= s
qr[:] = (trr - tii) * t
s = tr * ti * t
qi[:] = (s + s)
new_pr = copy.deepcopy(pr)
if gamma != -1.:
"""
print()
print(pr.sum())
print(rr.sum())
print(ri.sum())
print(qr.sum())
print(qi.sum())
print()
"""
pass
y_fft[:] = copy.deepcopy(pr) + 0.j
z_fft[:] = np.fft.fft(y_fft) / len(y_fft)
pr[:] = copy.deepcopy(np.real(z_fft))
if alpha != 0.:
idx_1 = pr[:2 * order + 1]
idx_2 = pr[:recursion_order + 1]
idx_3 = _mgc_b2c(idx_1, idx_2, alpha)
pr[:2 * order + 1] = idx_3[:]
if gamma == 0. or gamma == -1.:
qr[:2 * order + 1] = pr[:2 * order + 1]
rr[:order + 1] = copy.deepcopy(pr[:order + 1])
else:
for i in range(len(qr)):
y_fft[i] = qr[i] + 1j * qi[i]
z_fft[:] = np.fft.fft(y_fft) / len(y_fft)
qr[:] = np.real(z_fft)
for i in range(len(rr)):
y_fft[i] = rr[i] + 1j * ri[i]
z_fft[:] = np.fft.fft(y_fft) / len(y_fft)
rr[:] = np.real(z_fft)
if alpha != 0.:
qr_new = _mgc_b2c(qr[:recursion_order + 1], qr[:recursion_order + 1], alpha)
qr[:recursion_order + 1] = qr_new[:]
rr_new = _mgc_b2c(rr[:order + 1], rr[:recursion_order + 1], alpha)
rr[:order + 1] = rr_new[:]
if alpha != 0:
_mgc_ptrans(pr, order, alpha)
_mgc_qtrans(qr, order, alpha)
eta = 0.
if gamma != -1.:
eta = _mgc_gain(rr, c, order, gamma)
c[0] = np.sqrt(eta)
if gamma == -1.:
qr[:] = 0.
elif gamma != 0.:
for i in range(2, 2 * order + 1):
qr[i] *= 1. + gamma
te = pr[:order]
_mgc_fill_toeplitz(Tm, te)
he = qr[2: 2 * order + 1]
_mgc_fill_hankel(Hm, he)
Tm_plus_Hm[:] = Hm[:] + Tm[:]
b[:order] = rr[1:order + 1]
res = np.linalg.solve(Tm_plus_Hm, b)
b[:] = res[:]
c[1:order + 1] += res[:order]
if gamma == -1.:
eta = _mgc_gain(rr, c, order, gamma)
c[0] = np.sqrt(eta)
return np.log(eta), new_pr
def _mgc_mgcepnorm(b_gamma, alpha, gamma, otype):
if otype != 0:
raise ValueError("Not yet implemented for otype != 0")
mgc = copy.deepcopy(b_gamma)
_mgc_ignorm(mgc, gamma)
_mgc_b2mc(mgc, alpha)
return mgc
def _sp2mgc(sp, order=20, alpha=0.35, gamma=-0.41, miniter=2, maxiter=30, criteria=0.001, otype=0, verbose=False):
# Based on r9y9 Julia code
# https://github.com/r9y9/MelGeneralizedCepstrums.jl
periodogram = np.abs(sp) ** 2
recursion_order = len(periodogram) - 1
slen = len(periodogram)
iter_number = 1
def _z():
return np.zeros((slen,), dtype="float64")
def _o():
return np.zeros((order,), dtype="float64")
def _o2():
return np.zeros((order, order), dtype="float64")
cr = _z()
pr = _z()
rr = _z()
ri = _z().astype("float128")
qr = _z()
qi = _z().astype("float128")
Tm = _o2()
Hm = _o2()
Tm_plus_Hm = _o2()
b = _o()
y = _z() + 0j
z = _z() + 0j
b_gamma = np.zeros((order + 1,), dtype="float64")
# return pr_new due to oddness with Julia having different numbers
# in pr at end of function vs back in this scope
eta0, pr_new = _mgc_newton(b_gamma, periodogram, order, alpha, -1.,
recursion_order, iter_number, y, z, cr, pr, rr,
ri, qr, qi, Tm, Hm, Tm_plus_Hm, b)
pr[:] = pr_new
"""
print(eta0)
print(sum(b_gamma))
print(sum(periodogram))
print(order)
print(alpha)
print(recursion_order)
print(sum(y))
print(sum(cr))
print(sum(z))
print(sum(pr))
print(sum(rr))
print(sum(qi))
print(Tm.sum())
print(Hm.sum())
print(sum(b))
raise ValueError()
"""
if gamma != -1.:
d = np.zeros((order + 1,), dtype="float64")
if alpha != 0.:
_mgc_ignorm(b_gamma, -1.)
_mgc_b2mc(b_gamma, alpha)
d = copy.deepcopy(b_gamma)
_mgc_gnorm(d, -1.)
# numbers are slightly different here - numerical diffs?
else:
d = copy.deepcopy(b_gamma)
b_gamma = _mgc_gc2gc(d, -1., order, gamma)
if alpha != 0.:
_mgc_ignorm(b_gamma, gamma)
_mgc_mc2b(b_gamma, alpha)
_mgc_gnorm(b_gamma, gamma)
if gamma != -1.:
eta_t = eta0
for i in range(1, maxiter + 1):
eta, pr_new = _mgc_newton(b_gamma, periodogram, order, alpha,
gamma, recursion_order, i, y, z, cr, pr, rr,
ri, qr, qi, Tm, Hm, Tm_plus_Hm, b)
pr[:] = pr_new
"""
print(eta0)
print(sum(b_gamma))
print(sum(periodogram))
print(order)
print(alpha)
print(recursion_order)
print(sum(y))
print(sum(cr))
print(sum(z))
print(sum(pr))
print(sum(rr))
print(sum(qi))
print(Tm.sum())
print(Hm.sum())
print(sum(b))
raise ValueError()
"""
err = np.abs((eta_t - eta) / eta)
if verbose:
print(("iter %i, criterion: %f" % (i, err)))
if i >= miniter:
if err < criteria:
if verbose:
print(("optimization complete at iter %i" % i))
break
eta_t = eta
mgc_arr = _mgc_mgcepnorm(b_gamma, alpha, gamma, otype)
return mgc_arr
_sp_convert_results = []
def _sp_collect_result(result):
_sp_convert_results.append(result)
def _sp_convert(c_i, order, alpha, gamma, miniter, maxiter, criteria,
otype, verbose):
i = c_i[0]
tot_i = c_i[1]
sp_i = c_i[2]
r_i = (i, _sp2mgc(sp_i, order=order, alpha=alpha, gamma=gamma,
miniter=miniter, maxiter=maxiter, criteria=criteria,
otype=otype, verbose=verbose))
return r_i
def sp2mgc(sp, order=20, alpha=0.35, gamma=-0.41, miniter=2,
maxiter=30, criteria=0.001, otype=0, verbose=False):
"""
Accepts 1D or 2D one-sided spectrum (complex or real valued).
If 2D, assumes time is axis 0.
Returns mel generalized cepstral coefficients.
Based on r9y9 Julia code
https://github.com/r9y9/MelGeneralizedCepstrums.jl
"""
if len(sp.shape) == 1:
        sp = np.concatenate((sp, sp[1:][::-1]), axis=0)  # mirror the one-sided spectrum (1D input)
return _sp2mgc(sp, order=order, alpha=alpha, gamma=gamma,
miniter=miniter, maxiter=maxiter, criteria=criteria,
otype=otype, verbose=verbose)
else:
sp = np.concatenate((sp, sp[:, 1:][:, ::-1]), axis=1)
# Slooow, use multiprocessing to speed up a bit
# http://blog.shenwei.me/python-multiprocessing-pool-difference-between-map-apply-map_async-apply_async/
# http://stackoverflow.com/questions/5666576/show-the-progress-of-a-python-multiprocessing-pool-map-call
c = [(i + 1, sp.shape[0], sp[i]) for i in range(sp.shape[0])]
p = Pool()
start = time.time()
if verbose:
print(("Starting conversion of %i frames" % sp.shape[0]))
print("This may take some time...")
# takes ~360s for 630 frames, 1 process
itr = p.map_async(
functools.partial(_sp_convert, order=order, alpha=alpha, gamma=gamma, miniter=miniter, maxiter=maxiter,
criteria=criteria, otype=otype, verbose=False), c, callback=_sp_collect_result)
sz = len(c) // itr._chunksize
if (sz * itr._chunksize) != len(c):
sz += 1
last_remaining = None
while True:
remaining = itr._number_left
if verbose:
if remaining != last_remaining:
last_remaining = remaining
print(("%i chunks of %i complete" % (sz - remaining, sz)))
if itr.ready():
break
time.sleep(.5)
"""
# takes ~455s for 630 frames
itr = p.imap_unordered(functools.partial(_sp_convert, order=order, alpha=alpha, gamma=gamma, miniter=miniter, maxiter=maxiter, criteria=criteria, otype=otype, verbose=False), c)
res = []
# print ~every 5%
mod = int(len(c)) // 20
if mod < 1:
mod = 1
for i, res_i in enumerate(itr, 1):
res.append(res_i)
if i % mod == 0 or i == 1:
print("%i of %i complete" % (i, len(c)))
"""
p.close()
p.join()
stop = time.time()
if verbose:
print(("Processed %i frames in %s seconds" % (sp.shape[0], stop - start)))
# map_async result comes in chunks
flat = [a_i for a in _sp_convert_results for a_i in a]
final = [o[1] for o in sorted(flat, key=lambda x: x[0])]
for i in range(len(_sp_convert_results)):
_sp_convert_results.pop()
return np.array(final)
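# Added illustration (a sketch, not part of the original module): batch use of
# sp2mgc on a stack of one-sided spectra. The array `sp_frames` and its shape
# are assumptions for the example only.
"""
sp_frames = np.abs(np.random.randn(10, 513)) + 1e-3  # hypothetical (n_frames, fftlen // 2 + 1) magnitudes
mgc_frames = sp2mgc(sp_frames, order=20, alpha=0.35, gamma=-0.41, verbose=True)
# mgc_frames -> shape (n_frames, order + 1)
"""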
def win2mgc(windowed_signal, order=20, alpha=0.35, gamma=-0.41, miniter=2,
maxiter=30, criteria=0.001, otype=0, verbose=False):
"""
Accepts 1D or 2D array of windowed signal frames.
If 2D, assumes time is axis 0.
Returns mel generalized cepstral coefficients.
Based on r9y9 Julia code
https://github.com/r9y9/MelGeneralizedCepstrums.jl
"""
if len(windowed_signal.shape) == 1:
sp = np.fft.fft(windowed_signal)
return _sp2mgc(sp, order=order, alpha=alpha, gamma=gamma,
miniter=miniter, maxiter=maxiter, criteria=criteria,
otype=otype, verbose=verbose)
else:
raise ValueError("2D input not yet complete for win2mgc")
def _mgc_freqt(wc, c, alpha):
prev = np.zeros_like(wc)
dst_order = len(wc) - 1
wc *= 0
m1 = len(c) - 1
for i in range(-m1, 1, 1):
prev[:] = wc
if dst_order >= 0:
wc[0] = c[-i] + alpha * prev[0]
if dst_order >= 1:
wc[1] = (1. - alpha * alpha) * prev[0] + alpha * prev[1]
for m in range(2, dst_order + 1):
wc[m] = prev[m - 1] + alpha * (prev[m] - wc[m - 1])
def _mgc_mgc2mgc(src_ceps, src_alpha, src_gamma, dst_order, dst_alpha, dst_gamma):
dst_ceps = np.zeros((dst_order + 1,))
alpha = (dst_alpha - src_alpha) / (1. - dst_alpha * src_alpha)
if alpha == 0.:
new_dst_ceps = copy.deepcopy(src_ceps)
_mgc_gnorm(new_dst_ceps, src_gamma)
dst_ceps = _mgc_gc2gc(new_dst_ceps, src_gamma, dst_order, dst_gamma)
_mgc_ignorm(dst_ceps, dst_gamma)
else:
_mgc_freqt(dst_ceps, src_ceps, alpha)
_mgc_gnorm(dst_ceps, src_gamma)
new_dst_ceps = copy.deepcopy(dst_ceps)
dst_ceps = _mgc_gc2gc(new_dst_ceps, src_gamma, dst_order, dst_gamma)
_mgc_ignorm(dst_ceps, dst_gamma)
return dst_ceps
_mgc_convert_results = []
def _mgc_collect_result(result):
_mgc_convert_results.append(result)
def _mgc_convert(c_i, alpha, gamma, fftlen):
i = c_i[0]
tot_i = c_i[1]
mgc_i = c_i[2]
r_i = (i, _mgc_mgc2mgc(mgc_i, src_alpha=alpha, src_gamma=gamma,
dst_order=fftlen // 2, dst_alpha=0., dst_gamma=0.))
return r_i
def mgc2sp(mgc_arr, alpha=0.35, gamma=-0.41, fftlen="auto", fs=None,
mode="world_pad", verbose=False):
"""
Accepts 1D or 2D array of mgc
If 2D, assume time is on axis 0
Returns reconstructed smooth spectrum
Based on r9y9 Julia code
https://github.com/r9y9/MelGeneralizedCepstrums.jl
"""
if mode != "world_pad":
raise ValueError("Only currently supported mode is world_pad")
if fftlen == "auto":
        if fs is None:
raise ValueError("fs must be provided for fftlen 'auto'")
f0_low_limit = 71
fftlen = int(2 ** np.ceil(np.log2(3. * float(fs) / f0_low_limit + 1)))
if verbose:
print(("setting fftlen to %i" % fftlen))
if len(mgc_arr.shape) == 1:
c = _mgc_mgc2mgc(mgc_arr, alpha, gamma, fftlen // 2, 0., 0.)
buf = np.zeros((fftlen,), dtype=c.dtype)
buf[:len(c)] = c[:]
return np.fft.rfft(buf)
else:
# Slooow, use multiprocessing to speed up a bit
# http://blog.shenwei.me/python-multiprocessing-pool-difference-between-map-apply-map_async-apply_async/
# http://stackoverflow.com/questions/5666576/show-the-progress-of-a-python-multiprocessing-pool-map-call
c = [(i + 1, mgc_arr.shape[0], mgc_arr[i]) for i in range(mgc_arr.shape[0])]
p = Pool()
start = time.time()
if verbose:
print(("Starting conversion of %i frames" % mgc_arr.shape[0]))
print("This may take some time...")
# itr = p.map(functools.partial(_mgc_convert, alpha=alpha, gamma=gamma, fftlen=fftlen), c)
# raise ValueError()
# 500.1 s for 630 frames process
itr = p.map_async(functools.partial(_mgc_convert, alpha=alpha, gamma=gamma, fftlen=fftlen), c,
callback=_mgc_collect_result)
sz = len(c) // itr._chunksize
if (sz * itr._chunksize) != len(c):
sz += 1
last_remaining = None
while True:
remaining = itr._number_left
if verbose:
if last_remaining != remaining:
last_remaining = remaining
print(("%i chunks of %i complete" % (sz - remaining, sz)))
if itr.ready():
break
time.sleep(.5)
p.close()
p.join()
stop = time.time()
if verbose:
print(("Processed %i frames in %s seconds" % (mgc_arr.shape[0], stop - start)))
# map_async result comes in chunks
flat = [a_i for a in _mgc_convert_results for a_i in a]
final = [o[1] for o in sorted(flat, key=lambda x: x[0])]
for i in range(len(_mgc_convert_results)):
_mgc_convert_results.pop()
c = np.array(final)
buf = np.zeros((len(c), fftlen), dtype=c.dtype)
buf[:, :c.shape[1]] = c[:]
return np.exp(np.fft.rfft(buf, axis=-1).real)
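# Added illustration (sketch): inverting mel generalized cepstra back to a
# smooth spectrum. `mgc_frames` and the 16 kHz sample rate are assumptions,
# e.g. coming from a prior sp2mgc/WORLD analysis step.
"""
sp_smooth = mgc2sp(mgc_frames, alpha=0.35, gamma=-0.41, fftlen="auto", fs=16000)
# 2D input -> one (fftlen // 2 + 1)-point smooth spectrum per frame
"""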
def implot(arr, scale=None, title="", cmap="gray"):
import matplotlib.pyplot as plt
if scale is "specgram":
# plotting part
mag = 20. * np.log10(np.abs(arr))
# Transpose so time is X axis, and invert y axis so
# frequency is low at bottom
mag = mag.T[::-1, :]
else:
mag = arr
f, ax = plt.subplots()
ax.matshow(mag, cmap=cmap)
plt.axis("off")
x1 = mag.shape[0]
y1 = mag.shape[1]
def autoaspect(x_range, y_range):
"""
The aspect to make a plot square with ax.set_aspect in Matplotlib
"""
mx = max(x_range, y_range)
mn = min(x_range, y_range)
if x_range <= y_range:
return mx / float(mn)
else:
return mn / float(mx)
asp = autoaspect(x1, y1)
ax.set_aspect(asp)
plt.title(title)
def test_lpc_to_lsf():
# Matlab style vectors for testing
# lsf = [0.7842 1.5605 1.8776 1.8984 2.3593]
# a = [1.0000 0.6149 0.9899 0.0000 0.0031 -0.0082];
lsf = [[0.7842, 1.5605, 1.8776, 1.8984, 2.3593],
[0.7842, 1.5605, 1.8776, 1.8984, 2.3593]]
a = [[1.0000, 0.6149, 0.9899, 0.0000, 0.0031, -0.0082],
[1.0000, 0.6149, 0.9899, 0.0000, 0.0031, -0.0082]]
a = np.array(a)
lsf = np.array(lsf)
lsf_r = lpc_to_lsf(a)
assert_almost_equal(lsf, lsf_r, decimal=4)
a_r = lsf_to_lpc(lsf)
assert_almost_equal(a, a_r, decimal=4)
lsf_r = lpc_to_lsf(a[0])
assert_almost_equal(lsf[0], lsf_r, decimal=4)
a_r = lsf_to_lpc(lsf[0])
assert_almost_equal(a[0], a_r, decimal=4)
def test_lpc_analysis_truncate():
# Test that truncate doesn't crash and actually truncates
[a, g, e] = lpc_analysis(np.random.randn(85), order=8, window_step=80,
window_size=80, emphasis=0.9, truncate=True)
assert (a.shape[0] == 1)
def test_feature_build():
samplerate, X = fetch_sample_music()
# MATLAB wavread does normalization
X = X.astype('float32') / (2 ** 15)
wsz = 256
wst = 128
a, g, e = lpc_analysis(X, order=8, window_step=wst,
window_size=wsz, emphasis=0.9,
copy=True)
v, p = voiced_unvoiced(X, window_size=wsz,
window_step=wst)
c = compress(e, n_components=64)
# First component of a is always 1
combined = np.hstack((a[:, 1:], g, c[:a.shape[0]]))
features = np.zeros((a.shape[0], 2 * combined.shape[1]))
start_indices = v * combined.shape[1]
start_indices = start_indices.astype('int32')
end_indices = (v + 1) * combined.shape[1]
end_indices = end_indices.astype('int32')
for i in range(features.shape[0]):
features[i, start_indices[i]:end_indices[i]] = combined[i]
def test_mdct_and_inverse():
fs, X = fetch_sample_music()
X_dct = mdct_slow(X)
X_r = imdct_slow(X_dct)
assert np.all(np.abs(X_r[:len(X)] - X) < 1E-3)
assert np.abs(X_r[:len(X)] - X).mean() < 1E-6
def test_all():
test_lpc_analysis_truncate()
test_feature_build()
test_lpc_to_lsf()
test_mdct_and_inverse()
def run_lpc_example():
# ae.wav is from
# http://www.linguistics.ucla.edu/people/hayes/103/Charts/VChart/ae.wav
# Partially following the formant tutorial here
# http://www.mathworks.com/help/signal/ug/formant-estimation-with-lpc-coefficients.html
samplerate, X = fetch_sample_music()
c = overlap_dct_compress(X, 200, 400)
X_r = overlap_dct_uncompress(c, 400)
wavfile.write('lpc_uncompress.wav', samplerate, soundsc(X_r))
print("Calculating sinusoids")
f_hz, m = sinusoid_analysis(X, input_sample_rate=16000)
Xs_sine = sinusoid_synthesis(f_hz, m)
orig_fname = 'lpc_orig.wav'
sine_fname = 'lpc_sine_synth.wav'
wavfile.write(orig_fname, samplerate, soundsc(X))
wavfile.write(sine_fname, samplerate, soundsc(Xs_sine))
lpc_order_list = [8, ]
dct_components_list = [200, ]
window_size_list = [400, ]
# Seems like a dct component size of ~2/3rds the step
# (1/3rd the window for 50% overlap) works well.
for lpc_order in lpc_order_list:
for dct_components in dct_components_list:
for window_size in window_size_list:
# 50% overlap
window_step = window_size // 2
a, g, e = lpc_analysis(X, order=lpc_order,
window_step=window_step,
window_size=window_size, emphasis=0.9,
copy=True)
print("Calculating LSF")
lsf = lpc_to_lsf(a)
# Not window_size - window_step! Need to implement overlap
print("Calculating compression")
c = dct_compress(e, n_components=dct_components,
window_size=window_step)
co = overlap_dct_compress(e, n_components=dct_components,
window_size=window_step)
block_excitation = dct_uncompress(c, window_size=window_step)
overlap_excitation = overlap_dct_uncompress(co,
window_size=window_step)
a_r = lsf_to_lpc(lsf)
f, m = lpc_to_frequency(a_r, g)
block_lpc = lpc_synthesis(a_r, g, block_excitation,
emphasis=0.9,
window_step=window_step)
overlap_lpc = lpc_synthesis(a_r, g, overlap_excitation,
emphasis=0.9,
window_step=window_step)
v, p = voiced_unvoiced(X, window_size=window_size,
window_step=window_step)
noisy_lpc = lpc_synthesis(a_r, g, voiced_frames=v,
emphasis=0.9,
window_step=window_step)
if dct_components is None:
dct_components = window_size
noisy_fname = 'lpc_noisy_synth_%iwin_%ilpc_%idct.wav' % (
window_size, lpc_order, dct_components)
block_fname = 'lpc_block_synth_%iwin_%ilpc_%idct.wav' % (
window_size, lpc_order, dct_components)
overlap_fname = 'lpc_overlap_synth_%iwin_%ilpc_%idct.wav' % (
window_size, lpc_order, dct_components)
wavfile.write(noisy_fname, samplerate, soundsc(noisy_lpc))
wavfile.write(block_fname, samplerate,
soundsc(block_lpc))
wavfile.write(overlap_fname, samplerate,
soundsc(overlap_lpc))
def run_fft_vq_example():
n_fft = 512
time_smoothing = 4
def _pre(list_of_data):
f_c = np.vstack([stft(dd, n_fft) for dd in list_of_data])
if len(f_c) % time_smoothing != 0:
newlen = len(f_c) - len(f_c) % time_smoothing
f_c = f_c[:newlen]
f_mag = complex_to_abs(f_c)
f_phs = complex_to_angle(f_c)
f_sincos = angle_to_sin_cos(f_phs)
f_r = np.hstack((f_mag, f_sincos))
f_r = f_r.reshape((len(f_r) // time_smoothing,
time_smoothing * f_r.shape[1]))
return f_r, n_fft
def preprocess_train(list_of_data, random_state):
f_r, n_fft = _pre(list_of_data)
clusters = f_r
return clusters
def apply_preprocess(list_of_data, clusters):
f_r, n_fft = _pre(list_of_data)
memberships, distances = vq(f_r, clusters)
vq_r = clusters[memberships]
vq_r = vq_r.reshape((time_smoothing * len(vq_r),
vq_r.shape[1] // time_smoothing))
f_mag = vq_r[:, :n_fft // 2 + 1]
f_sincos = vq_r[:, n_fft // 2 + 1:]
extent = f_sincos.shape[1] // 2
f_phs = sin_cos_to_angle(f_sincos[:, :extent], f_sincos[:, extent:])
vq_c = abs_and_angle_to_complex(f_mag, f_phs)
d_k = istft(vq_c, fftsize=n_fft)
return d_k
random_state = np.random.RandomState(1999)
"""
fs, d = fetch_sample_music()
sub = int(.8 * d.shape[0])
d1 = [d[:sub]]
d2 = [d[sub:]]
"""
fs, d = fetch_sample_speech_fruit()
d1 = d[::8] + d[1::8] + d[2::8] + d[3::8] + d[4::8] + d[5::8] + d[6::8]
d2 = d[7::8]
# make sure d1 and d2 aren't the same!
assert [len(di) for di in d1] != [len(di) for di in d2]
clusters = preprocess_train(d1, random_state)
# Training data
vq_d1 = apply_preprocess(d1, clusters)
vq_d2 = apply_preprocess(d2, clusters)
    assert any(i != j for i, j in zip(vq_d1.ravel(), vq_d2.ravel()))
fix_d1 = np.concatenate(d1)
fix_d2 = np.concatenate(d2)
wavfile.write("fft_train_no_agc.wav", fs, soundsc(fix_d1))
wavfile.write("fft_test_no_agc.wav", fs, soundsc(fix_d2))
wavfile.write("fft_vq_train_no_agc.wav", fs, soundsc(vq_d1, fs))
wavfile.write("fft_vq_test_no_agc.wav", fs, soundsc(vq_d2, fs))
agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
agc_vq_d1, freq_vq_d1, energy_vq_d1 = time_attack_agc(vq_d1, fs, .5, 5)
agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)
wavfile.write("fft_train_agc.wav", fs, soundsc(agc_d1))
wavfile.write("fft_test_agc.wav", fs, soundsc(agc_d2))
wavfile.write("fft_vq_train_agc.wav", fs, soundsc(agc_vq_d1, fs))
wavfile.write("fft_vq_test_agc.wav", fs, soundsc(agc_vq_d2))
def run_dct_vq_example():
def _pre(list_of_data):
# Temporal window setting is crucial! - 512 seems OK for music, 256
# fruit perhaps due to samplerates
n_dct = 512
f_r = np.vstack([mdct_slow(dd, n_dct) for dd in list_of_data])
return f_r, n_dct
def preprocess_train(list_of_data, random_state):
f_r, n_dct = _pre(list_of_data)
clusters = f_r
return clusters
def apply_preprocess(list_of_data, clusters):
f_r, n_dct = _pre(list_of_data)
f_clust = f_r
memberships, distances = vq(f_clust, clusters)
vq_r = clusters[memberships]
d_k = imdct_slow(vq_r, n_dct)
return d_k
random_state = np.random.RandomState(1999)
# This doesn't work very well due to only taking a sample from the end as
# test
fs, d = fetch_sample_music()
sub = int(.8 * d.shape[0])
d1 = [d[:sub]]
d2 = [d[sub:]]
"""
fs, d = fetch_sample_speech_fruit()
d1 = d[::8] + d[1::8] + d[2::8] + d[3::8] + d[4::8] + d[5::8] + d[6::8]
d2 = d[7::8]
# make sure d1 and d2 aren't the same!
assert [len(di) for di in d1] != [len(di) for di in d2]
"""
clusters = preprocess_train(d1, random_state)
# Training data
vq_d1 = apply_preprocess(d1, clusters)
vq_d2 = apply_preprocess(d2, clusters)
    assert any(i != j for i, j in zip(vq_d1.ravel(), vq_d2.ravel()))
fix_d1 = np.concatenate(d1)
fix_d2 = np.concatenate(d2)
wavfile.write("dct_train_no_agc.wav", fs, soundsc(fix_d1))
wavfile.write("dct_test_no_agc.wav", fs, soundsc(fix_d2))
wavfile.write("dct_vq_train_no_agc.wav", fs, soundsc(vq_d1))
wavfile.write("dct_vq_test_no_agc.wav", fs, soundsc(vq_d2))
"""
import matplotlib.pyplot as plt
plt.specgram(vq_d2, cmap="gray")
plt.figure()
plt.specgram(fix_d2, cmap="gray")
plt.show()
"""
agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
agc_vq_d1, freq_vq_d1, energy_vq_d1 = time_attack_agc(vq_d1, fs, .5, 5)
agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)
wavfile.write("dct_train_agc.wav", fs, soundsc(agc_d1))
wavfile.write("dct_test_agc.wav", fs, soundsc(agc_d2))
wavfile.write("dct_vq_train_agc.wav", fs, soundsc(agc_vq_d1))
wavfile.write("dct_vq_test_agc.wav", fs, soundsc(agc_vq_d2))
def run_phase_reconstruction_example():
fs, d = fetch_sample_speech_tapestry()
# actually gives however many components you say! So double what .m file
# says
fftsize = 512
step = 64
X_s = np.abs(stft(d, fftsize=fftsize, step=step, real=False,
compute_onesided=False))
X_t = iterate_invert_spectrogram(X_s, fftsize, step, verbose=True)
"""
import matplotlib.pyplot as plt
plt.specgram(d, cmap="gray")
plt.savefig("1.png")
plt.close()
plt.imshow(X_s, cmap="gray")
plt.savefig("2.png")
plt.close()
"""
wavfile.write("phase_original.wav", fs, soundsc(d))
wavfile.write("phase_reconstruction.wav", fs, soundsc(X_t))
def run_phase_vq_example():
def _pre(list_of_data):
# Temporal window setting is crucial! - 512 seems OK for music, 256
# fruit perhaps due to samplerates
n_fft = 256
step = 32
f_r = np.vstack([np.abs(stft(dd, n_fft, step=step, real=False,
compute_onesided=False))
for dd in list_of_data])
return f_r, n_fft, step
def preprocess_train(list_of_data, random_state):
f_r, n_fft, step = _pre(list_of_data)
clusters = copy.deepcopy(f_r)
return clusters
def apply_preprocess(list_of_data, clusters):
f_r, n_fft, step = _pre(list_of_data)
f_clust = f_r
# Nondeterministic ?
memberships, distances = vq(f_clust, clusters)
vq_r = clusters[memberships]
d_k = iterate_invert_spectrogram(vq_r, n_fft, step, verbose=True)
return d_k
random_state = np.random.RandomState(1999)
fs, d = fetch_sample_speech_fruit()
d1 = d[::9]
d2 = d[7::8][:5]
# make sure d1 and d2 aren't the same!
assert [len(di) for di in d1] != [len(di) for di in d2]
clusters = preprocess_train(d1, random_state)
fix_d1 = np.concatenate(d1)
fix_d2 = np.concatenate(d2)
vq_d2 = apply_preprocess(d2, clusters)
wavfile.write("phase_train_no_agc.wav", fs, soundsc(fix_d1))
wavfile.write("phase_vq_test_no_agc.wav", fs, soundsc(vq_d2))
agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)
"""
import matplotlib.pyplot as plt
plt.specgram(agc_vq_d2, cmap="gray")
#plt.title("Fake")
plt.figure()
plt.specgram(agc_d2, cmap="gray")
#plt.title("Real")
plt.show()
"""
wavfile.write("phase_train_agc.wav", fs, soundsc(agc_d1))
wavfile.write("phase_test_agc.wav", fs, soundsc(agc_d2))
wavfile.write("phase_vq_test_agc.wav", fs, soundsc(agc_vq_d2))
def run_cqt_example():
try:
fs, d = fetch_sample_file("/Users/User/cqt_resources/kempff1.wav")
except ValueError:
print("WARNING: Using sample music instead but kempff1.wav is the example")
fs, d = fetch_sample_music()
X = d[:44100]
X_cq, c_dc, c_nyq, multiscale, shift, window_lens = cqt(X, fs)
X_r = icqt(X_cq, c_dc, c_nyq, multiscale, shift, window_lens)
SNR = 20 * np.log10(np.linalg.norm(X - X_r) / np.linalg.norm(X))
wavfile.write("cqt_original.wav", fs, soundsc(X))
wavfile.write("cqt_reconstruction.wav", fs, soundsc(X_r))
def run_fft_dct_example():
random_state = np.random.RandomState(1999)
fs, d = fetch_sample_speech_fruit()
n_fft = 64
X = d[0]
X_stft = stft(X, n_fft)
X_rr = complex_to_real_view(X_stft)
X_dct = fftpack.dct(X_rr, axis=-1, norm='ortho')
X_dct_sub = X_dct[1:] - X_dct[:-1]
std = X_dct_sub.std(axis=0, keepdims=True)
X_dct_sub += .01 * std * random_state.randn(
X_dct_sub.shape[0], X_dct_sub.shape[1])
X_dct_unsub = np.cumsum(X_dct_sub, axis=0)
X_idct = fftpack.idct(X_dct_unsub, axis=-1, norm='ortho')
X_irr = real_to_complex_view(X_idct)
X_r = istft(X_irr, n_fft)[:len(X)]
SNR = 20 * np.log10(np.linalg.norm(X - X_r) / np.linalg.norm(X))
print(SNR)
wavfile.write("fftdct_orig.wav", fs, soundsc(X))
wavfile.write("fftdct_rec.wav", fs, soundsc(X_r))
def run_world_example():
fs, d = fetch_sample_speech_tapestry()
d = d.astype("float32") / 2 ** 15
temporal_positions_h, f0_h, vuv_h, f0_candidates_h = harvest(d, fs)
temporal_positions_ct, spectrogram_ct, fs_ct = cheaptrick(d, fs,
temporal_positions_h, f0_h, vuv_h)
temporal_positions_d4c, f0_d4c, vuv_d4c, aper_d4c, coarse_aper_d4c = d4c(d, fs,
temporal_positions_h, f0_h, vuv_h)
# y = world_synthesis(f0_d4c, vuv_d4c, aper_d4c, spectrogram_ct, fs_ct)
y = world_synthesis(f0_d4c, vuv_d4c, coarse_aper_d4c, spectrogram_ct, fs_ct)
wavfile.write("out.wav", fs, soundsc(y))
def run_mgc_example():
import matplotlib.pyplot as plt
fs, x = wavfile.read("test16k.wav")
pos = 3000
fftlen = 1024
win = np.blackman(fftlen) / np.sqrt(np.sum(np.blackman(fftlen) ** 2))
xw = x[pos:pos + fftlen] * win
sp = 20 * np.log10(np.abs(np.fft.rfft(xw)))
mgc_order = 20
mgc_alpha = 0.41
mgc_gamma = -0.35
mgc_arr = win2mgc(xw, order=mgc_order, alpha=mgc_alpha, gamma=mgc_gamma, verbose=True)
xwsp = 20 * np.log10(np.abs(np.fft.rfft(xw)))
sp = mgc2sp(mgc_arr, mgc_alpha, mgc_gamma, fftlen)
plt.plot(xwsp)
plt.plot(20. / np.log(10) * np.real(sp), "r")
plt.xlim(1, len(xwsp))
plt.show()
def run_world_mgc_example():
fs, d = fetch_sample_speech_tapestry()
d = d.astype("float32") / 2 ** 15
    # hardcoded for 16k from
# https://github.com/CSTR-Edinburgh/merlin/blob/master/misc/scripts/vocoder/world/extract_features_for_merlin.sh
mgc_alpha = 0.58
# mgc_order = 59
mgc_order = 59
# this is actually just mcep
mgc_gamma = 0.0
# from sklearn.externals import joblib
# mem = joblib.Memory("/tmp")
# mem.clear()
def enc():
temporal_positions_h, f0_h, vuv_h, f0_candidates_h = harvest(d, fs)
temporal_positions_ct, spectrogram_ct, fs_ct = cheaptrick(d, fs,
temporal_positions_h, f0_h, vuv_h)
temporal_positions_d4c, f0_d4c, vuv_d4c, aper_d4c, coarse_aper_d4c = d4c(d, fs,
temporal_positions_h, f0_h, vuv_h)
mgc_arr = sp2mgc(spectrogram_ct, mgc_order, mgc_alpha, mgc_gamma,
verbose=True)
return mgc_arr, spectrogram_ct, f0_d4c, vuv_d4c, coarse_aper_d4c
mgc_arr, spectrogram_ct, f0_d4c, vuv_d4c, coarse_aper_d4c = enc()
sp_r = mgc2sp(mgc_arr, mgc_alpha, mgc_gamma, fs=fs, verbose=True)
"""
import matplotlib.pyplot as plt
plt.imshow(20 * np.log10(sp_r))
plt.figure()
plt.imshow(20 * np.log10(spectrogram_ct))
plt.show()
raise ValueError()
"""
y = world_synthesis(f0_d4c, vuv_d4c, coarse_aper_d4c, sp_r, fs)
# y = world_synthesis(f0_d4c, vuv_d4c, aper_d4c, sp_r, fs)
wavfile.write("out_mgc.wav", fs, soundsc(y))
def get_frame(signal, winsize, no):
shift = winsize // 2
start = no * shift
end = start + winsize
return signal[start:end]
class LTSD():
"""
LTSD VAD code from jfsantos
"""
def __init__(self, winsize, window, order):
self.winsize = int(winsize)
self.window = window
self.order = order
self.amplitude = {}
def get_amplitude(self, signal, l):
if l in self.amplitude:
return self.amplitude[l]
else:
amp = sp.absolute(sp.fft(get_frame(signal, self.winsize, l) * self.window))
self.amplitude[l] = amp
return amp
def compute_noise_avg_spectrum(self, nsignal):
windownum = int(len(nsignal) // (self.winsize // 2) - 1)
avgamp = np.zeros(self.winsize)
for l in range(windownum):
avgamp += sp.absolute(sp.fft(get_frame(nsignal, self.winsize, l) * self.window))
return avgamp / float(windownum)
def compute(self, signal):
self.windownum = int(len(signal) // (self.winsize // 2) - 1)
ltsds = np.zeros(self.windownum)
        # Calculate the average noise spectrum amplitude based on the first 20 frames at the head of the input signal.
self.avgnoise = self.compute_noise_avg_spectrum(signal[0:self.winsize * 20]) ** 2
for l in range(self.windownum):
ltsds[l] = self.ltsd(signal, l, 5)
return ltsds
def ltse(self, signal, l, order):
maxamp = np.zeros(self.winsize)
for idx in range(l - order, l + order + 1):
amp = self.get_amplitude(signal, idx)
maxamp = np.maximum(maxamp, amp)
return maxamp
def ltsd(self, signal, l, order):
if l < order or l + order >= self.windownum:
return 0
return 10.0 * np.log10(np.sum(self.ltse(signal, l, order) ** 2 / self.avgnoise) / float(len(self.avgnoise)))
def ltsd_vad(x, fs, threshold=9, winsize=8192):
# winsize based on sample rate
# 1024 for fs = 16000
orig_dtype = x.dtype
orig_scale_min = x.min()
orig_scale_max = x.max()
x = (x - x.min()) / (x.max() - x.min())
# works with 16 bit
x = x * (2 ** 15)
x = x.astype("int32")
window = sp.hanning(winsize)
ltsd = LTSD(winsize, window, 5)
s_vad = ltsd.compute(x)
# LTSD is 50% overlap, so each "step" covers 4096 samples
# +1 to cover the extra edge window
n_samples = int(((len(s_vad) + 1) * winsize) // 2)
time_s = n_samples / float(fs)
time_points = np.linspace(0, time_s, len(s_vad))
time_samples = (fs * time_points).astype(np.int32)
time_samples = time_samples
f_vad = np.zeros_like(x, dtype=np.bool)
offset = winsize
for n, (ss, es) in enumerate(zip(time_samples[:-1], time_samples[1:])):
sss = ss - offset
if sss < 0:
sss = 0
ses = es - offset
if ses < 0:
ses = 0
if s_vad[n + 1] < threshold:
f_vad[sss:ses] = False
else:
f_vad[sss:ses] = True
f_vad[ses:] = False
x = x.astype("float64")
x = x / float(2 ** 15)
x = x * (orig_scale_max - orig_scale_min) + orig_scale_min
x = x.astype(orig_dtype)
return x[f_vad], f_vad
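# Added illustration (sketch): trimming silence with ltsd_vad. The winsize
# follows the comment above (1024 for fs = 16000); the signal is a placeholder.
"""
fs = 16000
speech = np.random.randn(2 * fs).astype("float32")  # stand-in for a real recording
voiced, vad_mask = ltsd_vad(speech, fs, threshold=9, winsize=1024)
# `voiced` keeps only the samples where vad_mask is True
"""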
def run_ltsd_example():
fs, d = fetch_sample_speech_tapestry()
winsize = 1024
d = d.astype("float32") / 2 ** 15
d -= d.mean()
pad = 3 * fs
noise_pwr = np.percentile(d, 1) ** 2
noise_pwr = max(1E-9, noise_pwr)
d = np.concatenate((np.zeros((pad,)) + noise_pwr * np.random.randn(pad), d))
_, vad_segments = ltsd_vad(d, fs, winsize=winsize)
v_up = np.where(vad_segments == True)[0]
s = v_up[0]
st = v_up[-1] + int(.5 * fs)
d = d[s:st]
bname = "tapestry.wav".split(".")[0]
wavfile.write("%s_out.wav" % bname, fs, soundsc(d))
if __name__ == "__main__":
run_ltsd_example()
"""
Trying to run all examples will seg fault on my laptop - probably memory!
Comment individually
run_ltsd_example()
run_world_mgc_example()
run_world_example()
run_mgc_example()
run_phase_reconstruction_example()
run_phase_vq_example()
run_dct_vq_example()
run_fft_vq_example()
run_lpc_example()
run_cqt_example()
run_fft_dct_example()
test_all()
"""
| mit |
ilo10/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
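    # Added sanity check (a sketch, assuming the default n_components=2):
    # the fitted embedding holds one 2-D position per input point.
    assert mds_clf.embedding_.shape == (4, 2)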
| bsd-3-clause |
Jackson-Y/Machine-Learning | text/classification_cnn.py | 1 | 6019 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def cnn_model(features, labels, mode):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
word_vectors,
filters=N_FILTERS,
            kernel_size=FILTER_SHAPE1,
padding='VALID',
# Add a ReLU for non linearity.
activation=tf.nn.relu
)
# Max pooling across output of Convolution+ReLU
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME'
)
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
            kernel_size=FILTER_SHAPE2,
padding='VALID'
)
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits
)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes
)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
global n_words
# prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data
)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH
)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = tf.estimator.Estimator(model_fn=cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True
)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False
)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| mit |
darafferty/factor | factor/scripts/smooth_amps_spline.py | 1 | 26517 | #! /usr/bin/env python
"""
Script to smooth and normalize amplitude solutions
Reinout van Weeren, April 2016
"""
import argparse
from argparse import RawTextHelpFormatter
import casacore.tables as pt
import numpy
import os
import lofar.parmdb
import math
import shutil
import multiprocessing
from scipy.interpolate import LSQUnivariateSpline, interp1d, interp2d
import sys
import scipy.ndimage
import astropy.convolution
def std(inputData, Zero=False, axis=None, dtype=None):
"""
Robust estimator of the standard deviation of a data set.
Based on the robust_sigma function from the AstroIDL User's Library.
.. versionchanged:: 1.0.3
Added the 'axis' and 'dtype' keywords to make this function more
compatible with numpy.std()
"""
epsilon = 1.0e-20
if axis is not None:
fnc = lambda x: std(x, dtype=dtype)
sigma = numpy.apply_along_axis(fnc, axis, inputData)
else:
data = inputData.ravel()
if type(data).__name__ == "MaskedArray":
data = data.compressed()
if dtype is not None:
data = data.astype(dtype)
if Zero:
data0 = 0.0
else:
data0 = numpy.median(data)
maxAbsDev = numpy.median(numpy.abs(data-data0)) / 0.6745
if maxAbsDev < epsilon:
maxAbsDev = (numpy.abs(data-data0)).mean() / 0.8000
if maxAbsDev < epsilon:
sigma = 0.0
return sigma
u = (data-data0) / 6.0 / maxAbsDev
u2 = u**2.0
good = numpy.where( u2 <= 1.0 )
good = good[0]
if len(good) < 3:
print "WARNING: Distribution is too strange to compute standard deviation"
sigma = -1.0
return sigma
numerator = ((data[good]-data0)**2.0 * (1.0-u2[good])**2.0).sum()
nElements = (data.ravel()).shape[0]
denominator = ((1.0-u2[good])*(1.0-5.0*u2[good])).sum()
sigma = nElements*numerator / (denominator*(denominator-1.0))
if sigma > 0:
sigma = math.sqrt(sigma)
else:
sigma = 0.0
return sigma
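# Added illustration (comment-only sketch): the robust estimator above should
# stay near the true sigma even with a few gross outliers, unlike numpy.std,
# e.g. for data = numpy.concatenate((numpy.random.randn(10000), [50., -60.]))
# std(data) stays close to 1.0 while numpy.std(data) is inflated.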
def findscatter(datavector):
shifted_vec = numpy.roll(datavector, 1)
#scatter = sum(abs(shifted_vec - datavector))/numpy.float(len(datavector))
scatter = numpy.median(abs(shifted_vec - datavector))
return scatter
def findscatter_time(dataarray):
scattervec = []
for freq in range(0,len(dataarray[:,0])):
#print 'findscatter_time', freq
scatter = findscatter(dataarray[freq,:])
scattervec.append(scatter)
return numpy.median(scattervec)
def findscatter_freq(dataarray):
scattervec = []
for time in range(0,len(dataarray[0,:])):
#print 'findscatter_freq', time
scatter = findscatter(dataarray[:,time])
scattervec.append(scatter)
return numpy.median(scattervec)
def findnoisevec(datavector):
shifted_vec = numpy.roll(datavector, 1)
scatter_vec = (abs(shifted_vec - datavector))
#scatter_vec = medfilt(scatter_vec,21)
scatter_vec = scipy.ndimage.filters.median_filter(scatter_vec,9, mode='mirror')
# now smooth
gauss = astropy.convolution.Gaussian1DKernel(stddev=4.0)
scatter_vec = astropy.convolution.convolve(scatter_vec,gauss , boundary='extend')
# normalize scatter_vec
scatter_vec = scatter_vec/numpy.mean(scatter_vec)
return scatter_vec
def spline1D(amp_orig):
# to compute knot points
f = lambda m, n: [i*n//m + n//(2*m) for i in range(m)]
if amp_orig is None:
return None, None, None, None, None, None, None
# expand array and mirror full array around edges
ndata = len(amp_orig)
amp = numpy.zeros(ndata+2*ndata)
amp[ndata:ndata+ndata] = amp_orig
for i in range(0, ndata):
# Mirror at left edge.
idx = min(ndata-1, ndata-i)
amp[i] = amp_orig[idx]
# Mirror at right edge
idx = max(0, ndata-2-i)
amp[ndata+ndata+i] = amp_orig[idx]
# Find flagged values
flagged = numpy.where(amp == 1.0)
    # work in log-space
amp_orig_ext = numpy.copy(amp)
amp = numpy.log10(amp)
weights = (0.*numpy.copy(amp)) + 1 # initialize weights to 1
# filter bad data and determine average scatter of amplitudes
scatter = findscatter(amp)
# remove some really bad stuff, by putting weights to zero.
idxbadi1 = numpy.where(amp > (numpy.median(amp) + (35.*std(amp))))
weights[idxbadi1] = 1e-10 # small value, zero generates NaN in spline
idxbadi2 = numpy.where(amp < (numpy.median(amp) - (35.*std(amp))))
weights[idxbadi2] = 1e-10 # small value, zero generates NaN in spline
# Set weights for flagged values
weights[flagged] = 1e-10 # small value, zero generates NaN in spline
# make the noisevec
if len(amp) > 30: # so at least 30/3 = 10 good data points
# create noise vector
noisevec = findnoisevec(amp)
else:
noisevec = (numpy.copy(amp) * 0.) + 1.0 # just make constant noise, if we have too little datapoints
if scatter < 0.005:
#Interior knots t must satisfy Schoenberg-Whitney conditions
scatter = 0.005 # otherwise we fit more parameters than we have data points
knotfactor = 0.5e3*scatter # normalize based on trial and error
#print scatter, antenna, len(amp), knotfactor
timevec = numpy.arange(0,len(amp))
knotvec = f(numpy.int(len(amp)/knotfactor),len(amp))
#print antenna, 'knots', knotvec, noisevec[knotvec]
#print 'knots OR', knotvec
# simple optimization knot selection for vectors that have at least 30 data points
# based on the noisevector
# removes even numbered knots if the noise is high
knotvec_copy = numpy.copy(knotvec) # otherwise tcopy is updated as well
if len(timevec) > 30 and len(knotvec) > 2:
for counter, knot in enumerate(knotvec_copy):
#print counter, knot, noisevec[knot]
if (counter % 2 == 0) and noisevec[knot] > 1.5: # even index and large noise
knotvec.remove(knot)
#print 'Removing knot because of local increase in noise'
#print antenna, 'cleaned knots', knotvec, noisevec[knotvec]
    # assign quartile/midpoint knots if not enough data points
    if len(knotvec) < 3: # because we are working with a 3x larger mirrored array
knotvec = [numpy.int(len(timevec)*0.25),numpy.int(len(timevec)/2),numpy.int(len(timevec)*0.75)]
#print 'extending to', knotvec
splineorder = 5 # default
if len(knotvec) == 3 and scatter > 0.1:
splineorder = 3 # reduce order, data is bad
if scatter > 0.2:
splineorder = 1 # very bad data
#print 'knots CL', knotvec
spl2 = LSQUnivariateSpline(timevec, amp, knotvec, w=weights, k=splineorder)
    # now find bad data deviating from the fit by more than 15 x scatter
residual = numpy.abs(spl2(timevec)-amp)
idx = numpy.where(residual > 15.*scatter)
# second iteration
if numpy.any(idx):
ampcopy = numpy.copy(amp)
ampcopy[idx] = spl2(timevec[idx]) # replace bad amplitudes by model
spl2 = LSQUnivariateSpline(timevec, ampcopy, knotvec, w=weights, k=splineorder)
residual = numpy.abs(spl2(timevec)-amp)
idx = numpy.where(residual > 8.*scatter)
# third iteration
if numpy.any(idx):
ampcopy = numpy.copy(amp)
ampcopy[idx] = spl2(timevec[idx]) # replace bad amplitudes by model
spl2 = LSQUnivariateSpline(timevec, ampcopy, knotvec, w=weights, k=splineorder)
# again look at residual, go back to original amp again, find deviating data > 3x scatter
residual = numpy.abs(spl2(timevec)-amp)
idx = numpy.where(residual > 3.*scatter)
# replace the bad data with model
model =spl2(timevec)
#if len(idx) != 0:
amp[idx] = model[idx]
# go out of log-space
idxnodata = numpy.where(amp > 1.0)
amp[idxnodata] = 0.0
amp = 10**amp
amp_clean = amp[ndata:ndata + ndata]
idxbad = numpy.where(amp_clean != amp_orig)
    n_knots = numpy.int(numpy.ceil(numpy.float(len(knotvec))/3.)) # approximate, just for plot
# return cleaned amplitudes, model, scatter, number of knots, indices of replaced outliers
return amp_clean, 10**(model[ndata:ndata + ndata]), noisevec[ndata:ndata + ndata], scatter, n_knots, idxbad, weights[ndata:ndata + ndata]
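# Added illustration (comment-only sketch): spline1D expects a single
# channel's amplitude time series with flagged samples set to 1.0, e.g.
# cleaned, model, noisevec, scatter, n_knots, idxbad, weights = spline1D(amp_timeseries)
# where `amp_timeseries` is a hypothetical 1D numpy array of gain amplitudes.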
def pad_2Darray(a, width, mode):
pad_shape = (a.shape[0]*3, a.shape[1]*3)
pad_a = numpy.zeros(pad_shape)
# center
pad_a[a.shape[0]:2*a.shape[0], a.shape[1]:2*a.shape[1]] = a
# four corners
pad_a[0:a.shape[0], 0:a.shape[1]] = a[::-1, ::-1]
pad_a[0:a.shape[0], 2*a.shape[1]:3*a.shape[1]] = a[::-1, ::-1]
pad_a[2*a.shape[0]:3*a.shape[0], 2*a.shape[1]:3*a.shape[1]] = a[::-1, ::-1]
pad_a[2*a.shape[0]:3*a.shape[0], 0:a.shape[1]] = a[::-1, ::-1]
# middle edges
pad_a[0:a.shape[0], a.shape[1]:2*a.shape[1]] = a[:, ::-1]
pad_a[a.shape[0]:2*a.shape[0], 2*a.shape[1]:3*a.shape[1]] = a[::-1, :]
pad_a[2*a.shape[0]:3*a.shape[0], a.shape[1]:2*a.shape[1]] = a[:, ::-1]
pad_a[a.shape[0]:2*a.shape[0], 0:a.shape[1]] = a[::-1, :]
return pad_a
def median2Dampfilter(amp_orig):
try:
from numpy import pad
except ImportError:
pad = pad_2Darray
orinal_size = numpy.shape(amp_orig)
# padd array by reflection around axis
amp = pad(amp_orig, ((numpy.shape(amp_orig)[0],numpy.shape(amp_orig)[0]),
(numpy.shape(amp_orig)[1],numpy.shape(amp_orig)[1])), mode='reflect')
flagged = numpy.where(amp == 1.0)
# take the log
amp = numpy.log10(amp)
# Set flagged values to NaN
amp[flagged] = numpy.nan
# create median filtered array, ignoring NaNs
amp_median = scipy.ndimage.filters.generic_filter(amp, numpy.nanmedian, (3,5)) # so a bit more smoothing along the time-axis
# find scatter
scatter_freq = findscatter_freq(amp)
scatter_time = findscatter_time(amp)
# print 'scatter (freq,time)', scatter_freq, scatter_time
scatter = 0.5*(scatter_freq+scatter_time) # average x-y scatter
# find bad data
idxbad = numpy.where((numpy.abs(amp - amp_median)) > scatter*3.)
baddata = numpy.copy(amp)*0.0
baddata[idxbad] = 1.0
# replace the bad data points
amp_cleaned = numpy.copy(amp)
amp_cleaned[idxbad] = amp_median[idxbad]
# raise to the power
amp = 10**amp
amp_median = 10**amp_median
amp_cleaned = 10**amp_cleaned
#back to original size
amp_median = amp_median[orinal_size[0]:2*orinal_size[0],orinal_size[1]:2*orinal_size[1]]
baddata = baddata[orinal_size[0]:2*orinal_size[0],orinal_size[1]:2*orinal_size[1]]
amp_cleaned = amp_cleaned[orinal_size[0]:2*orinal_size[0],orinal_size[1]:2*orinal_size[1]]
return amp_cleaned, amp_median, baddata
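# Added illustration (comment-only sketch): median2Dampfilter expects a
# (freq, time) amplitude array, e.g.
# amp_cleaned, amp_median, baddata = median2Dampfilter(amp2d)
# where `amp2d` is a hypothetical numpy array of shape (nchans, ntimes).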
def main(instrument_name, instrument_name_smoothed, normalize=True, plotting=False,
scratch_dir=None):
if type(normalize) is str:
if normalize.lower() == 'true':
normalize = True
else:
normalize = False
if type(plotting) is str:
if plotting.lower() == 'true':
plotting = True
else:
plotting = False
gain = 'Gain'
# Copy to scratch directory if specified
if scratch_dir is not None:
instrument_name_orig = instrument_name
instrument_name = os.path.join(scratch_dir, os.path.basename(instrument_name_orig))
instrument_name_smoothed_orig = instrument_name_smoothed
instrument_name_smoothed = os.path.join(scratch_dir, os.path.basename(instrument_name_smoothed_orig))
shutil.copytree(instrument_name_orig, instrument_name)
pdb = lofar.parmdb.parmdb(instrument_name)
parms = pdb.getValuesGrid('*')
key_names = parms.keys()
initial_flagged_dict = {}
initial_unflagged_dict = {}
for key_name in key_names:
# Check for NaNs and zeros. If found, set to 1
initial_flagged_indx = numpy.where(numpy.logical_or(numpy.isnan(parms[key_name]['values']),
parms[key_name]['values'] == 0.0))
initial_flagged_dict[key_name] = initial_flagged_indx
initial_unflagged_indx = numpy.where(numpy.logical_and(~numpy.isnan(parms[key_name]['values']),
parms[key_name]['values'] != 0.0))
initial_unflagged_dict[key_name] = initial_unflagged_indx
parms[key_name]['values'][initial_flagged_indx] = 1.0
nchans = len(parms[key_names[0]]['freqs'])
# determine the number of polarizations in parmdb (2 or 4)
if any(gain+':0:1:' in s for s in key_names):
pol_list = ['0:0', '1:1', '0:1', '1:0']
else:
pol_list = ['0:0', '1:1']
times = numpy.copy(sorted( parms[key_names[0]]['times']))
orig_times = parms[key_names[0]]['times']
timewidths = parms[key_names[0]]['timewidths']
freqs = numpy.copy(sorted( parms[key_names[0]]['freqs']))/1e6 # get this in MHz
orig_freqs = parms[key_names[0]]['freqs']
freqwidths = parms[key_names[0]]['freqwidths']
# times not used at the moment, I assume the time axis for a parmdb is regular and does not contain gaps
times = (times - numpy.min(times))/24. #so we get an axis in hrs
ntimes = len(times)
# Get station names
antenna_list = set([s.split(':')[-1] for s in pdb.getNames()])
# for plotting
Nr = int(numpy.ceil(numpy.sqrt(len(antenna_list))))
Nc = int(numpy.ceil(numpy.float(len(antenna_list))/Nr))
if plotting:
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('font',size =6 )
mpl.rc('figure.subplot',left=0.05, bottom=0.05, right=0.95, top=0.95 )
fa, axa = plt.subplots(Nr, Nc, sharex=True, sharey=True, figsize=(16,12))
axsa = axa.reshape((Nr*Nc,1))
if nchans > 5: # 2D filter
Nr = len(antenna_list)
Nc = int(4)
fa2, axa2 = plt.subplots(Nr, Nc, sharex=True, sharey=True, figsize=(8,108),)
axsa2 = axa2.reshape((Nr*Nc,1))
for pol in pol_list:
for istat,antenna in enumerate(sorted(antenna_list)[::-1]):
channel_parms_real = [parms[gain + ':' + pol + ':Real:'+ antenna]['values'][:, chan]
for chan in range(nchans)]
channel_parms_imag = [parms[gain + ':' + pol + ':Imag:'+ antenna]['values'][:, chan]
for chan in range(nchans)]
# some plotting setup
if len(channel_parms_real[0]) > 500:
fmt = ','
else:
fmt = 'o'
ls='none'
channel_amp_orig = [numpy.sqrt(channel_parms_real[chan]**2 +
channel_parms_imag[chan]**2) for chan in range(nchans)]
# Find flagged solutions and set to 1.0
channel_amp_interp = []
for chan in range(nchans):
unflagged_times = numpy.where(channel_parms_real[chan] != 1.0)
flagged_times = numpy.where(channel_parms_real[chan] == 1.0)
if numpy.any(unflagged_times):
if numpy.any(flagged_times):
channel_amp_orig[chan][flagged_times] = 1.0
channel_amp_interp.append(channel_amp_orig[chan])
else:
channel_amp_interp.append(None)
# now find the bad data
if ntimes > 5:
pool = multiprocessing.Pool()
results = pool.map(spline1D, channel_amp_interp)
pool.close()
pool.join()
for chan, (amp_cleaned, model, noisevec, scatter, n_knots, idxbad, weights) in enumerate(results):
# put back the results
phase = numpy.arctan2(channel_parms_imag[chan], channel_parms_real[chan])
if amp_cleaned is None:
amp_cleaned = channel_amp_orig[chan]
parms[gain + ':' + pol + ':Real:' + antenna]['values'][:, chan] = numpy.copy(amp_cleaned*numpy.cos(phase))
parms[gain + ':' + pol + ':Imag:' + antenna]['values'][:, chan] = numpy.copy(amp_cleaned*numpy.sin(phase))
if pol in pol_list[0]:
cc = 'blue'
ccf = 'orange'
else:
cc = 'green'
ccf= 'red'
timevec = numpy.arange(0,len(channel_amp_orig[chan]))
# only plot one channel, just to verify code works
if plotting and chan == nchans-1: # plot last channel
axsa[istat][0].plot(timevec, amp_cleaned, marker=fmt, ls=ls,
markersize=0.1*len(amp_cleaned), c=cc,mec=cc)
axsa[istat][0].plot(timevec,noisevec, c=cc, lw=0.75, ls='--')
if pol in pol_list[0]:
axsa[istat][0].annotate('scatter=' +'{:.2g}'.format(scatter),
xy=(0.5,0.15), color=cc,textcoords='axes fraction')
axsa[istat][0].annotate('#knots=' +'{:d}'.format(n_knots),
                            xy=(0.01,0.15), color=cc,textcoords='axes fraction') # we divided by three because we mirrored the array
else:
axsa[istat][0].annotate('scatter=' +'{:.2g}'.format(scatter),
xy=(0.5,0.02), color=cc, textcoords='axes fraction')
axsa[istat][0].annotate('#knots=' +'{:d}'.format(n_knots),
xy=(0.01,0.02), color=cc,textcoords='axes fraction')
if numpy.any(idxbad):
axsa[istat][0].plot(timevec[idxbad],channel_amp_orig[chan][idxbad],
marker='o', c=ccf, ls=ls, markersize=4)
idxbadi = numpy.where(weights < 1.0)
if numpy.any(idxbadi):
axsa[istat][0].plot(timevec[idxbadi],channel_amp_orig[chan][idxbadi],
marker='o', c='black', ls=ls, markersize=4, mec='black')
axsa[istat][0].plot(timevec, model, c=ccf, lw=1.0)
axsa[istat][0].set_title(antenna)
axsa[istat][0].set_ylim(-0.3, 2)
axsa[istat][0].set_xlim(0, max(timevec))
if nchans > 5: # Do 2D smooth
channel_parms_real = [parms[gain + ':' + pol + ':Real:'+ antenna]['values'][:, chan]
for chan in range(nchans)]
channel_parms_imag = [parms[gain + ':' + pol + ':Imag:'+ antenna]['values'][:, chan]
for chan in range(nchans)]
channel_parms_real = numpy.asarray(channel_parms_real)
channel_parms_imag = numpy.asarray(channel_parms_imag)
channel_amp_orig = numpy.asarray([numpy.sqrt(channel_parms_real[chan]**2 +
channel_parms_imag[chan]**2) for chan in range(nchans)])
phase = numpy.arctan2(channel_parms_imag[:], channel_parms_real[:])
# Smooth
channel_amp_interp = []
unflagged_sols = numpy.where(channel_parms_real != 1.0)
x, y = numpy.meshgrid(times, range(nchans))
if numpy.any(unflagged_sols):
# Set flagged solutions to 1.0
flagged_sols = numpy.where(channel_parms_real == 1.0)
channel_amp_orig[flagged_sols] = 1.0
amp_cleaned, amp_median, baddata = median2Dampfilter(channel_amp_orig)
for chan in range(nchans):
# put back the results
parms[gain + ':' + pol + ':Real:' + antenna]['values'][:, chan] = numpy.copy((amp_cleaned[chan,:])*numpy.cos(phase[chan,:]))
parms[gain + ':' + pol + ':Imag:' + antenna]['values'][:, chan] = numpy.copy((amp_cleaned[chan,:])*numpy.sin(phase[chan,:]))
if plotting:
axsa2[4*istat][0].imshow(numpy.transpose(channel_amp_orig),
interpolation='none',origin='lower',clim=(0.5, 1.5),aspect='auto')
axsa2[4*istat][0].set_xlabel('freq')
axsa2[4*istat][0].set_ylabel('time')
axsa2[4*istat][0].set_title('Original' + ' ' + antenna)
axsa2[4*istat+1][0].imshow(numpy.transpose(amp_median),
interpolation='none',origin='lower',aspect='auto', clim=(0.5,1.5))
axsa2[4*istat+1][0].set_xlabel('freq')
axsa2[4*istat+1][0].set_ylabel('time')
axsa2[4*istat+1][0].set_title('2D median model')
axsa2[4*istat+2][0].imshow(numpy.transpose(numpy.abs(channel_amp_orig-amp_median)),
interpolation='none',origin='lower',clim=(0.0, 0.3),aspect='auto')
axsa2[4*istat+2][0].set_xlabel('freq')
axsa2[4*istat+2][0].set_ylabel('time')
axsa2[4*istat+2][0].set_title('abs(Residual)')
axsa2[4*istat+3][0].imshow(numpy.transpose(baddata),
interpolation='none',origin='lower',clim=(0.0, 2.0),
aspect='auto', cmap='gnuplot')
axsa2[4*istat+3][0].set_xlabel('freq')
axsa2[4*istat+3][0].set_ylabel('time')
axsa2[4*istat+3][0].set_title('Replaced solutions')
if plotting:
fa.savefig('1Dsmooth.png', dpi=100)
if nchans > 5: # make 2D plot
fa2.tight_layout()
fa2.savefig('2Dsmooth.png')
plt.show()
# Normalize the amplitude solutions to a mean of one across all channels
if normalize:
# First find the normalization factor
amplist = []
for pol in ['0:0','1:1']: # hard code here in case the data contains 0:1 and 1:0
for antenna in antenna_list:
key_name = gain + ':' + pol + ':Real:'+ antenna
if numpy.any(initial_unflagged_dict[key_name]):
# Only use unflagged data for normalization
real = numpy.copy(parms[key_name]['values'])
key_name = gain + ':' + pol + ':Imag:'+ antenna
imag = numpy.copy(parms[key_name]['values'])
amp = numpy.copy(numpy.sqrt(real**2 + imag**2))
amplist.append(amp[initial_unflagged_dict[key_name]])
norm_factor = 1.0/(numpy.nanmean(numpy.concatenate(amplist)))
print "smooth_amps_spline.py: Normalization-Factor is:", norm_factor
# Now do the normalization
for chan in range(nchans):
for pol in pol_list:
for antenna in antenna_list:
real = numpy.copy(parms[gain + ':' + pol + ':Real:'+ antenna]['values'][:, chan])
imag = numpy.copy(parms[gain + ':' + pol + ':Imag:'+ antenna]['values'][:, chan])
phase = numpy.arctan2(imag, real)
amp = numpy.copy(numpy.sqrt(real**2 + imag**2))
# Clip extremely low amplitude solutions to prevent very high
# amplitudes in the corrected data
                    unflagged = numpy.where(~numpy.isnan(amp))[0]
                    low_ind = numpy.where(amp[unflagged] < 0.2)[0]
                    amp[unflagged[low_ind]] = 0.2
parms[gain + ':' + pol + ':Real:'+ antenna]['values'][:, chan] = numpy.copy(amp *
numpy.cos(phase) * norm_factor)
parms[gain + ':' + pol + ':Imag:'+ antenna]['values'][:, chan] = numpy.copy(amp *
numpy.sin(phase) * norm_factor)
# Make sure flagged solutions are still flagged
for key_name in key_names:
parms[key_name]['values'][initial_flagged_dict[key_name]] = numpy.nan
if os.path.exists(instrument_name_smoothed):
shutil.rmtree(instrument_name_smoothed)
pdbnew = lofar.parmdb.parmdb(instrument_name_smoothed, create=True)
# Identify any gaps in time (frequency gaps are not allowed), as we need to handle
# each section separately if gaps are present
delta_times = orig_times[1:] - orig_times[:-1]
gaps = numpy.where(delta_times > timewidths[:-1]*1.1)
if len(gaps[0]) > 0:
gaps_ind = gaps[0] + 1
else:
gaps_ind = []
for pol in pol_list:
for station in antenna_list:
g_start = 0
real = parms['Gain:' + pol + ':Real:'+ station]['values']
imag = parms['Gain:' + pol + ':Imag:'+ station]['values']
for g in gaps_ind:
# If time gaps exist, add them one-by-one (except for last one)
pdbnew.addValues('Gain:'+pol+':Real:{}'.format(station), real[g_start:g], orig_freqs, freqwidths,
orig_times[g_start:g], timewidths[g_start:g], asStartEnd=False)
pdbnew.addValues('Gain:'+pol+':Imag:{}'.format(station), imag[g_start:g], orig_freqs, freqwidths,
orig_times[g_start:g], timewidths[g_start:g], asStartEnd=False)
g_start = g
# Add remaining time slots
pdbnew.addValues('Gain:'+pol+':Real:{}'.format(station), real[g_start:], orig_freqs, freqwidths,
orig_times[g_start:], timewidths[g_start:], asStartEnd=False)
pdbnew.addValues('Gain:'+pol+':Imag:{}'.format(station), imag[g_start:], orig_freqs, freqwidths,
orig_times[g_start:], timewidths[g_start:], asStartEnd=False)
pdbnew.flush()
# Copy output to original path and delete copies if scratch directory is specified
if scratch_dir is not None:
if os.path.exists(instrument_name_smoothed_orig):
shutil.rmtree(instrument_name_smoothed_orig)
shutil.copytree(instrument_name_smoothed, instrument_name_smoothed_orig)
shutil.rmtree(instrument_name)
shutil.rmtree(instrument_name_smoothed)
if __name__ == '__main__':
descriptiontext = "Smooth and normalize amplitude solutions.\n"
parser = argparse.ArgumentParser(description=descriptiontext, formatter_class=RawTextHelpFormatter)
parser.add_argument('instrument_name', help='name of the instrument parmdb to smooth')
parser.add_argument('instrument_name_smoothed', help='name of the output parmdb')
parser.add_argument('normalize', help='normalize?')
parser.add_argument('plotting', help='make plots?')
args = parser.parse_args()
main(args.instrument_name, args.instrument_name_smoothed, args.normalize, args.plotting)
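# Example invocation (an illustrative sketch only; the parmdb names below are
# placeholders, not taken from any real observation):
#   python smooth_amps_spline.py instrument instrument_smoothed True False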
| gpl-2.0 |
DougBurke/astropy | astropy/visualization/mpl_normalize.py | 2 | 8314 | """
Normalization class for Matplotlib that can be used to produce
colorbars.
"""
import numpy as np
from numpy import ma
from .interval import (PercentileInterval, AsymmetricPercentileInterval,
ManualInterval, MinMaxInterval, BaseInterval)
from .stretch import (LinearStretch, SqrtStretch, PowerStretch, LogStretch,
AsinhStretch, BaseStretch)
try:
import matplotlib # pylint: disable=W0611
from matplotlib.colors import Normalize
except ImportError:
class Normalize:
def __init__(self, *args, **kwargs):
raise ImportError('matplotlib is required in order to use this '
'class.')
__all__ = ['ImageNormalize', 'simple_norm']
__doctest_requires__ = {'*': ['matplotlib']}
class ImageNormalize(Normalize):
"""
Normalization class to be used with Matplotlib.
Parameters
----------
data : `~numpy.ndarray`, optional
The image array. This input is used only if ``interval`` is
also input. ``data`` and ``interval`` are used to compute the
vmin and/or vmax values only if ``vmin`` or ``vmax`` are not
input.
interval : `~astropy.visualization.BaseInterval` subclass instance, optional
The interval object to apply to the input ``data`` to determine
the ``vmin`` and ``vmax`` values. This input is used only if
``data`` is also input. ``data`` and ``interval`` are used to
compute the vmin and/or vmax values only if ``vmin`` or ``vmax``
are not input.
vmin, vmax : float
The minimum and maximum levels to show for the data. The
``vmin`` and ``vmax`` inputs override any calculated values from
the ``interval`` and ``data`` inputs.
stretch : `~astropy.visualization.BaseStretch` subclass instance, optional
The stretch object to apply to the data. The default is
`~astropy.visualization.LinearStretch`.
clip : bool, optional
If `True` (default), data values outside the [0:1] range are
clipped to the [0:1] range.
"""
def __init__(self, data=None, interval=None, vmin=None, vmax=None,
stretch=LinearStretch(), clip=False):
# this super call checks for matplotlib
super().__init__(vmin=vmin, vmax=vmax, clip=clip)
self.vmin = vmin
self.vmax = vmax
if data is not None and interval is not None:
_vmin, _vmax = interval.get_limits(data)
if self.vmin is None:
self.vmin = _vmin
if self.vmax is None:
self.vmax = _vmax
if stretch is not None and not isinstance(stretch, BaseStretch):
raise TypeError('stretch must be an instance of a BaseStretch '
'subclass')
self.stretch = stretch
if interval is not None and not isinstance(interval, BaseInterval):
raise TypeError('interval must be an instance of a BaseInterval '
'subclass')
self.interval = interval
self.inverse_stretch = stretch.inverse
self.clip = clip
def __call__(self, values, clip=None):
if clip is None:
clip = self.clip
if isinstance(values, ma.MaskedArray):
if clip:
mask = False
else:
mask = values.mask
values = values.filled(self.vmax)
else:
mask = False
# Make sure scalars get broadcast to 1-d
if np.isscalar(values):
values = np.array([values], dtype=float)
else:
# copy because of in-place operations after
values = np.array(values, copy=True, dtype=float)
# Set default values for vmin and vmax if not specified
self.autoscale_None(values)
# Normalize based on vmin and vmax
np.subtract(values, self.vmin, out=values)
np.true_divide(values, self.vmax - self.vmin, out=values)
# Clip to the 0 to 1 range
if self.clip:
values = np.clip(values, 0., 1., out=values)
# Stretch values
values = self.stretch(values, out=values, clip=False)
# Convert to masked array for matplotlib
return ma.array(values, mask=mask)
def inverse(self, values):
# Find unstretched values in range 0 to 1
values_norm = self.inverse_stretch(values, clip=False)
# Scale to original range
return values_norm * (self.vmax - self.vmin) + self.vmin
def simple_norm(data, stretch='linear', power=1.0, asinh_a=0.1, min_cut=None,
max_cut=None, min_percent=None, max_percent=None,
percent=None, clip=True):
"""
Return a Normalization class that can be used for displaying images
with Matplotlib.
This function enables only a subset of image stretching functions
available in `~astropy.visualization.mpl_normalize.ImageNormalize`.
This function is used by the
``astropy.visualization.scripts.fits2bitmap`` script.
Parameters
----------
data : `~numpy.ndarray`
The image array.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh'}, optional
The stretch function to apply to the image. The default is
'linear'.
power : float, optional
The power index for ``stretch='power'``. The default is 1.0.
asinh_a : float, optional
For ``stretch='asinh'``, the value where the asinh curve
transitions from linear to logarithmic behavior, expressed as a
fraction of the normalized image. Must be in the range between
0 and 1. The default is 0.1.
min_cut : float, optional
The pixel value of the minimum cut level. Data values less than
        ``min_cut`` will be set to ``min_cut`` before stretching the image.
The default is the image minimum. ``min_cut`` overrides
``min_percent``.
max_cut : float, optional
The pixel value of the maximum cut level. Data values greater
        than ``max_cut`` will be set to ``max_cut`` before stretching the
image. The default is the image maximum. ``max_cut`` overrides
``max_percent``.
min_percent : float, optional
The percentile value used to determine the pixel value of
minimum cut level. The default is 0.0. ``min_percent``
overrides ``percent``.
max_percent : float, optional
The percentile value used to determine the pixel value of
maximum cut level. The default is 100.0. ``max_percent``
overrides ``percent``.
percent : float, optional
The percentage of the image values used to determine the pixel
values of the minimum and maximum cut levels. The lower cut
        level will be set at the ``(100 - percent) / 2`` percentile, while
the upper cut level will be set at the ``(100 + percent) / 2``
percentile. The default is 100.0. ``percent`` is ignored if
either ``min_percent`` or ``max_percent`` is input.
clip : bool, optional
If `True` (default), data values outside the [0:1] range are
clipped to the [0:1] range.
Returns
-------
result : `ImageNormalize` instance
An `ImageNormalize` instance that can be used for displaying
images with Matplotlib.
"""
if percent is not None:
interval = PercentileInterval(percent)
elif min_percent is not None or max_percent is not None:
interval = AsymmetricPercentileInterval(min_percent or 0.,
max_percent or 100.)
elif min_cut is not None or max_cut is not None:
interval = ManualInterval(min_cut, max_cut)
else:
interval = MinMaxInterval()
if stretch == 'linear':
stretch = LinearStretch()
elif stretch == 'sqrt':
stretch = SqrtStretch()
elif stretch == 'power':
stretch = PowerStretch(power)
elif stretch == 'log':
stretch = LogStretch()
elif stretch == 'asinh':
stretch = AsinhStretch(asinh_a)
else:
raise ValueError('Unknown stretch: {0}.'.format(stretch))
vmin, vmax = interval.get_limits(data)
return ImageNormalize(vmin=vmin, vmax=vmax, stretch=stretch, clip=clip)
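# ----------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of astropy itself).
# It shows the typical pairing of `simple_norm` with matplotlib's `imshow`; the
# synthetic Poisson image and the stretch/percentile choices are assumptions
# made purely for demonstration.
if __name__ == '__main__':  # pragma: no cover
    import matplotlib.pyplot as plt
    image = np.random.poisson(lam=5.0, size=(128, 128)).astype(float)
    norm = simple_norm(image, stretch='sqrt', percent=99.0)
    plt.imshow(image, norm=norm, origin='lower', cmap='gray')
    plt.colorbar()
    plt.show()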
| bsd-3-clause |
kdebrab/pandas | asv_bench/benchmarks/strings.py | 3 | 2934 | import warnings
import numpy as np
from pandas import Series
import pandas.util.testing as tm
class Methods(object):
goal_time = 0.2
def setup(self):
self.s = Series(tm.makeStringIndex(10**5))
def time_cat(self):
self.s.str.cat(sep=',')
def time_center(self):
self.s.str.center(100)
def time_count(self):
self.s.str.count('A')
def time_endswith(self):
self.s.str.endswith('A')
def time_extract(self):
with warnings.catch_warnings(record=True):
self.s.str.extract('(\\w*)A(\\w*)')
def time_findall(self):
self.s.str.findall('[A-Z]+')
def time_get(self):
self.s.str.get(0)
def time_len(self):
self.s.str.len()
def time_match(self):
self.s.str.match('A')
def time_pad(self):
self.s.str.pad(100, side='both')
def time_replace(self):
self.s.str.replace('A', '\x01\x01')
def time_slice(self):
self.s.str.slice(5, 15, 2)
def time_startswith(self):
self.s.str.startswith('A')
def time_strip(self):
self.s.str.strip('A')
def time_rstrip(self):
self.s.str.rstrip('A')
def time_lstrip(self):
self.s.str.lstrip('A')
def time_title(self):
self.s.str.title()
def time_upper(self):
self.s.str.upper()
def time_lower(self):
self.s.str.lower()
class Repeat(object):
goal_time = 0.2
params = ['int', 'array']
param_names = ['repeats']
def setup(self, repeats):
N = 10**5
self.s = Series(tm.makeStringIndex(N))
repeat = {'int': 1, 'array': np.random.randint(1, 3, N)}
self.repeat = repeat[repeats]
def time_repeat(self, repeats):
self.s.str.repeat(self.repeat)
class Contains(object):
goal_time = 0.2
params = [True, False]
param_names = ['regex']
def setup(self, regex):
self.s = Series(tm.makeStringIndex(10**5))
def time_contains(self, regex):
self.s.str.contains('A', regex=regex)
class Split(object):
goal_time = 0.2
params = [True, False]
param_names = ['expand']
def setup(self, expand):
self.s = Series(tm.makeStringIndex(10**5)).str.join('--')
def time_split(self, expand):
self.s.str.split('--', expand=expand)
class Dummies(object):
goal_time = 0.2
def setup(self):
self.s = Series(tm.makeStringIndex(10**5)).str.join('|')
def time_get_dummies(self):
self.s.str.get_dummies('|')
class Encode(object):
goal_time = 0.2
def setup(self):
self.ser = Series(tm.makeUnicodeIndex())
def time_encode_decode(self):
self.ser.str.encode('utf-8').str.decode('utf-8')
class Slice(object):
goal_time = 0.2
def setup(self):
self.s = Series(['abcdefg', np.nan] * 500000)
def time_vector_slice(self):
# GH 2602
self.s.str[:5]
| bsd-3-clause |
rsignell-usgs/notebook | ucsd_wms_test.py | 1 | 4178 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=2>
# Test WMS performance of joinExisting and FMRC aggregations from UCSD THREDDS
# <markdowncell>
# The [UCSD THREDDS server](http://hfrnet.ucsd.edu/thredds/catalog.html) has the largest archive of gridded HF Radar surface currents in the USA. But their services have been very frustrating to use because sometimes they are fast and sometimes very slow.
#
# Based on conversations with Tom Cook and Mark Otero, here's how the datasets are constructed:
#
# There are alway Matlab processes running, retrieving new HFRAdar data and as soon as a certain amount of data comes in, a gridded netcdf file is produced for a specific observation hour. As additional radar data comes in, the *data* in the file is updated, but the metadata stays the same. The file is updated by replacing the existing file with a new file of the same name.
#
# The files are scanned by a `joinExisting` aggregation scan and also by a `FMRC featureCollection` aggregation scan (accessible via the [FMRC aggregation catalog](http://hfrnet.ucsd.edu/thredds/fmrc-catalog.html))
#
# Below we test the WMS performance of the two aggregation techniques.
#
# We find that even though `FMRC` is not designed for this type of aggregation, it performs consistently better than the `joinExisting`.
#
# Why?
# <codecell>
from IPython.core.display import Image
import time
import pandas as pd
import datetime as dt
import numpy as np
import urllib
# <codecell>
%matplotlib inline
# <codecell>
t = range(60)
# <codecell>
bbox=[-75.413, -70.1733582764, 37.586, 41.68049] # [lonmin lonmax latmin latmax]
# <codecell>
wms1='http://hfrnet.ucsd.edu/thredds/wms/HFRNet/USEGC/6km/hourly/RTV?'
wms_query='LAYERS=surface_sea_water_velocity&ELEVATION=0&TIME=2015-03-11T00%3A00%3A00.000Z&TRANSPARENT=true&STYLES=vector%2Frainbow&CRS=EPSG%3A4326&COLORSCALERANGE=0%2C0.5&NUMCOLORBANDS=20&LOGSCALE=false&SERVICE=WMS&VERSION=1.1.1&REQUEST=GetMap&EXCEPTIONS=application%2Fvnd.ogc.se_inimage&FORMAT=image%2Fpng&SRS=EPSG%3A4326&'
wms_bbox = 'BBOX=%.6f,%.6f,%.6f,%.6f&WIDTH=768&HEIGHT=600' % (bbox[0],bbox[2],bbox[1],bbox[3])
wms_get_map1 = wms1 + wms_query + wms_bbox
print wms_get_map1
# <codecell>
wms2='http://hfrnet.ucsd.edu/thredds/wms/USEGC/RTV/6km/HFRADAR,_US_East_and_Gulf_Coast,_6km_Resolution,_Hourly_RTV_best.ncd?'
wms_get_map2 = wms2 + wms_query + wms_bbox
print wms_get_map2
# <codecell>
indx=[]
dti1=[]
dti2=[]
for i in t:
time.sleep(60)
box= bbox + np.random.uniform(-.1,.1,size=4)
    wms_bbox = 'BBOX=%.6f,%.6f,%.6f,%.6f&WIDTH=768&HEIGHT=600' % (box[0],box[2],box[1],box[3])
wms_get_map1 = wms1 + wms_query + wms_bbox
indx.append(dt.datetime.now())
time0=time.time()
urllib.urlretrieve(wms_get_map1, 'foo.png')
dti1.append(time.time()-time0)
wms_get_map2 = wms2 + wms_query + wms_bbox
time0=time.time()
urllib.urlretrieve(wms_get_map2, 'foo.png')
dti2.append(time.time()-time0)
# <codecell>
t1 = pd.Series(index=indx,data=dti1)
t1.plot(figsize=(12,4),marker='o')
# <codecell>
t2 = pd.Series(index=indx,data=dti2)
t2.plot(figsize=(12,4),marker='o')
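# <markdowncell>
# A quick numerical summary of the two timing series (an editorial sketch, not part of the original test): `t1` holds the `joinExisting` GetMap response times and `t2` the FMRC "best" aggregation times collected above.
# <codecell>
summary = pd.DataFrame({'joinExisting': t1.describe(), 'FMRC best': t2.describe()})
print summary
print "median ratio (joinExisting / FMRC):", t1.median() / t2.median()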
# <markdowncell>
# The configuration of the `joinExisting` is as follows:
#
# ```xml
# <dataset name="HFRADAR, US East and Gulf Coast, 2km Resolution, Hourly RTV"
# ID="HFRNet/USEGC/2km/hourly/RTV" urlPath="HFRNet/USEGC/2km/hourly/RTV">
# <metadata>
# <documentation type="Summary">HFRADAR, US East and Gulf Coast,
# 2km Resolution, Hourly Combined Total Vectors (RTV)</documentation>
# </metadata>
# <netcdf xmlns="http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2">
# <aggregation dimName="time" type="joinExisting" recheckEvery="10 min">
# <scan location="/exports/hfradar/hfrnet/hfrtv/USEGC" subdirs="true"
# olderThan="2 min"
# regExp=".*[0-9]{12}_hfr_usegc_2km_rtv_uwls_SIO\.nc$"/>
# </aggregation>
# </netcdf>
# </dataset>
# ```
# with the `threddsConfig.xml` cache settings like this:
# ```xml
# <AggregationCache>
# <scour>-1 hours</scour>
# </AggregationCache>
# ```
| mit |
jblackburne/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 86 | 4092 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
k_means_cluster_centers = np.sort(k_means.cluster_centers_, axis=0)
mbk_means_cluster_centers = np.sort(mbk.cluster_centers_, axis=0)
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
yaukwankiu/armor | pattern.py | 1 | 145731 | # -*- coding: utf-8 -*-
# defining the basic object we will be working with
# adapted from :
# /media/KINGSTON/ARMOR/ARMOR/python/weatherPattern.py ,
# /media/KINGSTON/ARMOR/ARMOR/python/clustering.py ,
# /media/KINGSTON/ARMOR/2013/pythonJan2013/basics.py
# Yau Kwan Kiu, Room 801, 23-1-2013
##############################################################################################
#
#==== imports ================================================================================
# some of the stuff were moved to defaultParameters.py
import copy
import time, datetime
import pickle
import struct
import os
import re
import numpy
import numpy as np
import numpy.ma as ma
#import matplotlib
import matplotlib.pyplot as plt
#import scipy.misc.pilutil as smp
#import numpy.fft as fft
#import shutil
#import sys
from copy import deepcopy
try:
from scipy import signal
from scipy import interpolate
except ImportError:
#print "Scipy not installed"
pass
#==== setting up the global parameters========================================================
import defaultParameters as dp # to keep a link 2014-03-12
import misc # to keep a link 2014-03-12
from defaultParameters import * # bad habit but all these variables are prefixed with "default"
# or at least i try to make them to
from misc import * # same - default functions
import colourbarQPESUMS # the colourbars for the Central Weather Bureau
import colourbarQPESUMSwhiteBackground # the same as above, with white backgrounds
#==== defining the classes ===================================================================
class DBZ(object): #python 2.7 (?) new style class, subclassing object
"""module predecessors: basics.py; weatherPattern.py
NOTE: a DBZ object can be loaded from data or generated in run time (e.g. by translation, or
other operations.) There is flexibility in this. In particular, the object is instantiated before
its values loaded (either from file or from other computations).
attributes (some to be defined at __init__, some afterwards):
DBZ.name - a string, the name of the instance, default = something like "DBZ20120612.0200"
DBZ.matrix - a numpy.ma.core.MaskedArray object
    DBZ.dataTime   - a string like "20120612.0200"
DBZ.dataPath - a string like "../data/dbz20120612.0200.dat"
can be relative (preferred) or absolute
DBZ.outputPath - a string like "../data/dbz20120612.0200.dat"
can be relative (preferred) or absolute
DBZ.inputType - a string to record the type of input file, most common is "txt",
which should be 2-dim arrays in text, separated by " " and "\n",
readable by numpy or matlab
- convention: first row of data = bottom row of pixels
DBZ.image - I am not going to define this yet, since matplotlib.pyplot is pretty fast
DBZ.imagePath - a string like "../data/dbz20120612.0200.png"
can be relative (preferred) or absolute
default = "" (undefined)
DBZ.dt - time interval from the previous image (default=1; how about 10mins = 1/6 hour??)
DBZ.dy - grid size, latitudinal, in km (default =1; how about 0.0125 degree = how many kms?)
DBZ.dx - grid size, longitudinal, in km (same as above)
DBZ.timeStamp - time stamp when the object was created
DBZ.verbose - whether print out a lot of stuff when we work with this object
#################################################################
# DBZ.inputFolder - a string, self evident # <-- not used yet,
# DBZ.outputFolder - ditto # perhaps not here
# DBZ.outputFolderForImages - ditto #
#################################################################
DBZ.database - a string, pointing to the database, somehow, for future
methods:
DBZ.load - load into DBZ.matrix
DBZ.save
DBZ.saveImage
DBZ.printToScreen
use:
>>> from armor import pattern
>>> a = pattern.DBZ(dataTime="20120612.0200",name="", dt=1, dx=1, dy=1, dataPath="", imagePath="")
>>> a.load()
>>> a.printToScreen()
>>> import numpy as np
>>> import armor
>>> import armor.pattern as pattern
>>> dbz=pattern.DBZ
>>> a = dbz('20120612.0300')
DBZ20120612.0300initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> b = dbz('20120612.0330')
DBZ20120612.0330initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> a.load()
    >>> b.load()
>>> c=a-b
DBZ20120612.0300_minus_DBZ20120612.0330initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> c.show()
>>> d=a*b
DBZ20120612.0300_times_DBZ20120612.0330initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> d.show()
>>>
"""
def __init__(self, dataTime="NoneGiven", matrix=-999, name="", dt=1, dx=1, dy=1,\
dataPath="",outputPath ="",imagePath="",\
cmap=defaultCmap, vmin=-20, vmax=100, coordinateOrigin="default",\
coastDataPath="", relief100DataPath='', relief1000DataPath='',\
relief2000DataPath='', relief3000DataPath='',\
lowerLeftCornerLatitudeLongitude ='',\
upperRightCornerLatitudeLongitude ='',\
database="",
allocateMemory=True,
imageTopDown="",
missingDataThreshold="", #added 2014-02-20
imageDataFolder="", #2014-09-15
verbose=False):
self.timeStamp = str(int(time.time()))
"""
Notes:
1. cmap = colourbar of the dbz plot, need to find out how to plot it with
CWB's colour scheme as specified in the modules colourbarQPESUMS
and colourbarQPESUMSwhiteBackground
2. coordinateOrigin: normally either place at the centre of the picture
or at Taichung Park
(24.145056°N 120.683329°E)
which translates to
(492, 455) in our 881x921 grid
reference:
http://zh.wikipedia.org/wiki/%E8%87%BA%E4%B8%AD%E5%85%AC%E5%9C%92
/media/KINGSTON/ARMOR/2013/python/testing/test104/test104.py
"""
########
#
if name == "":
name = "DBZ" + dataTime
if type(matrix)==type(-999): # if matrix not given,
# the following line is replaced 2013-09-07 due to memory waste
#matrix = ma.zeros((defaultHeight, defaultWidth)) # initialise with zeros
matrix = ma.zeros((1,1))
matrix.mask = False
matrix.fill_value = -999 # -999 for missing values always
if isinstance(matrix, ma.MaskedArray):
matrix.fill_value = -999
if missingDataThreshold=="":
missingDataThreshold = dp.defaultMissingDataThreshold #added 2014-02-20
if isinstance(matrix, np.ndarray) and not isinstance(matrix, ma.MaskedArray):
matrix = matrix.view(ma.MaskedArray)
matrix.mask = (matrix<missingDataThreshold )
matrix.fill_value = -999
if dataPath =="":
dataPath =dp. defaultInputFolder + "COMPREF." + dataTime +".dat"
if outputPath =="":
outputPath = dp.defaultOutputFolder + name + '_'+ self.timeStamp + ".dat"
if imagePath =="":
imagePath = dp.defaultOutputFolderForImages + name + '_'+self.timeStamp + ".png"
if coastDataPath == "":
coastDataPath = dp.defaultInputFolder + "taiwanCoast.dat"
if relief100DataPath == "":
relief100DataPath = dp.defaultInputFolder + "relief100.dat"
if relief1000DataPath == "":
relief1000DataPath = dp.defaultInputFolder + "relief1000.dat"
if relief2000DataPath == "":
relief2000DataPath = dp.defaultInputFolder + "relief2000.dat"
if relief3000DataPath == "":
relief3000DataPath = dp.defaultInputFolder + "relief3000.dat"
if lowerLeftCornerLatitudeLongitude =="":
lowerLeftCornerLatitudeLongitude = dp.defaultLowerLeftCornerLatitudeLongitude
if upperRightCornerLatitudeLongitude=="":
upperRightCornerLatitudeLongitude = dp.defaultUpperRightCornerLatitudeLongitude
if imageTopDown=="":
imageTopDown = dp.defaultImageTopDown
if database =="": # an extra parameter not yet used
database = dp.defaultDatabase
if imageDataFolder =="":
imageDataFolder= dp.defaultImageDataFolder
###############################################################################
# if matrix shape = (881, 921) then by default the origin at Taichung Park
# (24.145056°N 120.683329°E)
# or (492, 455) in our grid
# else the centre is the origin by default
###############################################################################
if coordinateOrigin == "default": #default
if matrix.shape == (881, 921) or matrix.size<10: # hack
coordinateOrigin = (492, 455)
else:
coordinateOrigin = (matrix.shape[0]//2, matrix.shape[1]//2)
elif coordinateOrigin == "centre" or coordinateOrigin=="center":
coordinateOrigin = (matrix.shape[0]//2, matrix.shape[1]//2)
elif (coordinateOrigin == 'Taichung' or \
coordinateOrigin == 'Taichung Park' or\
coordinateOrigin == 'taichungpark') and matrix.shape==(881,921):
coordinateOrigin = (492,455)
#coordinateOrigin = (0,0) # switch it off - will implement coordinate Origin later
if verbose:
print "------------------------------------------------------------------"
print "armor.pattern.DBZ:\nname, dt, dx, dy, dataPath, imagePath ="
print name, dt, dx, dy, dataPath, imagePath
#
########
self.matrix = matrix
self.dataTime = dataTime
self.name = name
self.dt = dt #retrospective
self.dx = dx #grid size
self.dy = dy
self.outputFolder= defaultOutputFolder
self.dataPath = dataPath
self.outputPath = outputPath
self.imagePath = imagePath
self.coastDataPath = coastDataPath
self.relief100DataPath = relief100DataPath
self.relief1000DataPath = relief1000DataPath
self.relief2000DataPath = relief2000DataPath
self.relief3000DataPath = relief3000DataPath
self.lowerLeftCornerLatitudeLongitude = lowerLeftCornerLatitudeLongitude
self.upperRightCornerLatitudeLongitude = upperRightCornerLatitudeLongitude
self.database = database
self.cmap = cmap
self.vmin = vmin # min and max for makeImage()
self.vmax = vmax
self.coordinateOrigin = coordinateOrigin
self.O = self.coordinateOrigin #alise, no guarentee
self.imageTopDown = imageTopDown
self.missingDataThreshold = missingDataThreshold
self.verbose = verbose
#self.imageDataFolder = imageDataFolder #commented out for now, don't want to set this attribute until we are actually using it, i.e. with the relevant harddisc plugged in.
#self.features = 0 # initialise
#self.matrix_backups = [] # for storage
#if verbose:
# print(self.name + "initialised. Use the command '___.load()' to load your data, " +\
# "and '__.printToScreen()' to print it to screen.")
#################################################################################
# basic operator overloads
def __call__(self, i=-999, j=-999, display=False):
if i ==-999 and j ==-999:
height, width = self.matrix.shape
h = int(height**.5 /2)
w = int(width**.5 /2)
print self.matrix.filled().astype(int)[height//2-h:height//2+h,\
width//2-w: width//2+w]
return self.matrix.filled().astype(int)
else:
"""
            returns the bilinearly interpolated value of the field at the fractional grid position (i, j)
"""
arr= self.matrix
i0 = int(i)
j0 = int(j)
i1 = i0 + 1
j1 = j0 + 1
i_frac = i % 1
j_frac = j % 1
f00 = arr[i0,j0]
f01 = arr[i0,j1]
f10 = arr[i1,j0]
f11 = arr[i1,j1]
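            # bilinear interpolation: each of the four surrounding grid values is
            # weighted by the area of the sub-rectangle opposite to it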
interpolated_value = (1-i_frac)*(1-j_frac) * f00 + \
(1-i_frac)*( j_frac) * f01 + \
( i_frac)*(1-j_frac) * f10 + \
( i_frac)*( j_frac) * f11
if display:
print i_frac, j_frac, f00, f01, f10, f11
return interpolated_value
def __add__(self, DBZ2):
"""defining the addition of two pattern.DBZ objects
c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
can move to CUDA in the future
"""
return DBZ(dataTime=self.dataTime, matrix=self.matrix+DBZ2.matrix,\
name=self.name+"_plus_"+DBZ2.name, \
dt=self.dt, dx=self.dx, dy=self.dy,\
dataPath =self.outputPath+"_plus_"+DBZ2.name+".dat",\
outputPath=self.outputPath+"_plus_"+DBZ2.name+".dat",\
imagePath =self.imagePath +"_plus_"+DBZ2.name+".png",\
database =self.database,\
cmap=self.cmap, verbose=self.verbose)
def __sub__(self, DBZ2):
"""defining the subtraction of two pattern.DBZ objects
c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
can move to CUDA in the future
"""
return DBZ(dataTime=self.dataTime, matrix=self.matrix-DBZ2.matrix,\
name=self.name+"_minus_"+DBZ2.name, \
dt=self.dt, dx=self.dx, dy=self.dy,\
dataPath =self.outputPath+"_minus_"+DBZ2.name+".dat",\
outputPath=self.outputPath+"_minus_"+DBZ2.name+".dat",\
imagePath =self.imagePath +"_minus_"+DBZ2.name+".png",\
database =self.database,\
cmap=self.cmap, verbose=self.verbose)
def __mul__(self, M):
""" defining multiplication
c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
can move to CUDA in the future
"""
if type(M)==type(1) or type(M)==type(1.1) or type(M)==type(self.matrix) :
matrix = self.matrix * M
name=self.name+"_times_"+ str(M)
if type(M)==type(self):
matrix = self.matrix * M.matrix
name=self.name+"_times_"+ M.name
return DBZ(dataTime=self.dataTime, matrix=matrix,\
dt=self.dt, dx=self.dx, dy=self.dy,\
name =name,
dataPath =self.outputPath+"_times_"+str(M)+".dat",\
outputPath=self.outputPath+"_times_"+str(M)+".dat",\
imagePath =self.imagePath +"_times_"+str(M)+".png",\
database =self.database,\
cmap=self.cmap, verbose=self.verbose)
def __rmul__(self, M):
""" defining multiplication on the right
c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
can move to CUDA in the future
"""
if type(M)==type(1) or type(M)==type(1.1) or type(M)==type(self.matrix) :
matrix = self.matrix * M
name=self.name+"_times_"+ str(M)
if type(M)==type(self):
matrix = self.matrix * M.matrix
name=self.name+"_times_"+ M.name
return DBZ(dataTime=self.dataTime, matrix=matrix,\
dt=self.dt, dx=self.dx, dy=self.dy,\
name =name,
dataPath =self.outputPath+"_times_"+str(M)+".dat",\
outputPath=self.outputPath+"_times_"+str(M)+".dat",\
imagePath =self.imagePath +"_times_"+str(M)+".png",\
database =self.database,\
cmap=self.cmap, verbose=self.verbose)
# end basic operator overloads
##################################
############################################################
# basic i/o's
def load(self, toInferPositionFromShape=True, *args, **kwargs):
"""
DBZ.load - load into DBZ.matrix
adapted from basics.readToArray(path)
"""
if self.dataPath.endswith('.png') or self.dataPath.endswith('.jpg'):
return self.loadImage() #2014-07-26
try: #if it's text
m = np.loadtxt(self.dataPath)
except ValueError: # try to load with the binary option
m = self.loadBinary(**kwargs)
except IOError:
m = self.loadImage(*args, **kwargs).matrix
self.matrix = ma.array(m)
# setting the mask
self.matrix.fill_value = -999 # -999 for missing values
# self.matrix.fill_value = -20.1 # -20 for missing values
self.matrix.mask = (m < self.missingDataThreshold) # smaller than -20 considered no echo
# 1 March 2013
# converted to -40, 12 september 2013
# converted to missingDataThreshold, 16 september 2013
# anything below " missingDataThreshold is marked as "masked"
# changed to (m < self.missingDataThreshold) , 20 feb 2014
if toInferPositionFromShape: # 26 March 2014
# determining the type/position of the grid by the size of the grid (heuristics)
if self.matrix.shape == (150,140):
self.lowerLeftCornerLatitudeLongitude = defaultWRFLowerLeftCornerLatitudeLongitude
self.upperRightCornerLatitudeLongitude = defaultWRFUpperRightCornerLatitudeLongitude
##
# THE FOLLOWING IS SKIPPED TO SAVE MEMORY
# loading coastal data
#try:
# self.coastData = np.loadtxt(self.coastDataPath)
#except:
# print "Cannot load coast data from the path: ", self.coastDataPath
return self
def loadBinary(self, height=201, width=183):
"""
codes from Dr. C. Y. Feng, 19 May 2014
        e.g.
        a = pattern.DBZ(dataPath="/media/TOSHIBA EXT/ARMOR/data/1may2014/RADARCV/COMPREF.20140501.1200.0p03.bin")
"""
dataFile=open(self.dataPath,'rb')
data = []
for i in range(height):
dataV = struct.unpack("!%df" % width, dataFile.read(4*width))
data.append(list(dataV))
dataFile.close()
self.matrix = ma.array(data)
return self.matrix
def loadImage(self, *args, **kwargs):
# to load an image into matrix (jpg/png), if it's not grey-scale make it so
from basicio import loadImage
if not hasattr(self, 'imageDataFolder'):
self.imageDataFolder = dp.defaultImageDataFolder
return loadImage.loadImage(a=self, inputFolder=self.imageDataFolder, *args, **kwargs)
def loadCoast(self):
try:
self.coastData = np.loadtxt(self.coastDataPath)
except:
self.coastData = np.loadtxt(defaultTaiwanReliefDataFolder + 'taiwanCoast.dat') #fallback
def load100(self):
self.coastData = np.loadtxt(self.relief100DataPath)
def load1000(self):
self.coastData = np.loadtxt(self.relief1000DataPath)
def load2000(self):
self.coastData = np.loadtxt(self.relief2000DataPath)
def load3000(self):
self.coastData = np.loadtxt(self.relief3000DataPath)
def toArray(self):
"""convert return a normal array filled with -999 for missing values for other uses
"""
return ma.filled(self.matrix)
def save(self, outputPath=""):
"""
* We convert the masked array into a standard array with masked data filled by -999
* adapted from basics.writeArrayToTxtFile(arr, path, as_integer=False):
if as_integer:
np.savetxt(path, arr, fmt='%.0f') # 0 decimal place
else:
np.savetxt(path, arr, fmt='%.2f') # two decimal places as default
"""
if outputPath =="":
outputPath = self.outputPath
np.savetxt(outputPath, self.toArray())
def constructFromFunction(self, func, funcParameters={},
origin="", height="", width="", newName="",
takeRealPart=False, takeImagPart=False, resetVmaxmin=True):
"""
to construct a matrix from a function
added 2014-04-09
"""
# setting the coordinate origin
if origin == "":
origin = self.coordinateOrigin
else:
self.coordinateOrigin = origin
i0, j0 = origin
# setting an empty matrix
if hasattr(self, "matrix"):
self.matrix *= 0
height, width = self.matrix.shape
else:
if height == "":
height = dp.defaultHeight
if width == "":
width = dp.defaultWidth
self.matrix = np.zeros((height, width))
# computing
X, Y = np.meshgrid(range(width), range(height))
I, J = Y, X
Z = func(I-i0, J-j0, **funcParameters)
if takeRealPart:
self.fullMatrix = Z
Z = np.real(Z)
if takeImagPart:
self.fullMatrix = Z
Z = np.imag(Z)
self.matrix = Z
if newName != "":
self.name = newName
if resetVmaxmin:
self.vmax = Z.max()
self.vmin = Z.min()
#del self.dataPath
return Z
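    # Usage sketch for constructFromFunction (editorial addition; the gaussian bump
    # below is an invented illustration, not from the original code base):
    #     a = DBZ(name='gaussianBump', matrix=ma.zeros((881, 921)))
    #     bump = lambda I, J, s=50.: 40. * np.exp(-(I**2 + J**2) / (2.*s*s))
    #     a.constructFromFunction(bump)
    #     a.show()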
def saveMatrix(self, *args, **kwargs):
""" alias for self.save()
"""
return self.save(*args, **kwargs)
def makeImage(self, matrix="", vmin=99999, vmax=-99999, cmap="", title="",\
showColourbar=True, closeAll=True,
useSubplot=False,
*args, **kwargs):
"""
requires: matplotlib
to make the plot before you save/print it to screen
*adapted from basics.printToScreen(m,cmap='gray'):
which was in turn adapted from stackoverflow:
http://stackoverflow.com/questions/7875688/how-can-i-create-a-standard-colorbar-for-a-series-of-plots-in-python
def printToScreen(m,cmap='gray'):
fig, axes = plt.subplots(nrows=1, ncols=1)
# The vmin and vmax arguments specify the color limits
im = axes.imshow(m, vmin=-20, vmax=100, cmap=cmap)
cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
fig.colorbar(im, cax=cax)
plt.show()
!!! TO DO: FIX THE AXES !!!
"""
if matrix=="":
matrix = self.matrix
if vmin == 99999 or vmin=="":
vmin = self.vmin
if vmax == -99999 or vmax=="":
vmax = self.vmax
else:
if vmin == 99999 or vmin=="":
vmin = matrix.min()
if vmax == -99999 or vmax=="":
vmax = matrix.max()
#print "vmax, vmin:", vmax, vmin #debug
if title =="":
title = self.name
if cmap == "":
cmap = self.cmap
# clear the canvass
if closeAll:
#plt.clf()
plt.close()
# make the image
if not self.imageTopDown:
#matrix = np.flipud(matrix)
imshowOrigin = 'lower'
else:
imshowOrigin = 'upper'
if useSubplot:
fig, axes = plt.subplots(nrows=1, ncols=1)
im = axes.imshow(matrix, # or np.flipud(self.matrix)?
vmin=vmin, vmax=vmax, cmap=cmap, # The vmin and vmax arguments specify the color limits
origin=imshowOrigin, # 2013-10-15
)
if showColourbar :
cax = fig.add_axes([0.9, 0.1, 0.01, 0.8])
fig.colorbar(im,cax=cax)
else:
plt.imshow(matrix, vmin=vmin, vmax=vmax, cmap=cmap, origin=imshowOrigin) #2014-07-17
if showColourbar:
#plt.add_axes([0.9, 0.1, 0.01, 0.8])
plt.colorbar()
plt.title(title)
#plt.show() # wait, don't show!
def saveImage(self, imagePath="", matrix="",
#dpi=200,
dpi='default',
**kwargs):
#if matrix=="":
# matrix = self.matrix
if imagePath == "":
imagePath = self.imagePath
self.makeImage(matrix, **kwargs)
if dpi =="default":
plt.savefig(imagePath)
else:
plt.savefig(imagePath, dpi=dpi)
#def printToScreen(self, matrix="", cmap=""):
def printToScreen(self, block=False, *args, **kwargs): #2013-12-06
#self.makeImage(matrix=matrix, cmap=cmap)
self.makeImage(*args, **kwargs)
plt.show(block=block)
#def show(self, matrix="", cmap=""):
def show(self, *args, **kwargs): #2013-12-06
"""alias to printToScreen()
"""
#self.printToScreen(matrix=matrix, cmap=cmap)
self.printToScreen(*args, **kwargs)
def showWith(self, b, title1="", title2="", block=False, *args, **kwargs):
""" showing two charts together
minimalistic approach
"""
if title1 =="":
title1=self.name
if title2 =="":
title2 = b.name
plt.clf()
plt.subplot(121)
if self.imageTopDown:
imshowOrigin='upper'
else:
imshowOrigin='lower'
plt.imshow(self.matrix, vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, origin=imshowOrigin)
plt.title(title1)
plt.subplot(122)
if b.imageTopDown:
imshowOrigin='upper'
else:
imshowOrigin='lower'
plt.imshow(b.matrix, vmin=b.vmin, vmax=b.vmax, cmap=b.cmap, origin=imshowOrigin)
plt.title(title2)
plt.show(block=block)
def showWithFlip(self, cmap=""):
"""flip it upside down and show it
"""
self.matrix = np.flipud(self.matrix)
self.printToScreen(cmap=cmap)
def showWithCoast(self, matrix="", cmap='', intensity=9999):
#if matrix=="":
# matrix=self.matrix
try:
if self.showingWithCoast: # if already showing coast: do nothing
self.show(matrix=matrix)
return None # just show and go
except AttributeError: # if it didn't happen before: default = False
self.showingWithCoast = False # just do something
self.showingWithCoast = True
self.backupMatrix('chartWithoutCoast') #2014-05-22
if cmap != '':
self.cmap_backup = self.cmap
self.cmap = cmap
else:
self.cmap_backup = self.cmap
try:
if self.coastData == "" : print "haha" #test for existence
except AttributeError:
self.loadCoast()
print "\n... coast data loaded from ", self.coastDataPath, "for ", self.name
for v in self.coastData:
self.matrix[v[0], v[1]] = intensity
try:
self.matrix.mask[v[0], v[1]].mask = 0
except AttributeError: # AttributeError: 'numpy.float64' object has no attribute 'mask'
pass
#matrix=self.matrix #this line is commented out so that self.vmin and self.vmax are used in makeImage()
self.show(matrix=matrix)
def showWithoutCoast(self):
"""resetting
"""
self.showingWithCoast = False
self.cmap = self.cmap_backup
self.matrix = self.matrix_backups['chartWithoutCoast']
self.show()
def show2(self, cmap='', intensity=99999):
""" adding the coastline and then flip it
"""
try:
if self.showingWithCoast: # if already showing coast: do nothing
self.show()
return None # just show and go
except AttributeError: # if it didn't happen before: default = False
self.showingWithCoast = False # just do something
self.showingWithCoast = True
self.matrix_backup = self.matrix.copy()
if cmap != '':
            self.cmap_backup = self.cmap   # plain assignment: cmap may be a string, which has no .copy()
self.cmap = cmap
else:
self.cmap_backup = self.cmap
try:
if self.coastData == "" : print "haha" #test for existence
except AttributeError:
self.loadCoast()
print "\n... coast data loaded from ", self.coastDataPath, "for ", self.name
for v in self.coastData:
self.matrix[v[0], v[1]] = intensity
self.matrix = np.flipud(self.matrix)
self.printToScreen(cmap=cmap)
def show3(self):
"""alias
"""
self.showWithoutCoast()
def showInverted(self):
self.matrix = np.flipud(self.matrix)
self.printToScreen()
self.matrix = np.flipud(self.matrix)
def show0(self):
"""alias
"""
self.showInverted()
def show4(self):
"""alias
"""
self.showInverted()
def backupMatrix(self, name=""):
"""backing up self.matrix for analysis
paired with self.restoreMatrix()
"""
try:
self.backupCount += 1
if name =="":
name = self.backupCount
self.matrix_backups[name] = self.matrix.copy()
except AttributeError:
self.backupCount = 0
self.matrix_backups = {}
if name =="":
name = self.backupCount
self.matrix_backups[name] = self.matrix.copy()
def restoreMatrix(self, name =""):
"""see self.backupMatrix() for comments
"""
if name =="":
name = self.backupCount
self.matrix = self.matrix_backups[name].copy()
# end basic i/o's
############################################################
#############################################################
# new objects from old
def copy(self):
"""returning a copy of itself
9 March 2013
"""
return DBZ(dataTime =self.dataTime,
matrix =self.matrix.copy(),
name =self.name,
dt =self.dt,
dx =self.dx,
dy =self.dy,
dataPath =self.dataPath,
outputPath=self.outputPath[:-4]+ '[copy]' + self.outputPath[-4:],
imagePath=self.imagePath[:-4] + '[copy]' + self.imagePath[-4:],
coastDataPath=self.coastDataPath,
relief100DataPath=self.relief100DataPath,
relief1000DataPath=self.relief1000DataPath,
relief2000DataPath=self.relief2000DataPath,
relief3000DataPath=self.relief3000DataPath,
database =self.database,
cmap =self.cmap,
vmin =self.vmin,
vmax =self.vmax,
coordinateOrigin= self.coordinateOrigin,
lowerLeftCornerLatitudeLongitude = self.lowerLeftCornerLatitudeLongitude,
upperRightCornerLatitudeLongitude =self.upperRightCornerLatitudeLongitude,
imageTopDown=self.imageTopDown,
verbose =self.verbose)
def setMaxMin(self, vmax="", vmin=""):
if vmax=="":
self.vmax = self.matrix.max()
else:
self.vmax = vmax
if vmin=="":
self.vmin = self.matrix.min()
else:
self.vmin=vmin
def drawCross(self, i="", j="", radius=5, intensity=9999, newObject=True):
"""to draw a cross (+) at the marked point
"""
# to draw a list of crosses - added 2014-01-23
if isinstance(i, list):
if newObject:
a2 = self.copy()
else:
a2 = self
if j!="":
radius = j
for p, q in i: # i = list of points
a2 = a2.drawCross(i=p, j=q, radius=radius, intensity=intensity)
return a2
# codes before 2014-01-23
if i=="" or j=="":
i=self.coordinateOrigin[0]
j=self.coordinateOrigin[1]
matrix=self.matrix.copy()
matrix[i-radius:i+radius+1, j ] = intensity
matrix[i , j-radius:j+radius+1] = intensity
if newObject:
return DBZ(dataTime =self.dataTime,
matrix = matrix,
name =self.name + \
", cross at x,y=(%d,%d), radius=%d" %\
(j, i, radius),
dt =self.dt,
dx =self.dx,
dy =self.dy,
dataPath =self.dataPath,
outputPath=self.outputPath[:-4]+ '_with_cross' + self.outputPath[-4:],
imagePath=self.imagePath[:-4]+ '_with_cross' + self.imagePath[-4:],
coastDataPath=self.coastDataPath,
database =self.database,
cmap =self.cmap,
vmin =self.vmin,
vmax =self.vmax,
coordinateOrigin= self.coordinateOrigin,
lowerLeftCornerLatitudeLongitude = self.lowerLeftCornerLatitudeLongitude,
upperRightCornerLatitudeLongitude =self.upperRightCornerLatitudeLongitude,
verbose =self.verbose)
else:
self.matrix = matrix
return self
def drawFrame(self, intensity=9999, newCopy=False):
"""
2013-11-08
"""
if newCopy:
a = self.copy() # no need for this i guess!!!
else:
a = self
a.matrix[ 0,:] = intensity
a.matrix[-1,:] = intensity
a.matrix[ :, 0] = intensity
a.matrix[ :,-1] = intensity
a.matrix.mask[ 0,:] = 0
a.matrix.mask[-1,:] = 0
a.matrix.mask[ :, 0] = 0
a.matrix.mask[ :,-1] = 0
return a
def drawCoast(self, intensity=9999, matrix="", newCopy=False, newObject=False):
"""
adapted from DBZ.show2()
"""
if newCopy or newObject:
a = self.copy() # no need for this i guess!!!
else:
a = self
if matrix =="":
matrix = a.matrix
try:
if a.coastData == "" : print "haha" #test for existence
except AttributeError:
a.loadCoast()
print "\n... coast data loaded from ", a.coastDataPath, "for ", a.name
for v in a.coastData:
matrix[v[0], v[1]] = intensity
matrix.mask[v[0], v[1]] = 0
return a
def recentreTaichungPark(self):
"""
2013-08-27
use:
a = pattern.a
a.showTaichungPark()
takes as input
attributes:
lowerLeftCornerLatitudeLongitude
upperRightCornerLatitudeLongitude
constants:
taichung park coordinates (24.145056°N 120.683329°E)
changes:
self.coordinateOrigin
self.O
returns:
grid square for taichung park
"""
#global taichungParkLatitude, taichungParkLongitude
height, width = self.matrix.shape
i0 = taichungParkLatitude #defined in defaultParameters.py
j0 = taichungParkLongitude
# the above two lines dont work, here's a hack fix
#import defaultParameters
#j0 = defaultParameters.taichungParkLongitude
#i0 = defaultParameters.taichungParkLatitude
i1, j1 = self.lowerLeftCornerLatitudeLongitude
i2, j2 = self.upperRightCornerLatitudeLongitude
i3 = 1.*(i0-i1)*height/(i2-i1) # (latitudeTCP-latLowerleft) * grid per latitude
j3 = 1.*(j0-j1)*width/(j2-j1) # ditto for longitude
self.coordinateOrigin = (i3,j3)
self.O = (i3,j3)
return i3, j3
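    # Editorial note on the conversion above: it is plain linear interpolation in
    # latitude and longitude,
    #     i3 = (lat_TCP - lat_lowerLeft) / (lat_upperRight - lat_lowerLeft) * height
    #     j3 = (lon_TCP - lon_lowerLeft) / (lon_upperRight - lon_lowerLeft) * width
    # so the result depends entirely on the corner coordinates configured in
    # defaultParameters.py.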
def recentre(self):
"""alias for recentreTaichungPark(self)
"""
return self.recentreTaichungPark()
def recenter(self):
"""alias for recentreTaichungPark(self)
"""
return self.recentreTaichungPark()
def drawRectangle(self, bottom=0, left=0, height=100, width=100, intensity=9999, thickness=1, newObject=True):
""" return a copy with a rectangle on the image
"""
vmax = self.vmax
matrix = self.matrix.copy()
for i in range(bottom, bottom+height):
matrix[i , left:left+thickness] = intensity
#matrix[i , left] = intensity
matrix[i , left+width:left+width+thickness] = intensity
for j in range(left, left+width):
#matrix[bottom:bottom+2, j] = intensity
matrix[bottom, j:j+thickness] = intensity
matrix[bottom+height:bottom+height+thickness, j] = intensity
if newObject:
return DBZ(dataTime =self.dataTime,
matrix = matrix,
name =self.name + \
", rectangle at x,y=(%d,%d), width=%d, height=%d" %\
(left, bottom, width, height),
dt =self.dt,
dx =self.dx,
dy =self.dy,
dataPath =self.dataPath,
outputPath=self.outputPath,
imagePath=self.imagePath,
coastDataPath=self.coastDataPath,
database =self.database,
cmap =self.cmap,
vmin =self.vmin,
vmax =self.vmax,
coordinateOrigin= self.coordinateOrigin,
verbose =self.verbose)
else:
self.matrix = matrix
return self
def drawRectangleForValue(self, N=20, N2="", **kwargs):
""" to draw a rectanglular hull for all points with the given value range
"""
if N2 =="":
N2=N
indexMatrix = (self.matrix>=N) * (self.matrix<=N2)
locations = np.argwhere(indexMatrix)
        if locations.tolist() == []:    # comparing a numpy array to [] directly never catches the empty case
return self
else:
iMax = max([v[0] for v in locations])
iMin = min([v[0] for v in locations])
jMax = max([v[1] for v in locations])
jMin = min([v[1] for v in locations])
return self.drawRectangle(iMin, jMin, iMax-iMin, jMax-jMin)
def getRegionForValue(self, N=20, N2="", **kwargs):
""" to draw a rectanglular hull for all points with the given value range
"""
if N2 =="":
N2=N
indexMatrix = (self.matrix>=N) * (self.matrix<=N2)
locations = np.argwhere(indexMatrix)
if locations.tolist() == []:
return (-1,-1,0,0)
else:
iMax = max([v[0] for v in locations])
iMin = min([v[0] for v in locations])
jMax = max([v[1] for v in locations])
jMin = min([v[1] for v in locations])
return (iMin, jMin, iMax-iMin, jMax-jMin)
def getWindow(self, bottom=0, left=0, height=100, width=100):
"""return a dbz object, a window view of itself
"""
name = self.name +'_windowed' + '_bottom' + str(bottom) +\
'_left' + str(left) + '_height' + str(height) + '_width' + str(width)
matrix = self.matrix.copy()
matrix = matrix[bottom:bottom+height, left:left+width]
return DBZ(dataTime =self.dataTime,
matrix = matrix,
name = name,
dt =self.dt,
dx =self.dx,
dy =self.dy,
dataPath =self.dataPath,
outputPath=self.outputPath,
imagePath=self.imagePath,
coastDataPath=self.coastDataPath,
database =self.database,
cmap =self.cmap,
vmin =self.vmin,
vmax =self.vmax,
coordinateOrigin = (height//2, width//2) , #hack
verbose =self.verbose)
def getRectangle(self, *args, **kwargs):
"""
alias
"""
return self.getWindow(*args, **kwargs)
def getRectangularHull(self, points):
"""2014-03-13"""
iMax = max(v[0] for v in points)
iMin = min(v[0] for v in points)
jMax = max(v[1] for v in points)
jMin = min(v[1] for v in points)
height = iMax-iMin
width = jMax-jMin
return (iMin, jMin, height, width)
def drawRectangularHull(self, points, newObject=False, **kwargs):
"""2014-03-13"""
rec = self.getRectangularHull(points)
self.drawRectangle(*rec, newObject=newObject, **kwargs)
def shiftMatrix(self,i,j):
"""shifting the array/dbz pattern; masking the edge
codes migrated from shiiba.py (now armor.shiiba.regression) to here
i = shift in axis-0 = going up
j = shift in axis-1 = going right
"""
#1. copy the matrix
matrix = self.matrix.copy()
#2. shift the matrix
matrix = np.roll(matrix, i,axis=0)
matrix = np.roll(matrix, j,axis=1)
#3. mask the edges
if i>0: # up
matrix.mask[ :i, : ] = 1 #mask the first (=bottom) i rows
if i<0: # down
matrix.mask[i: , : ] = 1 #mask the last (=top) rows; i<0
if j>0: # right
matrix.mask[ : , :j] = 1 #mask the first (=left) columns
if j<0: # left
matrix.mask[ : ,j: ] = 1 #mask the last (=right) columns
#4. return an armor.pattern.DBZ object
self_shifted_by_ij =DBZ(dataTime=self.dataTime, matrix=matrix,\
name=self.name+"shifted"+str((i,j)),\
dt=self.dt, dx=self.dx, dy=self.dy, \
dataPath =self.outputPath+"shifted"+str((i,j))+".dat",\
outputPath =self.outputPath+"shifted"+str((i,j))+".dat",\
imagePath =self.imagePath +"shifted"+str((i,j))+".png",\
database =self.database,\
cmap=self.cmap,
coordinateOrigin = (self.coordinateOrigin,\
self.coordinateOrigin),
verbose=self.verbose)
return self_shifted_by_ij
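    # Shift sketch (editorial): a.shiftMatrix(3, -2) returns a copy of a moved up
    # 3 rows and left 2 columns; the rows/columns that wrap around under np.roll
    # are masked out rather than kept.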
def shift(self, i, j):
"""alias for shiftMatrix()
"""
return self.shiftMatrix(i,j)
def smooth(self, ker=""):
"""
################################
        # smoothing the image by convolution with a kernel
        # uses SciPy
# return : a DBZ object, smoothed
# 8 March 2013
#################################
"""
if ker=="":
            ker = 1./273. * np.array( [[1, 4, 7, 4, 1],   # default kernel; 273 = sum of the entries
[4,16,26,16, 4],
[7,26,41,26, 7],
[4,16,26,16, 4],
[1, 4, 7, 4, 1]])
phi0 = self.matrix.copy()
phi0.fill_value = -999999999
phi0 = signal.convolve(phi0.filled(),ker)
phi0 = ma.array(phi0, fill_value=-999, mask=(phi0<-80))
# cutting it down to size (881,921)
return DBZ(name=self.name+'smoothed', matrix =phi0[2:-2, 2:-2],
dt=self.dt, dx=self.dx, dy=self.dy,
dataPath =self.dataPath +'smoothed.dat',
outputPath=self.outputPath+'smoothed.dat',
imagePath =self.imagePath +'smoothed.dat',
coastDataPath=self.coastDataPath,
database=self.database,
cmap=self.cmap, vmin=self.vmin, vmax=self.vmax,
coordinateOrigin = self.coordinateOrigin,
verbose=self.verbose)
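    # Smoothing sketch (editorial): a.smooth() convolves the field with the normalised
    # 5x5 near-Gaussian kernel above and returns a new DBZ object named
    # a.name + 'smoothed'; pass ker=<2-d numpy array> to use a custom kernel instead.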
def coarser(self, scale=2):
"""
################################
# returning a coarser image by averaging 4 nearby points
#
# return : a DBZ object
# parameter "scale" not used yet
# 8 March 2013
# parameter "scale" implementation started on 12 march 2013
#################################
"""
phi = self.matrix.copy()
# trim if dimensions not even
height, width = phi.shape
horizontal = width//scale
vertical = height//scale
phi = phi[0:vertical*scale, 0:horizontal*scale] # trimming
# getting the shifted copies
# 0 1
# 2 3
phi.fill_value = -999999999
phiList = [] #work to be continued here (parameter "scale" implementation)
phi0 = phi[ ::scale, ::scale].flatten()
phi1 = phi[ ::scale,1::scale].flatten()
phi2 = phi[1::scale, ::scale].flatten()
phi3 = phi[1::scale,1::scale].flatten() # unfinished re: scale/averaging
phi_mean= ma.vstack([phi0, phi1, phi2, phi3])
phi_mean= ma.mean(phi_mean, axis=0)
phi_mean= phi_mean.reshape(vertical, horizontal)
# cutting it down to size (881,921)
return DBZ(name=self.name+'_coarser', matrix =phi_mean,
dt=self.dt, dx=self.dx, dy=self.dy,
dataPath =self.dataPath[:-4] +'_coarser' + self.dataPath[-4:],
outputPath=self.outputPath[:-4]+'_coarser' + self.outputPath[-4:],
imagePath =self.imagePath[:-4] +'_coarser' + self.imagePath[-4:],
#coastDataPath=self.coastDataPath,
database=self.database,
cmap=self.cmap, vmin=self.vmin, vmax=self.vmax,
coordinateOrigin = (self.coordinateOrigin[0] //scale,\
self.coordinateOrigin[1] //scale ) ,
verbose=self.verbose)
def coarser2(self):
""" like coarser() but returning a matrix of the same size, not smaller
do it later when i have time
algorithm:
to multiply self.matrix with a "diagonal" of matrix [[.5, .5],[.5,.5]]
on both left and right.
"""
height, width = self.matrix.shape
pass
def coarser3(self, scale=2):
"""
################################
# returning a coarser image by picking one out of 2x2 points
#
# return : a DBZ object
# parameter "scale" not used yet
# adapted from function "coarser", 25 Feb 2014
#################################
"""
phi = self.matrix.copy()
# trim if dimensions not even
height, width = phi.shape
horizontal = width//scale
vertical = height//scale
phi = phi[0:vertical*scale, 0:horizontal*scale] # trimming
# getting the shifted copies
# 0 1
# 2 3
phi.fill_value = -999999999
phiList = [] #work to be continued here (parameter "scale" implementation)
phi0 = phi[ ::scale, ::scale]
# cutting it down to size (881,921)
return DBZ(name=self.name+'coarser', matrix =phi0,
dt=self.dt, dx=self.dx, dy=self.dy,
dataPath =self.dataPath[:-4] +'_coarser' + self.dataPath[-4:],
outputPath=self.outputPath[:-4]+'_coarser' + self.outputPath[-4:],
imagePath =self.imagePath[:-4] +'_coarser' + self.imagePath[-4:],
coastDataPath=self.coastDataPath,
database=self.database,
cmap=self.cmap, vmin=self.vmin, vmax=self.vmax,
coordinateOrigin = (self.coordinateOrigin[0] //scale,\
self.coordinateOrigin[1] //scale ) ,
verbose=self.verbose)
def getWRFwindow(self, window=dp.COMPREF2WRFwindow):
if self.matrix.shape == (881,921):
a1 = self.getWindow(*window)
a1.name = self.name + "_to_WRF_window"
a1.outputFolder = self.outputFolder
a1.outputPath = self.outputPath[:-4] + "_to_WRF_window" + self.outputPath[-4:]
a1.coarser().coarser()
return a1
else:
return self
def getPrediction(self, C):
"""wrapping armor.shiiba.regression2.getPrediction
"""
from armor.shiiba import regression2
return regression2.getPrediction(C, self)
def predict(self, *args, **kwargs):
"""wrapping self.getPrediction for the moment
"""
return self.getPrediction(*args, **kwargs)
def advect(self, *args, **kwargs):
"""wrapping advection.semiLagrangian.interploate2 for the moment
"""
from armor.advection import semiLagrangian as sl
return sl.interpolate2(self, *args, **kwargs)
def flipud(self, newObject=True):
"""wrapping the function np.flipud
"""
if newObject:
a_flipud = self.copy()
else:
a_flipud = self
a_flipud.matrix = np.flipud(a_flipud.matrix)
return a_flipud
def fliplr(self, newObject=True):
if newObject:
a_fliplr = self.copy()
else:
a_fliplr = self
a_fliplr.matrix = np.fliplr(a_fliplr.matrix)
return a_fliplr
def threshold(self, threshold=0, type='lower'):
"""getting a threshold image of itself with mask
"""
matrix= self.matrix.copy()
name = self.name + " thresholded (" + str(type) + ")at " + str(threshold)
oldMask = matrix.mask.copy()
if type=='lower':
matrix.mask += (matrix < threshold)
else:
matrix.mask += (matrix > threshold)
a_thres = DBZ(dataTime =self.dataTime,
matrix =matrix,
name =name,
dt =self.dt,
dx =self.dx,
dy =self.dy,
dataPath =self.dataPath,
outputPath=self.outputPath[:-4] + "_thresholded_" + str(threshold) + self.imagePath[-4:],
imagePath=self.imagePath[:-4] + "_thresholded_" + str(threshold) + self.imagePath[-4:],
coastDataPath=self.coastDataPath,
database =self.database,
cmap =self.cmap,
vmin =self.vmin,
vmax =self.vmax,
coordinateOrigin= self.coordinateOrigin,
verbose =self.verbose)
a_thres.oldMask = oldMask
return a_thres
def above(self, threshold=10):
"""
getting all the points above threshold
"""
a = self
a1 = a.copy()
a1.name = a.name + '_points_above_' + str(threshold)
a1.vmin = -.5
a1.vmax = 2
a1.matrix = (a.matrix >threshold)
return a1
def entropyGlobal(self, threshold=-999,
                      strata = [0, 10, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 999],
display=False):
"""
http://docs.scipy.org/doc/scipy-dev/reference/generated/scipy.stats.entropy.html
scipy: from probability/frequency distribution to entropy
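        Minimal sketch of the computation (illustrative only; assumes a loaded DBZ
        object `a` and the strata list above):
        >>> counts = [((a.matrix>=lo)*(a.matrix<hi)).sum() for lo, hi in zip(strata[:-1], strata[1:])]
        >>> H = entropy(1. * np.array(counts) / sum(counts))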
"""
from scipy.stats import entropy
        if threshold != -999:
            a = self.threshold(threshold)
arr = a.matrix
else:
a=self
arr = self.matrix
freqList =[]
N = len(strata)
for i in range(N-1):
m = (arr>=strata[i]) * (arr<strata[i+1])
freqList.append(m.sum())
freqList = np.array(freqList)
freqTotal = freqList.sum()
probArray = 1.* freqList / freqTotal
if display:
a.show(block=False)
return entropy(probArray)
def entropyLocal(self,
*args, **kwargs):
from . import analysis
return analysis.entropyLocal(a=self, *args, **kwargs)
def entropy(self, *args, **kwargs):
return self.entropyGlobal(*args, **kwargs)
def affineTransform(self, T=np.matrix([[1,0,0],[0,1,0]]), origin=""):
"""2013-10-17
"""
if origin =="":
origin = self.getCentroid()
T = np.matrix(T)
a2 = self.copy()
a2.name += '_affineTransformed'
from .geometry import transforms as tr
I, J = tr.IJ (a2.matrix)
I, J = tr.affine(I, J, T=T, origin=origin)
a2.matrix = tr.interpolation(a2.matrix, I, J)
a2.matrix = ma.array(a2.matrix, mask=(a2.matrix < self.missingDataThreshold))
#a2.setThreshold(0)
return a2
def momentNormalise(self, b,
useShiiba=False,
centre=(0,0), # shiiba parameter: the search window centre
searchWindowHeight=9, searchWindowWidth=9,
extraAngle = 0 # set it to np.pi if we want to add 180 degrees or pi to the rotation angle
):
"""2013-10-17
"""
from .geometry import transforms as tr
a = self
a.getCentroid()
a.getEigens()
b.getCentroid()
b.getEigens()
Mlin = tr.rotationMatrix( b.momentAngle+ extraAngle) * \
np.diag(b.eigenvalues / a.eigenvalues) **.5 * \
tr.rotationMatrix(-a.momentAngle )
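        # Mlin reads right to left: rotate a's principal axes onto the coordinate axes,
        # rescale each axis by sqrt(eigenvalue of b / eigenvalue of a), then rotate onto
        # b's principal axes - a moment-matching linear map from a to b.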
if useShiiba:
x = a.shiiba(b, centre=centre,
searchWindowHeight=searchWindowHeight,
searchWindowWidth= searchWindowWidth,
)
shift = x['C'][2:6:3]+x['mn']
else:
shift = b.centroid-a.centroid
origin = a.centroid + shift
Maffine = np.hstack([Mlin, np.matrix(shift).T])
a2 = a.affineTransform(T=Maffine, origin=origin)
a2.name = a.name + '_normalised_to_' + b.name
if useShiiba:
a2.name+='_with_shiiba'
a2.shiibaResult = x
#################################
# storing the results
a2.Maffine = Maffine
try:
a.relativeAngle[b.name] = b.momentAngle + extraAngle - a.momentAngle
except:
a.relativeAngle = {b.name : b.momentAngle + extraAngle - a.momentAngle}
try:
a.momentRatios[b.name] = np.diag(b.eigenvalues / a.eigenvalues) **.5
except:
a.momentRatios = {b.name : np.diag(b.eigenvalues / a.eigenvalues) **.5}
#################################
# return value
return a2
def shiibaNormalise(self, b):
"""2013-10-17
"""
pass
def truncate(self, threshold=0, mode='below', newObject=True):
"""2013-10-20
truncate the image above/below the threshold
"""
if newObject:
a1 = self.copy()
else:
a1 = self
if mode == 'below':
a1.matrix = a1.matrix * (a1.matrix >=threshold )
elif mode == 'above':
a1.matrix = a1.matrix * (a1.matrix <= threshold )
return a1
def levelSet(self, value):
"""
DBZ object at the given value
"""
a1 = self.copy()
a1.name = self.name + '_level_set_' + str(value)
a1.matrix = (a1.matrix==value)
a1.vmax = 1
a1.vmin = -0.5
return a1
def laplacianOfGaussian(self, sigma=20):
"""
returns a dbz object
"""
from scipy import ndimage
a1 = self.copy()
a1.matrix = ndimage.filters.gaussian_laplace(self.matrix, sigma)
a1.matrix = ma.array(a1.matrix, fill_value=-999.)
a1.name = self.name + "laplacian-of-gaussian-sigma" + str(sigma)
a1.imagePath = self.imagePath[:-4] + "laplacian-of-gaussian-sigma" + str(sigma) + self.imagePath[-4:] #hack
a1.outputPath = self.outputPath[:-4] + "laplacian-of-gaussian-sigma" + str(sigma) + self.outputPath[-4:] #hack
mx = a1.matrix.max()
mn = a1.matrix.min()
a1.vmax = mx + (mx-mn)*0.2 # to avoid red top
a1.vmin = mn
return a1
# def laplacianOfGaussianMask(self, sigma=100):
# """
# returns a function from the domain to the interval [0, 1] in Real Numbers
# 2013-11-07
# """
# a1 = self.laplacianOfGaussian(sigma=sigma)
# m = a1.matrix
def gaussianFilter(self, sigma=20):
"""
returns a dbz object
"""
from scipy import ndimage
a1 = self.copy()
a1.matrix = ndimage.filters.gaussian_filter(self.matrix, sigma)
a1.matrix = ma.array(a1.matrix, fill_value=-999.)
a1.matrix.mask = np.zeros(a1.matrix.shape)
a1.name = self.name + "gaussian-sigma" + str(sigma)
a1.imagePath = self.imagePath[:-4] + "gaussian-sigma" + str(sigma) + self.imagePath[-4:] #hack
a1.outputPath = self.outputPath[:-4] + "gaussian-sigma" + str(sigma) + self.outputPath[-4:] #hack
#mx = a1.matrix.max()
#mn = a1.matrix.min()
#a1.vmax = mx + (mx-mn)*0.2 # to avoid red top # replaced by lines below 2014-02-20
#a1.vmin = mn
a1.matrix.mask = (a1.matrix< self.missingDataThreshold)
a1.vmax = self.vmax
a1.vmin = self.vmin
return a1
def gaussianMask(self, sigma=20, fraction=0.8):
"""
returns an array - a smooth mask valued from 0 to 1 built from the gaussian filter
- sigma = sigma
- fraction = fraction retained extending from a1.matrix.max() to a1.matrix.min()
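        Illustrative sketch (not a doctest; assumes a loaded DBZ object `a`):
        >>> mask = a.gaussianMask(sigma=20, fraction=0.8)   # array valued in [0, 1]
        >>> a.show(matrix=a.matrix * mask)                  # soft-masked reflectivity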
"""
a1 = self.gaussianFilter(sigma=sigma)
a1max = a1.matrix.max()
a1min = a1.matrix.min()
a1min = a1min + (a1max-a1min)*(1-fraction)
a_mask = (a1.matrix - a1min) / (a1max-a1min)
a_mask *= (a_mask > 0)
self.mask = a_mask
return a_mask
def gaborFilter(self, sigma=20, phi=1./5, theta=0, **kwargs):
"""
wrapping armor.filter.gabor.gaborFilter
"""
from armor.filter import gabor
a2 = gabor.gaborFilter(self, sigma, phi, theta, **kwargs)
return a2 # a pair
def shortTermTrajectory(self, DBZstream="", key1="", key2="", radius=30, hours=6, timeInterval=3, verbose=True, drawCoast=True):
"""
plot the twelve-hourly-trajectory (+-6) within the DBZstream
with the DBZ object having the keywords key1 and key2
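        Illustrative usage sketch (assumes a loaded DBZ object `a` that belongs to a
        loaded DBZstream `ds`):
        >>> traj = a.shortTermTrajectory(DBZstream=ds, hours=6, timeInterval=3)
        >>> traj.saveImage()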
"""
a1 = self.copy() # dirty copy for computation
a2 = self.copy() # clean copy for display
try:
a2.load() #get a clean copy for display
#a2.show() #debug
except:
pass
if DBZstream =="":
DBZstream = self.DBZstream
a1.name = self.name + '\nshort term trajectory: +- ' + str(hours) + 'hours'
a2.name = self.name + '\nshort term trajectory: +- ' + str(hours) + 'hours'
ds = DBZstream # alias
a1.setThreshold(a1.matrix.min())
a1.getCentroid()
a2.matrix = a2.drawCross(a1.centroid[0], a1.centroid[1], radius=radius+5, intensity= 9999).matrix
for h in range(-hours, hours+1, timeInterval):
if h == 0:
continue # skip this
dataTime = self.timeDiff(hours=h)
D = [v for v in ds(dataTime) if key1 in v.name and key2 in v.name]
if verbose:
print 'time:', dataTime
if len(D) == 0:
continue # do nothing if none found
else:
r = max(radius - 2.5 *abs(h), 5)
#print 'radius:', rs #debug
#print '\n\n\n'
D = D[0] # choose the first one if there's any ambiguity
if verbose:
print 'DBZ object name:', D.name
D.load()
D.setThreshold(self.matrix.min()) #usually =0
D.getCentroid()
if verbose:
print D.name, "centroid:", D.centroid
# plot the centroid of D onto a1 with crosses of decreasing size, min=5
if h <= 0:
a2.matrix = a2.drawCross(D.centroid[0], D.centroid[1], r, intensity= 9999).matrix
else:
a2.matrix = a2.drawCross(D.centroid[0], D.centroid[1], r, intensity= 90).matrix
if drawCoast:
            a2.drawCoast()
if verbose:
a2.show()
return a2
def drawShiibaTrajectory(self, a2, L,
k=12, #default - 12 steps (or 2 hours if successive slides are 10 minutes apart)
*args, **kwargs
):
"""wrapper"""
from armor import analysis
a1_new = analysis.drawShiibaTrajectory(a1=self, a2=a2, L=L,
k=k, *args, **kwargs)
return a1_new
def fft(self):
"""2014-02-24
wrapping armor.filter.fourier
"""
from armor.filter import fourier
return fourier.fft(self)
def ifft(self):
"""2014-02-24
wrapping armor.filter.fourier
"""
from armor.filter import fourier
return fourier.ifft(self)
def sigmoid(self, L=""):
"""2014-02-24
supplementary function to smooth out the extremities of the fourier transforms
wrapping armor.misc.sigmoid"""
from armor import misc
if L=="":
L = self.vmax - self.vmin
self.matrix = misc.sigmoid(self.matrix, L=L)
self.vmax = 1.
self.vmin = 0.
def powerSpec(self, *args, **kwargs):
"""
moved to armor/analysis.py 2014-07-04
updated 2014-07-03
including the new 3dplotting function from lin yen ting
armor.graphics.spectrum3d
new pipeline:
WRF/RADAR -> response layers for various sigmas -> 1. max spec map
2. max internsity map
3. sigma ranges
-> 1. 3D max spec chart
2. 3D total spec chart
"""
from armor import analysis
return analysis.powerSpec(self, *args, **kwargs)
def powerSpecTest0709(self ,*args, **kwargs):
from armor import analysis
return analysis.powerSpecTest0709(self, *args, **kwargs)
# end new objects from old
#############################################################
############################################################
# functions on object
def datetime(self, T="", dh=0):
"""
2013-11-28
get the datetime object
        dh - difference in number of hours
"""
import datetime
if T =="":
T = self.dataTime # '20120612.0100'
dt = datetime.timedelta(1./24 * dh)
dd = re.findall(r'\d\d', T)
year = int(dd[0]+dd[1])
month = int(dd[2])
day = int(dd[3])
hour = int(dd[4])
minute = int(dd[5])
return datetime.datetime(year, month, day, hour, minute) + dt
def getDataTime(self, T):
"""
input:
T - a datetime.datetime object
output:
a dataTime string in the pattern.DBZ format
effect:
none
"""
dataTime = str(T.year) + ("0"+str(T.month))[-2:] + ("0"+str(T.day))[-2:] + "." + \
("0"+str(T.hour))[-2:] + ("0"+str(T.minute))[-2:]
return dataTime
def setDataTime(self, T):
"""
input:
T - a datetime.datetime object
output:
none
effect:
resetting self.dataTime to T
"""
dataTime = str(T.year) + ("0"+str(T.month))[-2:] + ("0"+str(T.day))[-2:] + "." + \
("0"+str(T.hour))[-2:] + ("0"+str(T.minute))[-2:]
self.dataTime = dataTime
def volume(self, threshold=-999):
""" return the volume above the threshold
"""
m = self.matrix.copy()
m *= (m >= threshold)
vol = m.sum()
return vol
def count(self, threshold=-999):
""" return the number of points above the threshold
"""
m = self.matrix
c = (m >= threshold).sum()
return c
def getCentroid(self):
"""
2013-10-17
"""
from .geometry import transforms as tr
self.centroid = tr.getCentroid(self.matrix)
return self.centroid
def getEigens(self):
"""2013-10-17
get the momentMatrix, eigenvalues and eigenvectors, momentAngle
store them as attributes
and return the eigenvalues and eigenvectors
small eigenvalue goes first (small eigenvalue-> i(=y)-axis)
"""
self.getCentroid()
from .geometry import transforms as tr
self.momentMatrix = tr.getMomentMatrix(self.matrix)
self.eigenvalues, self.eigenvectors = tr.getAxes(self.momentMatrix)
v0 = self.eigenvectors[:,0]
v1 = self.eigenvectors[:,1]
self.momentAngle = np.arctan(1.*v0[1]/v0[0])
return self.eigenvalues, self.eigenvectors
def getRelativeAngle(self, b="", returnType="radian", resultInFirstQuadrant=True, threshold=0):
"""2014-08-04
to get the relative angle between the axes
"""
arr1 = self.matrix.copy() #backup
self.matrix = self.threshold(threshold).matrix
#if not hasattr(self,'eigenvectors'):
# self.getEigens()
self.getEigens()
###
if b=="":
cos = (self.eigenvectors[0,0])
sin = (self.eigenvectors[1,0])
angle = np.arccos(cos)
if sin < 0 :
angle = 2 * np.pi -angle
self.matrix = arr1
else:
arr2 = b.matrix.copy()
b.matrix = b.threshold(threshold).matrix
b.getEigens()
###
#cos = (self.eigenvectors[0,:] * b.eigenvectors[0,:]).sum()
cos = (self.eigenvectors[:,0] * b.eigenvectors[:,0]).sum() #2014-10-29
angle = np.arccos(cos)
if resultInFirstQuadrant and angle > np.pi/2: #2014-10-29
angle = np.pi - angle
self.matrix = arr1
b.matrix = arr2
if returnType == "deg" or returnType=="degree":
angle = angle / np.pi * 180
return angle
def getAspectRatios(self, b, threshold=0):
arr1 = self.matrix.copy()
arr2 = b.matrix.copy()
self.matrix = self.threshold(threshold).matrix
b.matrix = b.threshold(threshold).matrix
###
self.getEigens()
b.getEigens()
###
r0 = (self.eigenvalues[0] / b.eigenvalues[0]) **.5
r1 = (self.eigenvalues[1] / b.eigenvalues[1]) **.5
self.matrix = arr1
b.matrix = arr2
return np.array([r0, r1])
def cov(self, dbz2):
"""wrapping the ma.cov function: covariance between two images
"""
phi0 = self.matrix.flatten()
phi1 = dbz2.matrix.flatten()
cov = ma.cov(phi0, phi1)
return cov
def corr(self, dbz2):
"""wrappig the ma.corrcoef function: correlation between two images
"""
phi0 = self.matrix.flatten()
phi1 = dbz2.matrix.flatten()
corr = ma.corrcoef(phi0, phi1)
if (not isinstance(corr, float)) and (not isinstance(corr,int)):
corr = corr[0,1] # return a number
return corr
def regress(self, dbz2):
"""wrapping lstsq
http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
"""
arr1 = self.matrix.flatten()
arr2 = dbz2.matrix.flatten()
mask = arr1.mask + arr2.mask
#print "(1-mask).sum()",(1-mask).sum() #debug
arr1.mask = mask
arr2.mask = mask
arr1 = arr1.compressed()
arr2 = arr2.compressed()
arr1 = np.vstack([np.ones(len(arr1)), arr1,]).T
#debug
#print "arr1.shape, arr2.shape", arr1.shape, arr2.shape
x, residuals, rank, sing = np.linalg.lstsq(arr1, arr2)
return x, residuals
def localCov(self, dbz2, windowSize=7):
"""plotting the local covariance of two dbz patterns
a slow version of the function
>>> test.tic() ; x=a.localCov(b) ; test.toc()
*************************
time spent: 4091.93978906
>>> x
>>> xm=x.matrix
>>> xm.min()
-1.0000000000000002
>>> xm.max()
1.0000000000000002
>>> xm.mean()
0.21721107449067339
>>> x.name = 'local correlation: dbz20120612.0200 - 0210'
>>> x.outputPath='testing/test112/localCorrelationMatrix.dat'
>>> x.save()
>>> x.matrix=np.flipud(x.matrix)
>>> x.imagePath='testing/test112/localCorrelationMatrix.png'
>>> x.saveImage()
>>>
"""
height, width = self.matrix.shape
E = (windowSize-1)/2 #shorthand
# initialise
localcovar = ma.zeros((height,width))
localcovar.mask = True
for i in range(height):
for j in range(width):
window1 = self.matrix[max(0,i-E):min(i+E+1, height),max(0,j-E):min(j+E+1,width)]
window2 = dbz2.matrix[max(0,i-E):min(i+E+1, height),max(0,j-E):min(j+E+1,width)]
localcovar[i,j] = ma.corrcoef(window1.flatten(), window2.flatten())[0,1]
return localcovar
def shiiba(self,b,centre=(0,0),searchWindowHeight=9, searchWindowWidth=9, *args, **kwargs):
"""wrapping armor.analysis.shiiba
"""
from armor import analysis
self.shiibaResult = analysis.shiiba(self, b, centre=centre,
searchWindowHeight=searchWindowHeight,
searchWindowWidth=searchWindowWidth,
*args, **kwargs)
return self.shiibaResult
def shiibaLocal(self, b, *args, **kwargs):
"""wrapping armor.analyais.shiibaLocal
"""
from armor import analysis
self.shiibaLocalResult = analysis.shiibaLocal(self,b, *args, **kwargs)
        return self.shiibaLocalResult
def shiibaFree(self,b, *args, **kwargs):
"""wrapping armor.shiiba.regressionCFLfree
"""
from armor.shiiba import regressionCFLfree as cflfree
self.shiibaFreeResult = cflfree.regressGlobal(self,b, *args, **kwargs)
return self.shiibaFreeResult
def getVect(self, C):
"""wrapping armor.shiiba.regression2.convert
"""
from armor.shiiba import regression2
return regression2.convert(C, self)
def getKmeans(self, *args, **kwargs):
"""wrapping armor.kmeans.clustering.getKmeans()
8 April 2013
"""
import armor.kmeans.clustering as clust
x = clust.getKmeans(self, *args, **kwargs)
self.kmeansResults=x
return x
def invariantMoments(self, takeRoots=True, **kwargs):
"""wrappng armor.geometry.moments.HuMoments
normalise with respect to the degree
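        The seven Hu moments are homogeneous of degrees 2, 4, 6, 6, 12, 8 and 12;
        taking the corresponding roots brings them to roughly degree one so that they
        can be compared on a common scale.
        Illustrative sketch (assumes loaded DBZ objects `a` and `b`):
        >>> d = np.linalg.norm(np.array(a.invariantMoments()) - np.array(b.invariantMoments()))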
"""
from armor.geometry import moments
x = moments.HuMoments(self.matrix, **kwargs)
if takeRoots:
x[0] = np.sign(x[0])*abs(x[0])**(.5)
x[1] = np.sign(x[1])*abs(x[1])**(.25)
x[2] = np.sign(x[2])*abs(x[2])**(1./6)
x[3] = np.sign(x[3])*abs(x[3])**(1./6)
x[4] = np.sign(x[4])*abs(x[4])**(1./12)
x[5] = np.sign(x[5])*abs(x[5])**(1./8)
x[6] = np.sign(x[6])*abs(x[6])**(1./12)
self.invMom = x #storing it for the future
return x
def skewness(self, *args, **kwargs):
""" to compute the skewnesses along the major and minor axes
"""
# 1. compute the angle
# 2. compute the transformation matrix
# 3. compute the skewnesses in the x- and y- directions
#from .geometry import tranforms as tr
from .geometry import moments as mmt
self.getEigens()
angle = self.getRelativeAngle()
v0 = self.eigenvectors[:,0]
v1 = np.array([-v0[1], v0[0]])
#I, J = self.IJ()
a2 = self.affineTransform(T=np.vstack([v0,v1]), origin="")
a2 = a2.threshold(0)
results = {}
results['skewness'] = mmt.skewness2(a2.matrix, *args, **kwargs)
results['kurtosis'] = mmt.kurtosis2(a2.matrix, *args, **kwargs)
return results
#a2.show() #debug
#a2.getEigens() #debug
#return a2 #debug
def spline(self):
"""
wrapping the scipy interpolate module
http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectBivariateSpline.html#scipy.interpolate.RectBivariateSpline
"""
height, width = self.matrix.shape
return interpolate.RectBivariateSpline(range(height), range(width), self.matrix)
def granulometry(self, scales=[4,10,14,40], outputFolder="",display=True, *args, **kwargs):
from .geometry import granulometry as gr
granulo = gr.analyse(self, scales=scales, outputFolder=outputFolder, display=display,
*args, **kwargs)
self.granulo = granulo
granulo_objects = [DBZ(matrix=granulo[i], name=self.name+'_granulo_'+str(scales[i]),
vmax=1, vmin=-0.5,) for i in range(len(scales))]
return granulo_objects
def binaryOpening(self, scale=10, threshold=20):
"""
2013-12-06
"""
try:
from scipy import ndimage
from .geometry import granulometry as gr
structure = gr.disk_structure(scale)
opened = ndimage.binary_opening(self.matrix>threshold, structure=structure)
return opened
except ImportError:
print 'Cannot import scipy module "ndimage". Check your scipy package or (re)install it.'
def hausdorffDim(self, epsilon=1, *args, **kwargs):
from geometry import fractal
a1 = self.laplacianOfGaussian(sigma=1.5)
        a1.matrix = (abs(a1.matrix) > 1)
a1.setMaxMin()
a1.cmap ='jet'
#a1.show()
res = fractal.hausdorffDim(a1, epsilon, *args, **kwargs)
try:
self.dimH[epsilon] = res
except AttributeError:
self.dimH = {epsilon: res}
return {'dimH': self.dimH, 'a1': a1}
def hausdorffDimPlot(self, epsilons = [1, 2, 4, 8, 16, 32, 64], display=True, imagePath="", closePlots=True,
ylim=[1,2]):
for epsilon in epsilons:
self.hausdorffDim(epsilon)
x = np.log2(epsilons)
y = [self.dimH[v] for v in epsilons]
if closePlots:
plt.close()
plt.plot(x,y, 'o-')
plt.title("Hausdorff Dimension Plot")
plt.ylabel("dimension ~ side length/epsilon")
plt.xlabel("log2(epsilon)")
plt.ylim(ylim)
if imagePath!="":
plt.savefig(imagePath)
if display:
plt.show(block=False)
def binaryClosing(self, scale=10, threshold=20):
"""
2013-12-10
"""
try:
from scipy import ndimage
from .geometry import granulometry as gr
structure = gr.disk_structure(scale)
closed = ndimage.binary_closing(self.matrix>threshold, structure=structure)
return closed
except ImportError:
print 'Cannot import scipy module "ndimage". Check your scipy package or (re)install it.'
def IJ(self):
height, width = self.matrix.shape
X, Y = np.meshgrid(range(width), range(height))
I, J = Y, X
return I, J
def doubleGaussian(self, verbose=True):
"""
defining a double gaussian covering the blob
"""
from .geometry import transformedCorrelations as tr
self.getEigens() # compute the relevant stats
self.getCentroid()
I, J = self.IJ()
g = tr.doubleGaussian(I, J, self.centroid[0], self.centroid[1],
2* self.eigenvalues[0]**.5, 2* self.eigenvalues[1]**.5,
-self.momentAngle)
if verbose:
plt.imshow(g, origin='lower')
plt.show()
return g
def connectedComponents(self, N=-999):
"""
connected components labelling
http://scipy-lectures.github.io/advanced/image_processing/
sorted in descending order
0 = largest component
N = max. number of labels
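        Illustrative sketch (assumes a loaded DBZ object `a`):
        >>> comps = a.above(30).connectedComponents(N=5)   # label the largest connected regions above 30 dBZ
        >>> comps.show()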
"""
from scipy import ndimage
labels, number_of_components = ndimage.label(self.matrix)
if N ==-999:
N = number_of_components
else:
N = min(number_of_components, N)
labelsCounts = [(i, (labels==i).sum()) for i in range(number_of_components)]
labelsCounts.sort(key=lambda v: v[1], reverse=True)
labels2 = np.ones(labels.shape) *-999
for i in range(N):
# reset the labels
labels2+= (labels==labelsCounts[i][0])*(999+i)
labels = labels2
components_object = DBZ(matrix=labels, name=self.name + '_connected_components',
vmin=-2, vmax= N )
self.components = labels
return components_object
def timeDiff(self, days=0, hours=0, minutes=0):
"""
return a string "yyyymmdd.hhmm" computed from self.dataTime and the input
"""
T = self.dataTime
from datetime import datetime, timedelta
        timeDiff = timedelta(days=days, hours=hours, minutes=minutes)   # positive or negative
timeIn = datetime(int(T[0:4]), int(T[4:6]), int(T[6:8]), int(T[9:11]), int(T[11:13]))
timeOut = timeIn + timeDiff
timeOutString = str(timeOut.year) + ('0'+str(timeOut.month))[-2:] + ('0'+str(timeOut.day))[-2:] + \
'.' + ('0'+str(timeOut.hour))[-2:] + ('0'+str(timeOut.minute))[-2:]
return timeOutString
def getScaleSpace(self, order=0, gamma=1, scales=[1,2,5,10,20,40,100]):
"""
wraps armor.spectral.scaleSpace
see the reference there
scales = sigma
"""
from armor.spectral import scaleSpace as ss
scaleSpace = []
for scale in scales:
L_scale = ss.L_normalised(image=self.matrix, sigma=scale, order=order, gamma=gamma)
scaleSpace.append(L_scale)
self.scaleSpace = scaleSpace
self.scales = scales
return scaleSpace
def getScaleMap(self):
try:
scaleSpace = self.scaleSpace
except AttributeError:
scaleSpace = self.getScaleSpace()
height, width = self.matrix.shape
scaleMax = scaleSpace[0]
for i in range(1, len(scaleSpace)):
scaleMax = np.maximum(scaleMax, scaleSpace[i])
scaleMapMatrix = np.zeros((height,width))
for i in range(height):
for j in range(width):
scaleMapMatrix[i,j] = min([k for k in range(len(scaleSpace)) if scaleSpace[k][i,j] == scaleMax[i,j] ]) # pick the "min" scale that sits on the optimal
scaleMap = self.copy()
scaleMap.matrix = scaleMapMatrix
scaleMap.name = self.name + "scale_map;\nscales=" + str(self.scales)
scaleMap.outputPath = self.outputPath[:-4] + 'scales' + self.outputPath[-4:]
scaleMap.imagePath = self.imagePath[:-4] + 'scales' + self.imagePath[-4:]
scaleMap.vmin = -5
scaleMap.vmax = scaleMap.matrix.max() + 2
scaleMap.cmap = 'hsv'
self.scaleMap = scaleMap
return scaleMap
def gaussianCorr(self, wrf, sigma=0, sigmaWRF=0, thres=15, showImage=True, saveImage=True,
outputFolder='', outputType="correlation", *args, **kwargs):
"""
wrapping analysis module
"""
from armor import analysis
return analysis.gaussianSmooothNormalisedCorrelation(obs=self, wrf=wrf, sigma=sigma, thres=thres,
showImage=showImage,saveImage=saveImage,
outputFolder=outputFolder,
outputType=outputType,*args, **kwargs)
def histogram(self, bins=20, matrix="", outputPath="", display=True, **kwargs):
"""
wrapping numpy.histogram
http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
"""
if matrix == "":
matrix = self.matrix
plt.close()
hist, edges = np.histogram(matrix, bins=bins, **kwargs)
plt.plot(edges[:-1], hist)
if outputPath != "":
plt.savefig(outputPath)
if display:
plt.show()
def stormTracking(self, newObject=True, *args, **kwargs):
"""
wrapper of armor.kmeans.stormTracking.stormTracking()
"""
from kmeans import stormTracking as st
results = st.stormTracking(self, newObject=newObject, *args, **kwargs)
return results
# end function on object
############################################################
############################################################
# functions altering (attributes) of object
def findEdges(self, threshold=-9999):
from armor.geometry import edges
        m = self.matrix.copy()
        if threshold != -9999:
            m.mask += (m < threshold)
        m_edges = edges.find(DBZ(matrix=m))
self.edges = m_edges
return m_edges
def setThreshold(self, threshold=-9999):
mask = self.matrix.mask.copy()
self.matrix.mask = 0
        self.matrix = self.matrix + (self.matrix<threshold) * (threshold-self.matrix)  # values below the threshold are raised to the threshold
self.matrix.mask = mask
# end functions altering (attributes) of object
############################################################
########################################################
# supplementary functions
def constructTaiwanReliefData(self, inputFolder=defaultTaiwanReliefDataFolder, outputFolder="",
dilation=0):
"""
construct taiwan relief info to the current grid
and save it to the dataFolder
"""
from armor.taiwanReliefData import convertToGrid
if outputFolder=="":
outputFolder = os.path.dirname(self.dataPath) + '/' #folder name of the dataPath
height, width = self.matrix.shape
LL_lat, LL_long = self.lowerLeftCornerLatitudeLongitude
UR_lat, UR_long = self.upperRightCornerLatitudeLongitude
kwargs = {'files' :['100','1000','2000','3000', 'Coast'],
'width' : width,
'height' : height,
'lowerLeft' : (LL_long, LL_lat),
'upperRight' : (UR_long, UR_lat),
'folder' : inputFolder,
'outputFolder': outputFolder,
'suffix' : ".DAT",
'dilation' : dilation,
}
convertToGrid.main(**kwargs)
def coordinatesToGrid(self, x, verbose=False):
"""
20 jan 2014
taken from armor.taiwanReliefData.convertTogrid
convention:
x = (lat, long, lat long...)
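        Illustrative sketch of the mapping (symbols as in the code below):
            i = round((lat  - LL_lat ) * height / (UR_lat  - LL_lat ))
            j = round((long - LL_long) * width  / (UR_long - LL_long))
        where (LL_lat, LL_long) and (UR_lat, UR_long) are the lower-left and
        upper-right corners of the grid.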
"""
height, width = self.matrix.shape
i1, j1 = self.lowerLeftCornerLatitudeLongitude
i2, j2 = self.upperRightCornerLatitudeLongitude
nx = 1. * width / (j2-j1)
ny = 1. * height / (i2-i1)
x = np.array(x)
y = np.zeros(len(x))
y[0::2] = (np.round((x[0::2] - i1) * ny)).astype(int)
y[1::2] = (np.round((x[1::2] - j1) * nx)).astype(int)
if verbose:
print x, '-->', y
return y
# end supplementary functions
########################################################
########################################################
# features and classifications
def showFeatureLayer(self, n=0, n2="", vmin="", vmax=""):
if n2=="":
self.show(matrix=self.features[:,:,n], vmin=vmin, vmax=vmax)
else:
self.show(matrix=self.features[:,:,n]-self.features[:,:,n2], vmin=vmin, vmax=vmax)
def getFeatureLayer(self, n=0):
return self.features[:,:,n]
#def whiten(self): #doesn't work yet - whiten requires a rank-2 array
# from scipy.cluster import vq
# self.features = vq.whiten(self.features)
def classify(self, features="", k=20, cmap='jet', display=True,
algorithm = 'kmeans',
#algorithm='kmeans-with-weight'
toWhiten=False,
toDrawCentroids=False,
crossRadius=10,
verbose=True,
threshold=0,
scope='full', #whether to cluster all or just selected points
#scope='selected',
intensityStep = 5, # parameter for kmeans-with-weight algorithm
*args, **kwargs):
"""
input: feature vector field
output: classification of the image into various classes
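        Illustrative usage sketch (assumes a loaded DBZ object `a`):
        >>> a.initialiseFeatures()
        >>> a.gaborFeatures()                      # optional extra feature layers
        >>> result = a.classify(k=10, display=False)
        >>> result['a1'].show()                    # the k-means label map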
"""
from scipy.cluster import vq
if scope != 'full':
featuresMask = self.matrix.mask + (self.matrix.filled()<threshold)
self.featuresMask = featuresMask
if features =="":
try:
features = self.features
except AttributeError:
print "Features undefined!! - ", self.name
return -1
height, width, depth = features.shape
# review each layer and check for degeneracy
layersToBeDeleted = []
print "features size:", self.features.shape
for i in range(depth):
layerRank = np.linalg.matrix_rank(features[:,:,i])
if verbose:
print "feature layer", i, ": Rank=",
print layerRank
if layerRank<1:
#features = np.dstack([features[:,:,:i], features[:,:,i+1]])
#features = np.delete(features, i, axis=2)
layersToBeDeleted.append(i)
if verbose:
print "removing layers", layersToBeDeleted
features = np.delete(features, layersToBeDeleted, axis=2)
height, width, depth = features.shape
#perform k-means stuff
###########################################################################
##
#
if scope == 'full': # form a line for the features before clustering
f1 = features.reshape((height*width), depth)
else:
#f1 = ma.array(features, mask=ma.dstack([featuresMask]*depth))
f1 = ma.array(features, mask=False)
f2 = np.zeros(((1-featuresMask).sum(), depth))
for i in range(depth):
f1.mask[:,:,i]= featuresMask
print i, "f1[:,:,i].count()", f1[:,:,i].count() #debug
f1 = ma.reshape(f1, (height*width, depth))
print "f1.shape, f2.shape:", f1.shape, f2.shape #debug
print "f1[:,0].compressed().shape, f2[:,0].shape ",f1[:,0].compressed().shape, f2[:,0].shape #debug
for i in range(depth):
f2[:,i] = f1[:,i].compressed()
#print "displaying feature level", i #debug
#plt.imshow(features[:,:,i], origin='lower') #debug
#plt.show() #debug
#time.sleep(1) #debug
f1 = f2
if toWhiten:
f1 = vq.whiten(f1)
# check for other degeneracies
layersToBeDeleted2 = []
for i in range(depth):
if f1[:,i].min() == f1[:,i].max():
layersToBeDeleted2.append(i)
if verbose:
print "deleting layer", i
f1 = np.delete(f1, layersToBeDeleted2, axis=1)
_, depth = f1.shape
#plt.close() #debug
#plt.imshow(f1[:depth,:], origin='lower') #debug
#plt.colorbar() #debug
#plt.show(block=False) #debug
#######################################################################
#
print "algorithm:", algorithm
if algorithm == 'kmeans':
print "k, args, kwargs:", k, args, kwargs #debug
centroids, arr = vq.kmeans2(f1, k=k, *args, **kwargs) #key line
if algorithm == "kmeans-with-weight":
print "k, args, kwargs:", k, args, kwargs #debug
# assuming the first three features are I, J and intensity
maxIntensity = int(f1[:,2].max())+1
f2 = f1.copy()
for i in range(threshold, maxIntensity, intensityStep):
newLayer = f1.copy()
                pointsBelow = np.nonzero(newLayer[:, 2] <= i)[0]    # drop points whose intensity (third feature) is at most i
newLayer = np.delete(newLayer, pointsBelow, axis=0)
f2 = np.vstack([f2, newLayer])
print 'f2.shape',f2.shape #debug
# IDEA: WE PERFORM CLUSTERING FOR ALL POINTS OF f2 BUT WE USE ONLY RESULTS FOR f1
# WHICH ARE THE DATAPOINTS AT THE TOP OF THE LIST
centroids, arr = vq.kmeans2(f2, k=k, *args, **kwargs) #key line
arr = arr[0:len(f1)]
#
#######################################################################
if scope == 'full': # reform the resulting arr into original shape
arr = arr.reshape((height, width))
else:
arr2 = ma.ones((height, width))* (-999)
arr2.mask = True
arr2.fill_value= -999
X, Y = np.meshgrid(range(width), range(height)) # don't assume that the first two features are x,y #well, in the end, maybe we should, but won't bother changing this for now.
I, J = Y, X
I = ma.array(I, mask=featuresMask)
J = ma.array(J, mask=featuresMask)
coords = np.vstack([I.compressed(),J.compressed()]).T
print 'len arr, len coords:', len(arr), len(coords) #debug
for i in range(len(coords)):
arr2[coords[i][0], coords[i][1]] = arr[i]
arr = arr2
#
##
###########################################################################
if verbose:
print arr
print arr.shape #debug
a1 = self.copy()
a1.name = "k-means, k=%d, for " %k + self.name
a1.imagePath = self.imagePath[:-4] + "_kmeans_" + self.imagePath[-4:]
a1.outputPath = self.outputPath[:-4] + "_kmeans_" + self.outputPath[-4:]
a1.cmap = cmap
a1.matrix = np.ma.array(arr, fill_value=-999.)
a1.vmax = arr.max()
a1.vmin = arr.min()
if toDrawCentroids:
for i in range(len(centroids)):
try:
print centroids[i,0], centroids[i,1]
a1.drawCross(int(centroids[i,0]), int(centroids[i,1]), radius=crossRadius, newObject=False) # assuming the first two features =i,j
except IndexError:
pass
result= {'centroids':centroids, 'a1':a1}
self.classification = result
if display:
a1.show()
time.sleep(2)
if display:
#a1.show(matrix=np.ma.array(a1.matrix, mask=self.matrix.mask))
a1.show(matrix=np.ma.array(a1.matrix, mask=(self.matrix<threshold)))
a2 = a1.copy()
a2.name = a1.name + '_2'
a2.matrix = np.ma.array(a1.matrix, mask=(self.matrix<threshold))
result['a2'] = a2
self.classificationResult = result
return result
def initialiseFeatures(self, intensityThreshold=0, fill_value=-999):
"""
basic feature vectors: i,j, and intensity
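        The result is a (height, width, 3) stack of layers [i, j, intensity].
        Illustrative sketch (assumes a loaded DBZ object `a`):
        >>> f = a.initialiseFeatures()
        >>> f.shape == a.matrix.shape + (3,)
        True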
"""
height, width = self.matrix.shape
X, Y = np.meshgrid(range(width), range(height))
I, J = Y, X
a1 = self.copy()
a1.setThreshold(intensityThreshold)
a1.matrix.fill_value= fill_value
a1.matrix = a1.matrix.filled()
self.features = np.dstack([I, J, a1.matrix])
return self.features
def deleteFeature(self, layers=[0]):
"""
to edit the features array
"""
self.features = np.delete(self.features, layers, axis=2)
def granulometryFeatures(self, threshold=0, scales=[1,2,4,8,16,32,64], verbose=True, display=False, outputFolder="",
multiplier=1.,
*args, **kwargs):
from scipy import ndimage
from geometry import granulometry as gr
if not hasattr(self,'features'):
self.initialiseFeatures()
granuloLayers = gr.analyse(im=self.matrix.filled(), threshold=threshold, scales=scales,
verbose=verbose,display=display, outputFolder=outputFolder,
*args, **kwargs)
for n, layer in enumerate(granuloLayers):
scale = scales[n]
if verbose:
print 'constructing granulo feature for layer: scale =', scale
layer = layer.astype(float)
layer = (ndimage.gaussian_filter(layer, scale) >0)
layer = np.ma.array(layer, mask=0, fill_value=-999)
            self.features = ma.dstack([self.features, layer * multiplier])
return granuloLayers
def gaborFeatures(self, sigma=20, scales = [1, 2, 4, 8, 16], NumberOfOrientations = 4, memoryProblem=False,
multiplier=1.,
outputFolder="", *args, **kwargs):
if not hasattr(self, 'features'):
self.initialiseFeatures()
from filter import gabor
filterFeatures = gabor.main(self, sigma, scales, NumberOfOrientations, memoryProblem,
outputFolder, *args, **kwargs)
try:
            self.features = np.dstack([self.features, filterFeatures * multiplier])
return filterFeatures
except:
return filterFeatures
def thresholdFeatures(self, threshold=0, multiplier=100., *args, **kwargs):
"""
those masked or below the threshold are considered one class
"""
if not hasattr(self, 'features'):
self.initialiseFeatures()
m = self.matrix.mask + (self.matrix < threshold)
m = (1-m) * multiplier
self.features = np.dstack([self.features, m])
return m
def globalShapeFeatures(self, lowerThreshold= 0., upperThreshold=35.,
computeSkewness=True, # 2014-11-11
computeAngle=True): #2014-11-24
"""
0. number of components
1. volume
2. centroid
3. high intensity region volume
4. moments
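        Illustrative sketch (assumes a loaded DBZ object `a`):
        >>> feats = a.globalShapeFeatures(lowerThreshold=0., upperThreshold=35.)
        >>> feats['numberOfComponents'], feats['volume']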
"""
from .geometry import moments as mmt
a1 = self.above(lowerThreshold)
rectangle = a1.getRegionForValue(1)
a1 = a1.connectedComponents()
M1 = a1.matrix.max()
components1 = [(a1.matrix==v).sum() for v in range(M1+1)]
numberOfComponents = len([v for v in components1[1:] if v>=100]) # the background (labelled as 0) doesn't count
volume = a1.matrix.sum()
centroid = self.getCentroid()
eigenvalues, eigenvectors = a1.getEigens()
#
a2 = a1.above(upperThreshold)
highIntensityRegionVolume = a2.matrix.sum()
HuMoments = mmt.HuMoments(self.matrix)
features = { 'numberOfComponents' : numberOfComponents,
'volume' : volume,
'centroid' : centroid,
'highIntensityRegionVolume' : highIntensityRegionVolume,
'HuMoments' : HuMoments,
'rectangle' : rectangle,
'eigenvalues' : eigenvalues,
'eigenvectors' : eigenvectors,
}
if computeSkewness: #2014-11-11
#features['skewness'] = mmt.skewness2(a1.matrix) # skewness with fixed x- y-axes 2014-11-11
#features['kurtosis'] = mmt.kurtosis2(a1.matrix)
#instrinsicSkewness = a1.skewness(lower=lowerThreshold)
            intrinsicSkewness = a1.skewness(lower=0, upper=M1+1)
            features['skewness'] = intrinsicSkewness['skewness'] # skewness with intrinsic moment arms 2014-11-12
            features['kurtosis'] = intrinsicSkewness['kurtosis']
if computeAngle:
features['angle'] = a1.getRelativeAngle()
self.globalFeatures = features
return features
def localShapeFeatures(self, block=False,
#minComponentSize=100,
minComponentSize=dp.defaultMinComponentSize,
lowerThreshold=0,
upperThreshold=51,
*args, **kwargs #2014-11-11
):
"""
from armor/tests/imageToDataTest4.py
"""
a = self.copy()
a = a.above(lowerThreshold)
a1 = a.connectedComponents()
a2 = a.above(upperThreshold).connectedComponents()
#a1.show(block=True)
#a2.show(block=True)
# get the components
M1 = a1.matrix.max()
#M2 = a2.matrix.max()
components1 = [(a1.matrix==v).sum() for v in range(M1+1)]
#components2 = [(a2.matrix==v).sum() for v in range(M2+1)]
#components1 = sorted([(a1.matrix==v).sum() for v in range(M1+1)][1:], reverse=True)
#components2 = sorted([(a2.matrix==v).sum() for v in range(M2+1)][1:], reverse=True)
components1 = [v for v in components1 if v>=minComponentSize]
#components2 = [v for v in components2 if v>=10]
print 'Largest components for %s:' %self.name, sorted(components1, reverse=True)
#print sorted(components2, reverse=True)[1:]
# get the moments
from armor.geometry import moments as mmt
HuPowers = np.array([2., 4., 6., 6., 12., 8., 12.])
HuPowers = (HuPowers)**-1
moments1 = np.array([mmt.HuMoments(a1.matrix==v)**HuPowers for v in range(len(components1))])
#moments2 = np.array([mmt.HuMoments(a2.matrix==v)**HuPowers for v in range(len(components2))])
print moments1
#print moments2
# defining the features
        numberOfComponents = len([v for v in components1[1:] if v>=minComponentSize]) # regions of at least minComponentSize pixels
volume = a1.matrix.sum() + a2.matrix.sum()
localFeatures = [a1.levelSet(v).globalShapeFeatures(lowerThreshold=lowerThreshold, upperThreshold=upperThreshold ,*args, **kwargs) #2014-11-11
for v in range(len(components1))]
        localFeatures.sort(key=lambda v: v['volume'], reverse=True)
localFeatureVectors = [np.array([(lf['volume'])**.5] + \
(lf['centroid']/10).tolist() + [np.log(v) for v in lf['HuMoments']] + [lf['numberOfComponents']]) \
for lf in localFeatures]
features = { 'dataTime' : a.dataTime,
'globalFeatures' : a1.globalShapeFeatures(lowerThreshold=1, upperThreshold=51,),
'localFeatures' : localFeatures, # this includes the "background"
'localFeatureVectors' : localFeatureVectors,
}
self.localFeatures = features
return features
########################################################
# tests
def powerSpecTest(self, *args, **kwargs):
from . import analysis
return analysis.powerSpecTest(self, *args, **kwargs)
#
########################################################
################################################################################
################################################################################
################################################################################
class VectorField(object):
"""wraps two masked arrays sharing the same mask (how can i make them share a mask?)
example:
>>> from armor import pattern
>>> a = pattern.DBZ(dataTime="20120612.0200")
>>> a.load()
>>> a.show()
>>> b = pattern.VectorField(a.matrix, -a.matrix)
>>> b.plot()
>>> b.show()
"""
def __init__(self, U, V, mask=False, name='vectorfield', dataPath="", outputPath="", imagePath="", \
key='vector field', title='title', gridSize=25):
""" U = first = i-component; V=second=j-component
"""
U = U.view(ma.MaskedArray)
V = V.view(ma.MaskedArray)
mask = U.mask + V.mask + mask
U.mask = mask.copy()
V.mask = mask.copy()
self.U = U
self.V = V
self.mask=mask
#################################################
# i don't know how to make this work; comment out
#if not isinstance(mask, type(False)): # if mask explicitly given, initialise with it
# self.U.mask = mask
# self.V.mask = mask
#################################################
self.name = name
self.dataPath = dataPath
self.outputPath = outputPath # for the future
self.imagePath = imagePath
self.key = key
self.title = title
self.gridSize= gridSize
######################################################################
# start basic operator overloads
def __sub__(self, vect2):
"""defining the subtraction of two vector fields
"""
if isinstance(vect2, tuple) or isinstance(vect2,list):
name = self.name + "_minus_" + str(vect2)
#U = self.U - vect2[0] # before 2014-1-23 we used (x,y) for external interface, not i,j
#V = self.V - vect2[1] # this feature was rarely used so ...
U = self.U - vect2[1] # 2014-1-23 - i changed my mind - because we need something like mn+vect
V = self.V - vect2[0] # hope this doesn't break anything - fingers crossed?!
mask = self.mask.copy()
key = self.key + " minus " + str(vect2)
title = self.title+" minus " + str(vect2)
gridSize = self.gridSize
else:
name = self.name + "_minus_" + vect2.name
U = self.U - vect2.U
V = self.V - vect2.V
mask = self.mask + vect2.mask.copy()
key = self.key + " minus " + vect2.key
title = self.title+" minus " + vect2.title
gridSize = min(self.gridSize, vect2.gridSize)
outputPath = self.outputPath + name + ".dat"
dataPath = outputPath
imagePath = self.imagePath + name + ".png"
return VectorField(U, V, mask=mask, name=name, dataPath=dataPath, outputPath=outputPath,\
imagePath=imagePath, key=key, title=title, gridSize=gridSize)
def __rsub__(self, vect1):
"""
2014-01-24
adapted from __sub__()
focus on the case "A-B" where A is a pair (tuple) or list
"""
if isinstance(vect1, tuple) or isinstance(vect1,list):
name = str(vect1) + "_minus_" + self.name
U = vect1[1] - self.U # convention: (i,j) = (y,x)
V = vect1[0] - self.V # hope this doesn't break anything - fingers crossed?!
mask = self.mask.copy()
key = str(vect1) + " minus " + self.key
title = str(vect1) + " minus " + self.title
gridSize = self.gridSize
else:
name = vect1.name + "_minus_" + self.name
U = vect1.U - self.U
V = vect1.V - self.V
mask = self.mask + vect1.mask.copy()
key = vect1.key + " minus " + self.key
title = vect1.title +" minus " + self.title
gridSize = min(self.gridSize, vect1.gridSize)
outputPath = self.outputPath + name + ".dat"
dataPath = outputPath
imagePath = self.imagePath + name + ".png"
return VectorField(U, V, mask=mask, name=name, dataPath=dataPath, outputPath=outputPath,\
imagePath=imagePath, key=key, title=title, gridSize=gridSize)
def __add__(self, vect2):
"""defining the addition of two vector fields
"""
if isinstance(vect2, tuple) or isinstance(vect2,list):
name = self.name + "_plus_" + str(vect2)
#U = self.U + vect2[0] # before 2014-1-23 we used (x,y) for external interface, not i,j
#V = self.V + vect2[1] # this feature was rarely used so ...
U = self.U + vect2[1] # 2014-1-23 - i changed my mind - because we need something like mn+vect
V = self.V + vect2[0] # hope this doesn't break anything - fingers crossed?!
mask = self.mask.copy()
key = self.key + " plus " + str(vect2)
title = self.title+" plus " + str(vect2)
gridSize = self.gridSize
else:
name = self.name + "_plus_" + vect2.name
U = self.U + vect2.U
V = self.V + vect2.V
mask = self.mask + vect2.mask.copy()
key = self.key + " plus " + vect2.key
title = self.title+" plus " + vect2.title
gridSize = min(self.gridSize, vect2.gridSize)
outputPath = self.outputPath + name + ".dat"
dataPath = outputPath
imagePath = self.imagePath + name + ".png"
return VectorField(U, V, mask=mask, name=name, dataPath=dataPath, outputPath=outputPath,\
imagePath=imagePath, key=key, title=title, gridSize=gridSize)
def __radd__(self, vect2):
"""defining right-addition; wrapping __add__"""
return self+vect2
def __mul__(self, s):
"""scalar for now, will extend later
"""
if isinstance(s, tuple) or isinstance(s,list):
U = self.U * s[0]
V = self.V * s[1]
else:
U = self.U * s
V = self.V * s
mask=self.mask.copy()
name=self.name + "__times__" + str(s)
dataPath=''
outputPath=self.outputPath + "__times__" + str(s)
imagePath =self.imagePath + "__times__" + str(s)
key=self.key + "__times__" + str(s)
title=self.title + "__times__" + str(s)
gridSize = self.gridSize
return VectorField(U=U, V=V, mask=mask, name=name, dataPath=dataPath, \
outputPath=outputPath, imagePath=imagePath, \
key=key, title=title, gridSize=gridSize)
def __rmul__(self,s):
"""wrapping __mul__
2014-01-23
"""
return self * s
def __call__(self, i=-999, j=-999, verbose=False):
"""
22 jan 2014
adapted from DBZ.__call__
"""
if i ==-999 and j ==-999:
height, width = self.U.shape
h = int(height**.5 /2)
w = int(width**.5 /2)
return (self.U.filled().astype(int),
self.V.filled().astype(int) )
else:
"""
returns interpolated value
NOTE TO SELF: can get a better and more efficient interpolation (e.g. spline) later
"""
arr= self.U
i0 = int(i)
j0 = int(j)
i1 = i0 + 1
j1 = j0 + 1
i_frac = i % 1
j_frac = j % 1
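            # bilinear interpolation: the four surrounding grid values f00, f01, f10, f11
            # are weighted by the fractional offsets (i_frac, j_frac)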
f00 = arr[i0,j0]
f01 = arr[i0,j1]
f10 = arr[i1,j0]
f11 = arr[i1,j1]
interpolated_value_U = (1-i_frac)*(1-j_frac) * f00 + \
(1-i_frac)*( j_frac) * f01 + \
( i_frac)*(1-j_frac) * f10 + \
( i_frac)*( j_frac) * f11
if verbose:
print "U:", i_frac, j_frac, f00, f01, f10, f11
# now compute the V-component
arr= self.V
i0 = int(i)
j0 = int(j)
i1 = i0 + 1
j1 = j0 + 1
i_frac = i % 1
j_frac = j % 1
f00 = arr[i0,j0]
f01 = arr[i0,j1]
f10 = arr[i1,j0]
f11 = arr[i1,j1]
interpolated_value_V = (1-i_frac)*(1-j_frac) * f00 + \
(1-i_frac)*( j_frac) * f01 + \
( i_frac)*(1-j_frac) * f10 + \
( i_frac)*( j_frac) * f11
if verbose:
print "V:", i_frac, j_frac, f00, f01, f10, f11
return np.array([interpolated_value_V, interpolated_value_U])
# end basic operator overloads
######################################################################
def plot(self, key="", title="", gridSize=0, X=-1, Y=-1, closeAll=True, lowerLeftKey=False,
vmin="", vmax="",):
"""
make the plot without showing it
adapted from
basics.plotVectorField(U, V, X=-1, Y=-1, gridSize=25, key="vector field",\
title="title", saveFileName="", outputToScreen=False):
"""
# clear the canvass
#plt.clf()
if closeAll:
plt.close()
U = self.U.copy()
V = self.V.copy()
if key =="":
key = self.key
if title =="":
title = self.title
if gridSize == 0:
gridSize = self.gridSize
width = U.shape[1]
height = U.shape[0]
if type(X)==type(-1) or type(Y)==type(-1):
X, Y = np.meshgrid(np.arange(0,width), np.arange(0,height))
left = X[ 0, 0]
bottom = Y[ 0, 0]
#computing the length of the vector field at centre for reference
r_centre = (U[height//2, width//2]**2 + V[height//2, width//2]**2) **(0.5)
print "==computing the length of the vector field at centre for reference:==\nr_centre=",\
"r_centre"
if lowerLeftKey:
# making a grid of standardardised vector in the lower-left corner
# for scale reference
U[1:gridSize+1, 1:gridSize+1] = 1
V[1:gridSize+1, 1:gridSize+1] = 0
Q = plt.quiver( X[::gridSize, ::gridSize], Y[::gridSize, ::gridSize],\
U[::gridSize, ::gridSize], V[::gridSize, ::gridSize],\
color='r', units='x', linewidths=(2,), edgecolors=('k'),\
headaxislength=5 )
qk = plt.quiverkey(Q, 0.7, 0.0, 1, 'length='+str(round(r_centre,5))+' at centre',\
fontproperties={'weight': 'bold'})
if lowerLeftKey:
qk = plt.quiverkey(Q, 0.3, 0.0, 1,\
key+',\nlength of the standard arrow in the lower-left corner=1',\
fontproperties={'weight': 'bold'})
plt.axis([left, left+width-1, bottom, bottom+height-1])
plt.title(title)
def showPlot(self, block=False, **kwargs):
self.plot(**kwargs)
plt.show(block=block)
def show(self,**kwargs): #alias
self.showPlot(**kwargs)
def savePlot(self, imagePath=""):
if imagePath != "":
self.imagePath = imagePath
self.plot()
if self.imagePath =="":
self.imagePath = raw_input("Please enter imagePath:")
plt.savefig(self.imagePath, dpi=200)
def saveImage(self, *args, **kwargs):
"""alias for savePlot
"""
self.savePlot(*args, **kwargs)
def toArray(self):
"""return normal arrays filled with -999 for missing values for other uses
"""
return ma.filled(self.U), ma.filled(self.V)
def saveMatrix(self):
"""
* We convert and save the masked arrays into standard arrays with masked data filled by -999
"""
U, V = self.toArray()
np.savetxt(self.outputPath+"U.dat", U, '%.4f')
np.savetxt(self.outputPath+"V.dat", V, '%.4f')
def pickle(self):
pickle.dump(self)
#####################################################
# functions from vector fields to values
def corr(self, vect2, region1="", region2=""):
"""adapted from DBZ.corr():
"""
height, width = self.U.shape
if region1=="":
region1 = (0, 0, height, width)
if region2=="":
region2 = region1
u1 = self.U[region1[0]:region1[0]+region1[2], \
region1[1]:region1[1]+region1[3]].flatten()
u2 = vect2.U[region2[0]:region2[0]+region2[2], \
region2[1]:region2[1]+region2[3]].flatten()
ucorr = ma.corrcoef(u1, u2)
v1 = self.V[region1[0]:region1[0]+region1[2], \
region1[1]:region1[1]+region1[3]].flatten()
v2 = vect2.V[region2[0]:region2[0]+region2[2], \
region2[1]:region2[1]+region2[3]].flatten()
vcorr = ma.corrcoef(v1, v2)
return {'ucorr': ucorr, 'vcorr': vcorr}
########################################################
# functions from vector fields to DBZ objects
# 2014-01-21
def semiLagrange(self, L, k=6, direction=+1, verbose=True):
"""
22 jan 2014
semi lagrangian advection of a set via self
input:
L = list of points[(i, j), ...] = [(y, x), ...]
k = steps, default = 6 steps
= 1 hour if the time-interval between successive COMPREF charts is 10 mins
output:
results - a list of np.array pairs [ (i,j) coordinates ]
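        illustrative sketch (assumes a VectorField object `vect`):
        >>> pts = [(100, 200), (150, 250)]          # (i, j) = (y, x) coordinates
        >>> new_pts = vect.semiLagrange(pts, k=6)   # advect six steps along the field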
"""
if verbose:
print "semiLagrangian advection of", L, "via", self.name
results = copy.deepcopy(L) # output holder
for n, pt in enumerate(L): # point
for stp in range(k): # step
i, j = results[n]
di, dj = self(i, j)
results[n] = np.array([i+ direction*di, j+ direction*dj])
if verbose:
print pt, "-->", results[n]
return results
# end functions from vector fields to DBZ objects
########################################################
########################################################
# supplementary functions
# end supplementary functions
########################################################
################################################################################
################################################################################
# streams of DBZ objects, with basic operations, comparisons, etc
class DBZstream:
"""
a stream of DBZ objects, with basic i/o facilities
migrating some codes from armor.basicio.dataStream
WE DO ASSUME THAT there are no two sets of data with the same dataTime
or else we would need some extra logic to check for redundancies.
"""
###########################################################
#
# basic construction
def __init__(self, dataFolder='../data_temp/',
#name="COMPREF.DBZ",
name="",
lowerLeftCornerLatitudeLongitude=defaultLowerLeftCornerLatitudeLongitude,
upperRightCornerLatitudeLongitude=defaultUpperRightCornerLatitudeLongitude,
outputFolder="",
imageFolder="",
taiwanReliefFolder ="",
key1="", # keywords to pick out specific files
key2="", # used only once in the __init__
key3="",
preload=False,
imageExtension = '.png', #added 2013-09-27
dataExtension = '.txt',
dataExtension2 = '.dat',
forceAll =False , #2014-07-26
vmin = -40., #added 2013-10-28
vmax = 100.,
coastDataPath = "", #2014-06-25
):
"""
construct the objects without loading them
input: path of folder "/../../"
process: parse the folder for files
output: sequence of armor.pattern.DBZ objects
DBZ(name, dataPath, dataTime)
# parse the filename and look for clues
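        Illustrative sketch (folder path and keyword are placeholders):
        >>> ds = DBZstream(dataFolder='../data_temp/', name='COMPREF', key1='COMPREF')
        >>> ds.load('20120612')        # load only the slides for that date
        >>> ds.cutUnloaded()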
"""
if outputFolder =="":
outputFolder = defaultOutputFolder
if not outputFolder.endswith('/'):
outputFolder += '/'
if imageFolder =="":
imageFolder = defaultImageFolder
if taiwanReliefFolder =="":
taiwanReliefFolder = dataFolder
if coastDataPath =="":
coastDataPath = taiwanReliefFolder + "taiwanCoast.dat"
self.dataFolder = dataFolder
self.taiwanReliefFolder = taiwanReliefFolder
self.lowerLeftCornerLatitudeLongitude = lowerLeftCornerLatitudeLongitude
self.upperRightCornerLatitudeLongitude = upperRightCornerLatitudeLongitude
self.outputFolder = outputFolder
self.imageFolder = imageFolder
self.imageExtension = imageExtension
self.dataExtension = dataExtension
self.dataExtension2 = dataExtension2
self.vmin = vmin
self.vmax = vmax
self.coastDataPath = coastDataPath
dbzList = []
dataFolder = re.sub(r'\\', '/' , dataFolder) # standardise: g:\\ARMOR .. --> g:/ARMOR
dataSource = '-'.join(dataFolder.split('/')[-2:]) + '-'
if name != "":
self.name = name
else:
self.name = dataSource
L = os.listdir(dataFolder)
if not forceAll:
L = [v for v in L if (v.lower().endswith(self.dataExtension) or v.lower().endswith(self.dataExtension2))\
and (key1 in v) and (key2 in v) and (key3 in v)] # fetch the data files
L.sort()
for fileName in L:
dataTime = ""
dataTime = re.findall(r'\d{4}', fileName)
if len(dataTime)<3 and not forceAll: # NOT DATED DBZ FILE, REJECT
continue
if len(dataTime)>3: # 2014-05-06 hack - assuming the date-time would be at the end of the filename
dataTime = dataTime[-3:]
try:
dataTime = dataTime[0] + dataTime[1] + '.' + dataTime[2]
except:
dataTime = ""
dbzName = name + dataTime
dataPath = dataFolder + fileName
a = DBZ(dataTime=dataTime,
name=dbzName,
dataPath=dataPath,
outputPath=outputFolder+dbzName+self.dataExtension,
imagePath=imageFolder+dbzName+self.imageExtension,
lowerLeftCornerLatitudeLongitude=lowerLeftCornerLatitudeLongitude,
upperRightCornerLatitudeLongitude=upperRightCornerLatitudeLongitude,
vmin = self.vmin,
vmax = self.vmax,
coastDataPath = coastDataPath , #2014-06-25
relief100DataPath = taiwanReliefFolder + "relief100.dat",
relief1000DataPath = taiwanReliefFolder + "relief1000.dat",
relief2000DataPath = taiwanReliefFolder + "relief2000.dat",
relief3000DataPath = taiwanReliefFolder + "relief3000.dat",
)
if preload:
a.load()
dbzList.append(a)
a.DBZstream = self
## there you go! ######
#
self.list = dbzList
#
#######################
def __call__(self, N=-999, key2=""):
"""
if N is an integer then return the N-th DBZ pattern in the stream
else if N is a string then return those whose names or dataTimes contains N
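        Illustrative sketch:
        >>> ds(3)               # the fourth DBZ object in the stream
        >>> ds('20120612.02')   # all objects whose name or dataTime contains '20120612.02'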
"""
if N == -999:
return self.list
elif isinstance(N, int):
return self.list[N]
elif isinstance(N, str):
return [v for v in self.list if (N in v.name or N in v.dataTime) and (key2 in v.name or key2 in v.dataTime) ]
def __getitem__(self, N=-999):
"""alias for self.list[] """
item = self.list[N]
return item
def __len__(self, dataTime=""):
return len([v for v in self.list if dataTime in v.dataTime])
###########################################################
#
# stream operations
def append(self, filePath):
"""
to append a new member to the DBZstream list or a DBZstream to another
"""
pass
def recentreTaichungPark(self):
for D in self.list:
D.recentreTaichungPark()
def recentre(self):
"""alias"""
self.recentreTaichungPark()
def regrid(self, b):
"""
wrapping armor.geometry.regrid.regrid()
b is another DBZ object representing the grid pattern to be transformed to
"""
from armor.geometry import regrid
for i in range(len(self.list)):
a_temp = regrid.regrid(self.list[i], b)
self.list[i].matrix = a_temp.matrix
self.list[i].name += '[regridded]'
def cutUnloaded(self):
"""
cut the unloaded objects
"""
i=0
while i < len(self.list):
dbzObject = self.list[i]
if (dbzObject.matrix**2).sum()==0:
del(self.list[i])
else:
i+=1
return i # length of the stream in the end
def intersect(self, ds2, cut_first=False, cut_second=False, verbose=False):
"""
find the intersection of ds1 and ds2 w.r.t. dataTime
and cut ds1 accordingly
we assume each dataTime (e.g. '20120612.0200') appears only once in any dataStream
this burden is on data management, not here
"""
ds1 = self #alias
# 1. get the list of common dataTimes
dataTimeList1 = [v.dataTime for v in ds1.list]
dataTimeList2 = [v.dataTime for v in ds2.list]
common_dataTimeList = sorted(list(set(dataTimeList1).intersection(set(dataTimeList2))))
if verbose:
            print common_dataTimeList
# 2. cut ds1, ds2
ds1_new_list = [v for v in ds1 if v.dataTime in common_dataTimeList]
ds2_new_list = [v for v in ds2 if v.dataTime in common_dataTimeList]
if cut_first:
            ds1.list = ds1_new_list
        if cut_second:
            ds2.list = ds2_new_list
return ds1_new_list, ds2_new_list
def setThreshold(self, threshold):
"""
set the threshold for each member of the stream
"""
for dbzpattern in self:
dbzpattern.setThreshold(threshold)
def backupMatrices(self):
for dbzpattern in self:
dbzpattern.backupMatrix()
def restoreMatrices(self):
for dbzpattern in self:
dbzpattern.restoreMatrix()
def shuffle(self, n=1):
"""
well, technically it's inverse shuffle
"""
for i in range(n):
self.list = self.list[1::2] + self.list[0::2]
###########################################################
#
#def load(self, N=-999, name="", verbose=False):
# basic I/O
def load(self, N=-999, key2="", toInferPositionFromShape=True, verbose=False):
"""
N - index of object to be loaded, if N==-999 : load all
if N is a string, look through the list of dbz objects
and load those whose dataTime string contain N
and whose name contains name
"""
if N==-999:
for img in self.list:
if verbose:
print img.name, '|',
img.load(toInferPositionFromShape=toInferPositionFromShape)
elif isinstance(N, int):
self.list[N].load(toInferPositionFromShape=toInferPositionFromShape)
elif isinstance(N, str):
for img in self.list:
if (N in img.dataTime or N in img.name) and (key2 in img.name or key2 in img.dataTime):
img.load(toInferPositionFromShape=toInferPositionFromShape)
if verbose:
print img.name, '|',
def unload(self, key=""):
"""
unload/delete the loaded DBZ data to save memory
"""
for D in self:
if key in D.name or key in D.dataTime:
D.matrix = np.zeros((1,1))
def setImageFolder(self, folder=""):
# deprecated per the saveImages() function
if folder != "":
self.imageFolder=folder
for dbzPattern in self.list:
dbzPattern.imageFolder = folder
#dbzPattern.imagePath = folder + dbzPattern.name + '_'+dbzPattern.dataTime + ".png"
try:
dbzPattern.imagePath = folder + self.name + '_' + dbzPattern.dataTime + self.imageExtension
except AttributeError: # hack added 2013-09-27
self.imageExtension = defaultImageExtension
dbzPattern.imagePath = folder + self.name + '_' + dbzPattern.dataTime + self.imageExtension
def setImagePaths(self, *args, **kwargs):
""" alias"""
return self.setImageFolder(*args, **kwargs)
def setOutputFolder(self, folder):
"""
set image folders and paths without saving
"""
if not os.path.isdir(folder): # added 2014-03-06
os.makedirs(folder) # added 2014-03-06
self.outputFolder=folder
for dbzPattern in self.list:
dbzPattern.outputFolder = folder
#dbzPattern.outputPath = folder + dbzPattern.name + '_'+dbzPattern.dataTime + ".dat"
dbzPattern.outputPath = folder + dbzPattern.dataTime + self.dataExtension
def setTaiwanReliefFolder(self, folder=''):
if folder =="":
folder = self.taiwanReliefFolder
else:
self.taiwanReliefFolder = folder
for dbzpattern in self:
dbzpattern.coastDataPath = folder + "taiwanCoast.dat"
dbzpattern.relief100DataPath = folder + "relief100.dat"
dbzpattern.relief1000DataPath = folder + "relief1000.dat"
dbzpattern.relief2000DataPath = folder + "relief2000.dat"
dbzpattern.relief3000DataPath = folder + "relief3000.dat"
def setVmin(self, vmin=""): # DBZ.vmin and DBZ.vmax are colour parameters for image output
if vmin == "":
vmin = self.vmin
else:
self.vmin = vmin
for dbzPattern in self: # telling the plotting function the min and max value in each chart
dbzPattern.vmin = vmin
def setVmax(self, vmax=""):
if vmax == "":
vmax = self.vmax
else:
self.vmax = vmax
for dbzPattern in self:
dbzPattern.vmax = vmax
def saveImages(self, toLoad=False, flipud=False, drawCoast=False, verbose=False, dpi=200):
"""
note: here we set the imagePath's first (imageFolder+dataTime+ .png)
and then save the images to it
"""
ds1 = self # just a reminder self= a dbz data stream
try: # make it in case the folder does not exist yet
os.makedirs(ds1.imageFolder)
except OSError: # except if it's already there
pass
for dbzPattern in ds1.list:
dbzPattern.imagePath = ds1.imageFolder + ds1.name + dbzPattern.dataTime + '.png'
if toLoad:
dbzPattern.load()
if drawCoast:
dbzPattern.drawCoast()
if flipud==True:
dbzPattern.matrix = np.flipud(dbzPattern.matrix)
if verbose:
print dbzPattern.imagePath
#xxx = raw_input('press enter to continue:')
dbzPattern.saveImage(dpi=dpi)
def saveMatrices(self, verbose=False):
"""
        note:   here we set the outputPaths first (outputFolder + dataTime + .dat)
                and then save the matrices to them
"""
ds1 = self # just a reminder self= a dbz data stream
try: # make it in case the folder does not exist yet
os.makedirs(ds1.outputFolder)
except OSError: # except if it's already there
pass
for dbzPattern in ds1.list:
dbzPattern.outputPath = ds1.outputFolder + ds1.name + dbzPattern.dataTime + '.dat'
if verbose:
                print dbzPattern.outputPath
xxx = raw_input('press enter to continue:')
dbzPattern.saveMatrix()
###########################################################
#
# functions on streams
def listLoaded(self):
"""
return the list of loaded DBZ objects in the stream
essentially computing those with matrix!=0
"""
L = [v for v in self if (v.matrix**2).sum()!=0]
return L
def countLoaded(self):
"""
return the number of loaded DBZ objects in the stream
essentially computing those with matrix!=0
"""
return len([v for v in self if (v.matrix**2).sum()!=0])
def setMasks(self, lowerThreshold):
"""
reset the masks, masking those values lower than the new lowerThreshold
True=masked
2013-09-27
"""
for dbzpattern in self.list:
m = dbzpattern.matrix
m.mask = False
m.mask = (m < lowerThreshold)
def setFloor(self, lower):
"""
        cut off those that are below lower and replace them with lower
"""
for d in self.list:
m = d.matrix
d.matrix = m + (m<lower)* (lower-m)
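        # The expression m + (m < lower) * (lower - m) leaves values >= lower untouched and lifts
        # every smaller value exactly up to `lower` (a vectorised clip-from-below).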
def setCommonMask(self, key=""):
"""
        set every mask to the union of all masks (the biggest mask, i.e. the smallest common unmasked region)
"""
m = self.list[0].matrix.mask
try:
for d in self(key):
m += d.matrix.mask
for d in self(key):
d.matrix.mask = m
except:
print 'cannot construct common mask!'
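        # Note: adding boolean numpy masks element-wise acts as a logical OR, so `m` accumulates the
        # union of the individual masks before being written back to every member matching `key`
        # (this assumes each matrix carries a real array mask rather than numpy.ma.nomask).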
def fix(dbzstream, key1='', threshold=0): #standard fix, codes from armor.objects2
print 'loading', dbzstream.name, 'with key', key1
dbzstream.load(key1)
print 'cutting the excess'
dbzstream.cutUnloaded()
print 'setting threshold', threshold
dbzstream.setThreshold(threshold)
dbzstream.setTaiwanReliefFolder()
dbzstream.setVmin()
dbzstream.setVmax()
def timeShift(self, days=0, secs=0, verbose=False):
"""
        just change the dataTimes and names; nothing else (the dataPaths aren't changed)
2014-03-05
"""
for d in self:
T = d.datetime()
dt = datetime.timedelta(days + 1.*secs/86400)
T += dt
d.setDataTime(T)
d.name += "_timeShift_%ddays_%dsecs" % (days, secs)
if verbose:
print d.name, d.dataTime
###########################################################
#
# Tests and comparisons
def corr(self, ds2, verbose=False):
"""
returns a list of correlation of the streams
[(dataTime <str>, corr <float>),...]
"""
ds1 = self # alias
# 1. get the list of common dataTimes
dataTimeList1 = [v.dataTime for v in ds1.list]
dataTimeList2 = [v.dataTime for v in ds2.list]
dataTimeList = sorted(list(set(dataTimeList1).intersection(set(dataTimeList2))))
if verbose:
print dataTimeList
# 2. compute the correlations with the built in DBZ.corr() method
L = []
for T in dataTimeList:
a = ds1(T)[0]
b = ds2(T)[0]
L.append((T, a.corr(b)))
return L
def invariantMomentsCorr(self, ds2, comparison='distance', verbose=False):
'''
        comparison = 'dot', 'corr' (correlation), or 'distance' (euclidean distance, the default)
'''
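        # The three modes below compare the vectors of invariant moments produced by invariantMoments():
        #   'corr'     - Pearson correlation of the two moment vectors (np.corrcoef)
        #   'dot'      - cosine similarity:  dot(a, b) / sqrt(dot(a, a) * dot(b, b))
        #   otherwise  - euclidean distance ||a - b||, which is the default ('distance')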
ds1 = self
from geometry import moments
# 1. get the list of common dataTimes
dataTimeList1 = [v.dataTime for v in ds1.list]
dataTimeList2 = [v.dataTime for v in ds2.list]
dataTimeList = sorted(list(set(dataTimeList1).intersection(set(dataTimeList2))))
if verbose:
print dataTimeList
# 2. compute the moments and compare them
L = []
for T in dataTimeList:
a = ds1(T)[0]
b = ds2(T)[0]
#a.invMom = a.invariantMoments()
#b.invMom = b.invariantMoments()
a.invariantMoments()
b.invariantMoments()
#debug
print '====== a.invMom, b.invMom: ======='
print a.invMom
print b.invMom
# end debug
if comparison == 'corr':
invarcorr = np.corrcoef(a.invMom, b.invMom)[0,1]
# correlation is not always good. use the vector dot product instead (?!)
# see the next line
print 'their correlation:', invarcorr
elif comparison == 'dot':
invarcorr = np.dot(a.invMom,b.invMom)/ (np.dot(a.invMom,a.invMom)*np.dot(b.invMom,b.invMom))**.5 #added 2013-09-28
print "their dot product:", invarcorr
else:
invarcorr = np.linalg.norm( np.array(a.invMom) - np.array(b.invMom))
print 'their euclidean distance:', invarcorr
L.append((T, invarcorr))
return L
def regionalAndGlobalInvariantMomentsCorr(self, ds2, N=4, verbose=False):
"""
        analogous to Chen Sin Gam's averaging method from CWB
returns a list of scores of the given stream ds2 w.r.t. ds1=self
[(dataTime <str>, score <float>),...]
"""
import itertools
import math
ds1 = self
height, width = ds1[0].matrix.shape
h1 = height//N
w1 = width //N
i_splits = range(0, height, h1)
j_splits = range(0, width, w1)
        splits = list(itertools.product(i_splits, j_splits))  # materialised so it can be re-iterated for every dataTime below
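        # Example with N = 4 on a 400x400 grid: h1 = w1 = 100 and splits contains
        # (0, 0), (0, 100), ..., (300, 300), i.e. the corners of an N x N tiling of the domain.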
#debug
#print 'splits - ', [v for v in splits]
#ds2 = ds2.intersect(ds1)
#ds1 = ds1.intersect(ds2)
dataTimes1 = sorted([v.dataTime for v in ds1])
dataTimes = sorted([v.dataTime for v in ds2 if v.dataTime in dataTimes1])
regionalMomentAverages = []
for T in dataTimes:
invarcorr = 0.
observation = ds1(T)[0] # one way or other; just for convenience
wrf = ds2(T)[0] # reminder to self: there should be/we assume there is only one in the list, so [0] works
# global moments corr
a = observation
b = wrf
a.invariantMoments()
b.invariantMoments()
#invarcorr = ma.corrcoef(a.invMom, b.invMom)[0,1]
# correlation is not always good. use the vector dot product instead (?!)
# see the next line
            invarcorr = ma.dot(a.invMom,b.invMom)/ (ma.dot(a.invMom,a.invMom)*ma.dot(b.invMom,b.invMom))**.5 #added 2013-09-28
#if not isinstance(invarcorr, float):
#if (invarcorr==np.nan):
#if not (invarcorr>999 or invarcorr<=999): # if invarcorr!=nan
if math.isnan(invarcorr):
invarcorr = 0.
Count = 0
else:
Count = N
# local moments corr
#debug
print 'invarcorr:', invarcorr, '/',
for i,j in splits:
a1 = a.getWindow(i, j, h1, w1)
                b1 = b.getWindow(i, j, h1, w1)
a1.invariantMoments()
b1.invariantMoments()
invarcorr_regional = ma.corrcoef(a1.invMom, b1.invMom)[0,1]
# debug
print invarcorr_regional,
#if not (invarcorr_regional==np.nan):
#if (invarcorr_regional>999 or invarcorr_regional<=999): # if invarcorr_regional==nan
if not math.isnan(invarcorr_regional):
invarcorr += invarcorr_regional
Count +=1
#debug
print invarcorr, ':', Count, '/',
invarcorr = 1. * invarcorr / Count # weighed average
print ':', invarcorr, '||',
regionalMomentAverages.append((T, invarcorr))
return regionalMomentAverages
def gaborFeaturesTest(self, ds2, verbose=False):
"""
use gabor filter to extract local scale and orientation features
"""
pass
def clusteringTest(self, ds2, verbose=False):
"""
comparison via clustering
"""
pass
def LeesTransformation(self, ds2, verbose=False):
"""
Professor Tim-Hau Lee's Idea - perform a normalising transformation
according to the first two moments
before comparisons
"""
ds1 = self
pass
################################################################################
DS = DBZstream
########################
# demo
a = DBZ('20120612.0200')
b = DBZ('20120612.0230')
c = DBZ('20120612.0300')
d = DBZ('20120612.0210')
e = DBZ('20120612.0240')
f = DBZ('20120612.0310')
#a.load()
#b.load()
#c.load()
#d.load()
#e.load()
#f.load()
#a.setThreshold(0)
#b.setThreshold(0)
#c.setThreshold(0)
#d.setThreshold(0)
#e.setThreshold(0)
#f.setThreshold(0)
ds1 = DBZstream(name="COMPREF",
imageFolder = '../labReports/20130827/COMPREF/',
outputFolder = '../labReports/20130827/COMPREF/')
"""
exit()
python
from armor import pattern
"""
try:
print externalHardDriveRoot
ds2 = DBZstream(dataFolder='%sdata/SOULIK/wrf_shue/' %externalHardDriveRoot,
lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
)
except:
print 'EXTERNAL HARD DRIVE %sdata/SOULIK/wrf_shue/' %externalHardDriveRoot, "NOT FOUND"
try:
ds2 = DBZstream(dataFolder='%sdata/SOULIK/wrf_shue/' %hardDriveRoot,
lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
)
print 'HARD DRIVE %sdata/SOULIK/wrf_shue/' %hardDriveRoot, "\nFOUND!!"
except:
print 'HARD DRIVE %sdata/SOULIK/wrf_shue/' %hardDriveRoot, "NOT FOUND"
try:
print externalHardDriveRoot2
ds2 = DBZstream(dataFolder='%sdata/SOULIK/wrf_shue/' %externalHardDriveRoot2,
lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
)
print 'EXTERNAL HARD DRIVE %sdata/SOULIK/wrf_shue/' %externalHardDriveRoot2, "\nFOUND!!"
except:
print 'EXTERNAL HARD DRIVE %sdata/SOULIK/wrf_shue/' %externalHardDriveRoot2, "NOT FOUND"
try:
ds3 = DBZstream(dataFolder='../data_simulation/20120611_12/', name="WRF",
lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
outputFolder = '../labReports/20130827/WRF/',
imageFolder = '../labReports/20130827/WRF/',
preload=False)
except:
print '../data_simulation/20120611_12/ - NOT FOUND'
"""
The following are constructed from data from Mr. Shue: https://mail.google.com/mail/u/0/?shva=1#search/azteque%40manysplendid.com/14070bb7d7aef48c
wd3
282x342
MaxLatF = 28.62909
MinLatF = 17.7094
MaxLonF = 127.6353
MinLonF = 113.3272
"""
try:
cx = DBZ(name='WRF20120612.0200', dataTime='20120612.0200',
dataPath= usbRoot + '/data_simulation/20120611_12/out_201206120200.txt',
lowerLeftCornerLatitudeLongitude= (17.7094, 113.3272),
upperRightCornerLatitudeLongitude= (28.62909,127.6353) ,
)
dx = DBZ(name='WRF20120612.0210', dataTime='20120612.0210',
dataPath= usbRoot + '/data_simulation/20120611_12/out_201206120210.txt',
lowerLeftCornerLatitudeLongitude= (17.7094, 113.3272),
upperRightCornerLatitudeLongitude= (28.62909,127.6353) ,
)
except:
print 'data not found! construction of cx and dx skipped'
| cc0-1.0 |
sumspr/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
jayflo/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
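        # OneClassSVM acts here as a density/support estimator on the standardized environmental
        # features: nu (0.1) is an upper bound on the fraction of training points allowed outside
        # the estimated support, and the RBF gamma controls how tightly that support hugs the data.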
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
adamtiger/tensorflow | tensorflow/examples/learn/text_classification.py | 17 | 6649 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def estimator_spec_for_softmax_classification(
logits, labels, mode):
"""Returns EstimatorSpec instance for softmax classification."""
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def bag_of_words_model(features, labels, mode):
"""A bag-of-words model. Note it disregards the word order in the text."""
bow_column = tf.feature_column.categorical_column_with_identity(
WORDS_FEATURE, num_buckets=n_words)
bow_embedding_column = tf.feature_column.embedding_column(
bow_column, dimension=EMBEDDING_SIZE)
bow = tf.feature_column.input_layer(
features,
feature_columns=[bow_embedding_column])
logits = tf.layers.dense(bow, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
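# Note on the model above: an embedding_column built on a categorical identity column combines the
# word embeddings of each document into a single EMBEDDING_SIZE vector (the default combiner should
# be 'mean'), so word order is discarded entirely, hence "bag of words".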
def rnn_model(features, labels, mode):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
# Split into list of embedding per word, while removing doc length dim.
  # word_list ends up as a list of MAX_DOCUMENT_LENGTH tensors, each of shape [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
# Given encoding of RNN, take encoding of last step (e.g hidden size of the
# neural network of last step) and pass it as features for softmax
# classification over output classes.
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.Series(dbpedia.train.data[:,1])
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.Series(dbpedia.test.data[:,1])
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
# Subtract 1 because VocabularyProcessor outputs a word-id matrix where word
# ids start from 1 and 0 means 'no word'. But
# categorical_column_with_identity assumes 0-based count and uses -1 for
# missing word.
x_train -= 1
x_test -= 1
model_fn = bag_of_words_model
classifier = tf.estimator.Estimator(model_fn=model_fn)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
pytrainer/pytrainer | pytrainer/gui/windowmain.py | 1 | 97568 | # -*- coding: utf-8 -*-
#Copyright (C) Fiz Vazquez [email protected]
# Modified by dgranda
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import logging
import matplotlib
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkPixbuf
import dateutil.parser
from .SimpleGladeApp import SimpleBuilderApp
from .popupmenu import PopupMenu
from .aboutdialog import About
import pytrainer.record
from pytrainer.lib.date import Date, second2time
from pytrainer.lib.xmlUtils import XMLParser
#from pytrainer.lib.gpx import Gpx
from pytrainer.recordgraph import RecordGraph
from pytrainer.weekgraph import WeekGraph
from pytrainer.monthgraph import MonthGraph
from pytrainer.yeargraph import YearGraph
from pytrainer.totalgraph import TotalGraph
from pytrainer.heartrategraph import HeartRateGraph
from pytrainer.gui.drawGraph import DrawGraph
from pytrainer.gui.windowcalendar import WindowCalendar
from pytrainer.lib.listview import ListSearch
from pytrainer.lib.uc import UC
from pytrainer.core.activity import Activity
from pytrainer.lib.localization import gtk_str
from sqlalchemy import and_
class Main(SimpleBuilderApp):
def __init__(self, sport_service, data_path = None, parent = None, version = None, gpxDir = None):
self._sport_service = sport_service
self.version = version
self.parent = parent
self.pytrainer_main = parent
self.data_path = data_path
self.uc = UC()
SimpleBuilderApp.__init__(self, "pytrainer.ui")
self.popup = PopupMenu(data_path,self)
self.block = False
self.activeSport = None
self.gpxDir = gpxDir
self.record_list = None
self.laps = None
#Setup graph
self.grapher = DrawGraph(self, self.pytrainer_main)
self.y1_limits = None
self.y1_color = None
self.y1_linewidth = 1
# setup Search ListView
self.listsearch = ListSearch(sport_service, self)
self.aboutwindow = None
self.mapviewer = None
self.mapviewer_fs = None
self.waypointeditor = None
def new(self):
self.menublocking = 0
self.selected_view="day"
self.window1.set_title ("pytrainer %s" % self.version)
try:
width, height = self.pytrainer_main.profile.getValue("pytraining","window_size").split(',')
self.window1.resize(int(width), int(height))
except:
pass
self.record_list = []
#create the columns for the listdayrecord
columns = [{'name':_("id"), 'visible':False},{'name':_("Start"), }, {'name':_("Sport")},{'name':self.uc.unit_distance}]
self.create_treeview(self.recordTreeView,columns)
#create the columns for the listarea
# different codings for mean see eg http://de.wikipedia.org/wiki/%C3%98#Kodierung
columns=[ {'name':_("id"), 'visible':False},
{'name':_("Title")},
{'name':_("Date")},
{'name':_("Distance"), 'xalign':1.0, 'format_float':'%.2f', 'quantity': 'distance'},
{'name':_("Sport")},
{'name':_("Time"), 'xalign':1.0, 'format_duration':True},
{'name':_(u"\u2300 HR"), 'xalign':1.0},
{'name':_("Max HR"), 'xalign':1.0},
{'name':_(u"\u2300 Speed"), 'xalign':1.0, 'format_float':'%.1f', 'quantity': 'speed'},
{'name':_("Calories"), 'xalign':1.0}
]
self.create_treeview(self.allRecordTreeView,columns)
self.create_menulist(columns)
#create the columns for the waypoints treeview
columns=[{'name':_("id"), 'visible':False},{'name':_("Waypoint")}]
self.create_treeview(self.waypointTreeView,columns)
#create the columns for the athlete history treeview
columns=[ {'name':_("id"), 'visible':False},
{'name':_("Date")},
{'name':_("Weight"), 'xalign':1.0, 'quantity':'weight', 'format_float':'%.1f'},
{'name':_("Body Fat %"), 'xalign':1.0, 'quantity':'bodyfat', 'format_float':'%.1f'},
{'name':_("Resting HR"), 'xalign':1.0},
{'name':_("Max HR"), 'xalign':1.0}
]
self.create_treeview(self.athleteTreeView,columns)
#create the columns for the stats treeview
columns=[ {'name':_("id"), 'visible':False},
{'name':_("Sport")},
{'name':_("Records"), 'xalign':1.0},
{'name':_("Total duration"), 'xalign':1.0, 'format_duration':True},
{'name':_("Total distance"), 'xalign':1.0, 'format_float':'%.1f', 'quantity':'distance'},
{'name':_("Avg speed"), 'format_float':'%.2f', 'quantity':'maxspeed', 'xalign':1.0},
{'name':_("Max speed"), 'format_float':'%.2f', 'quantity':'maxspeed', 'xalign':1.0},
{'name':_("Avg HR"), 'xalign':1.0},
{'name':_("Max HR"), 'xalign':1.0},
{'name':_("Max duration"), 'xalign':1.0, 'format_duration':True},
{'name':_("Max distance"), 'xalign':1.0, 'format_float':'%.1f', 'quantity':'distance'},
]
self.create_treeview(self.statsTreeView,columns)
#create the columns for the laps treeview
columns=[
{'name':_("Lap")},
{'name':_("Trigger"), 'xalign':0, 'pixbuf':True},
{'name':_("Distance"), 'xalign':1.0, 'format_float':'%.2f', 'quantity':'distance'},
{'name':_("Time"), 'xalign':1.0, 'format_duration':True},
{'name':_("Avg speed"), 'format_float':'%.2f', 'quantity':'speed'},
{'name':_("Max speed"), 'format_float':'%.2f', 'quantity':'speed'},
{'name':_("Avg pace"), 'xalign':1.0, 'quantity':'pace'},
{'name':_("Max pace"), 'xalign':1.0, 'quantity':'pace'},
{'name':_("Avg HR"), 'xalign':1.0},
{'name':_("Max HR"), 'xalign':1.0},
{'name':_("Calories"), 'xalign':1.0},
{'name':_("Intensity"), 'visible':False},
{'name':_("Comments"), 'xalign':0.0},
]
self.create_treeview(self.lapsTreeView,columns)
#create the columns for the projected times treeview
columns=[
{'name':_("id"), 'visible':False},
{'name':_("Race"), 'xalign':1.0},
{'name':_("Distance"), 'xalign':1.0, 'format_float':'%.2f', 'quantity':'distance'},
{'name':_("Time"), 'xalign':1.0, 'format_duration':True},
]
self.create_treeview(self.analyticsTreeView,columns,sortable=False)
#create the columns for the rank treeview
columns=[
{'name':_("id"), 'visible':False},
{'name':_("Rank"), 'visible':True},
{'name':_("Date"), 'xalign':1.0},
{'name':_("Distance"), 'xalign':1.0, 'format_float':'%.2f', 'quantity':'distance'},
{'name':_("Time"), 'xalign':1.0, 'format_duration':True},
{'name':_("Speed"), 'format_float':'%.2f', 'quantity':'speed'},
{'name':_("Pace"), 'format_float':'%.2f', 'quantity':'pace'},
{'name':_("Color"), 'visible':False},
]
self.create_treeview(self.rankingTreeView,columns,sortable=False)
self.fileconf = self.pytrainer_main.profile.confdir+"/listviewmenu.xml"
if not os.path.isfile(self.fileconf):
self._createXmlListView(self.fileconf)
self.showAllRecordTreeViewColumns()
self.allRecordTreeView.set_search_column(1)
self.notebook.set_current_page(1)
#Set correct map viewer
if self.pytrainer_main.profile.getValue("pytraining","default_viewer") == "1":
self.radiobuttonOSM.set_active(1)
else:
self.radiobuttonGMap.set_active(1)
self.comboMapLineType.set_active(0)
def _float_or(self, value, default):
'''Function to parse and return a float, or the default if the parsing fails'''
try:
result = float(value)
except Exception as e:
#print type(e)
#print e
result = default
return result
def setup(self):
logging.debug(">>")
self.createGraphs()
self.createMap()
page = self.notebook.get_current_page()
self.on_page_change(None,None,page)
logging.debug("<<")
def _createXmlListView(self,file):
menufile = XMLParser(file)
savedOptions = []
savedOptions.append(("date","True"))
savedOptions.append(("distance","True"))
savedOptions.append(("average","False"))
savedOptions.append(("title","True"))
savedOptions.append(("sport","True"))
savedOptions.append(("id_record","False"))
savedOptions.append(("time","False"))
savedOptions.append(("beats","False"))
savedOptions.append(("maxbeats","False"))
savedOptions.append(("calories","False"))
menufile.createXMLFile("listviewmenu",savedOptions)
def removeImportPlugin(self, plugin):
for widget in self.menuitem1_menu:
if widget.get_name() == plugin[1]:
self.menuitem1_menu.remove(widget)
def removeExtension(self, extension):
for widget in self.recordbuttons_hbox:
if widget.get_name() == extension[1]:
logging.debug("Removing extension: %s ", extension[0])
self.recordbuttons_hbox.remove(widget)
def addExtension(self,extension):
#txtbutton,extensioncode,extensiontype = extension
button = Gtk.Button(extension[0])
button.set_name(extension[1])
button.connect("button_press_event", self.runExtension, extension)
self.recordbuttons_hbox.pack_start(button,False,False,0)
self.recordbuttons_hbox.show_all()
def runExtension(self,widget,widget2,extension):
#print extension
txtbutton,extensioncode,extensiontype = extension
id = None
if extensiontype=="record":
selected,iter = self.recordTreeView.get_selection().get_selected()
id = selected.get_value(iter,0)
self.parent.runExtension(extension,id)
def createGraphs(self):
logging.debug(">>")
self.drawarearecord = RecordGraph(self.record_graph_vbox, self.window1, self.record_combovalue, self.record_combovalue2, self.btnShowLaps, self.tableConfigY1, pytrainer_main=self.pytrainer_main)
self.drawareaheartrate = HeartRateGraph([self.heartrate_vbox, self.heartrate_vbox2, self.heartrate_vbox3], self.window1, pytrainer_main=self.pytrainer_main)
self.day_vbox.hide()
sports = self._sport_service.get_all_sports()
self.drawareaweek = WeekGraph(sports, self.weekview, self.window1, self.week_combovalue, self.week_combovalue2, self.pytrainer_main)
self.drawareamonth = MonthGraph(sports, self.month_vbox, self.window1, self.month_combovalue,self.month_combovalue2, self.pytrainer_main)
self.drawareayear = YearGraph(sports, self.year_vbox, self.window1, self.year_combovalue,self.year_combovalue2, self.pytrainer_main)
self.drawareatotal = TotalGraph(sports, self.total_vbox, self.window1, self.total_combovalue,self.total_combovalue2, self.pytrainer_main)
logging.debug("<<")
def createMap(self):
logging.debug(">>")
if not self.mapviewer and not self.mapviewer_fs and not self.waypointeditor:
try:
from pytrainer.extensions.mapviewer import MapViewer
from pytrainer.extensions.waypointeditor import WaypointEditor
self.mapviewer = MapViewer(self.data_path, pytrainer_main=self.parent, box=self.map_vbox)
self.mapviewer_fs = MapViewer(self.data_path, pytrainer_main=self.parent, box=self.map_vbox_old)
self.waypointeditor = WaypointEditor(self.data_path, self.waypointvbox,
self.pytrainer_main.waypoint,
parent=self.pytrainer_main)
except ImportError:
logging.error("Webkit not found, map functionality not available")
for container in self.map_vbox, self.map_vbox_old, self.waypointvbox:
message = Gtk.Label(_("Webkit not found, map functionality not available"))
message.set_selectable(True)
container.foreach(lambda widget:container.remove(widget))
container.add(message)
container.show_all()
logging.debug("<<")
def updateSportList(self,listSport):
logging.debug(">>")
liststore = self.sportlist.get_model()
firstEntry = _("All Sports")
liststore.clear() #Delete all items
#Re-add "All Sports"
liststore.append([firstEntry])
#Re-add all sports in listSport
for sport in listSport:
liststore.append([sport.name])
self.sportlist.set_active(0)
logging.debug("<<")
def render_duration(self, column, cell, model, iter, notif):
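        # Render a duration cell: a raw seconds value (or an 'h:mm:ss' string) is normalised to
        # 'h:mm:ss', a leading ' 0:' / ' 0:0' prefix is trimmed, and spans over 24 hours are rewritten
        # as 'N d hh:mm' (for example 90061 seconds becomes '25:01:01' and then '1 d 01:01').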
orig = cell.get_property('text')
if not ':' in orig:
h,m,s = second2time(int(orig))
new = '%d:%02d:%02d' % (h,m,s)
else:
new = orig
if orig[:4] == ' 0:0':
new = orig[4:]
elif orig[:3] == ' 0:':
new = orig[3:]
if len(new)>5:
hours = int(new[:-6])
days = _("d")
if hours>23:
new = "%d %s %02d:%s" % (hours / 24, days, hours%24 ,new[-5:])
cell.set_property('text', new)
def render_float(self, column, cell, model, iter, data):
_format, _quantity, _idx = data
_val = model.get_value(iter, _idx)
_val = self.uc.sys2usr(_quantity, _val)
_val_str = _format % float(_val)
cell.set_property('text', _val_str)
def create_treeview(self,treeview,columns,sortable=True):
for column_index, column_dict in enumerate(columns):
if 'pixbuf' in column_dict:
renderer = Gtk.CellRendererPixbuf()
else:
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(column_dict['name'])
column.pack_start(renderer, False)
if 'pixbuf' in column_dict:
column.add_attribute(renderer, 'pixbuf', column_index)
else:
column.add_attribute(renderer, 'text', column_index)
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
column.set_resizable(True)
if 'xalign' in column_dict:
renderer.set_property('xalign', column_dict['xalign'])
if 'visible' in column_dict:
column.set_visible(column_dict['visible'])
if 'format_float' in column_dict:
column.set_cell_data_func(renderer, self.render_float, [column_dict['format_float'], column_dict['quantity'], column_index])
if 'format_duration' in column_dict and column_dict['format_duration']:
column.set_cell_data_func(renderer, self.render_duration)
if sortable:
column.set_sort_column_id(column_index)
treeview.append_column(column)
def actualize_recordview(self,activity):
logging.debug(">>")
if activity.id is None:
#Blank out fields
self.record_distance.set_text("")
self.record_upositive.set_text("")
self.record_unegative.set_text("")
self.record_average.set_text("")
self.record_maxspeed.set_text("")
self.record_pace.set_text("")
self.record_maxpace.set_text("")
self.record_sport.set_text("")
self.record_date.set_text("")
self.record_time.set_text("")
self.record_duration.set_text("")
#self.record_minute.set_text("")
#self.record_second.set_text("")
self.record_calories.set_text("")
self.record_title.set_text("")
self.label_record_equipment.set_text("")
self.frame_laps.hide()
com_buffer = self.record_comments.get_buffer()
start,end = com_buffer.get_bounds()
com_buffer.set_text("")
#Move to main record page and grey out
self.recordview.set_current_page(0)
self.recordview.set_sensitive(0)
logging.debug("<<")
return
#Set the units for the activity results, e.g. km, km/h etc
self.r_distance_unit.set_text(self.uc.unit_distance)
self.r_speed_unit.set_text(self.uc.unit_speed)
self.r_maxspeed_unit.set_text(self.uc.unit_speed)
self.r_pace_unit.set_text(self.uc.unit_pace)
self.r_maxpace_unit.set_text(self.uc.unit_pace)
self.r_ascent_unit.set_text(self.uc.unit_height)
self.r_descent_unit.set_text(self.uc.unit_height)
if activity.has_data:
self.recordview.set_sensitive(1)
dateTime = activity.date_time
recordDateTime = dateTime.strftime("%Y-%m-%d %H:%M:%S")
recordDate = dateTime.strftime("%x")
recordTime = dateTime.strftime("%X")
recordDateTimeOffset = dateTime.strftime("%z")
self.record_distance.set_text(activity.get_value_f('distance', "%0.2f"))
self.record_upositive.set_text(activity.get_value_f('upositive', "%0.2f"))
self.record_unegative.set_text(activity.get_value_f('unegative', "%0.2f"))
self.record_average.set_text(activity.get_value_f('average', "%0.2f"))
self.record_maxspeed.set_text(activity.get_value_f('maxspeed', "%0.2f"))
self.record_pace.set_text(activity.get_value_f('pace', "%s"))
self.record_maxpace.set_text(activity.get_value_f('maxpace', "%s"))
self.record_sport.set_text(activity.sport_name)
self.record_date.set_text(recordDate)
self.record_time.set_text(recordTime)
self.record_duration.set_text(activity.get_value_f('time', '%s'))
self.record_calories.set_text(activity.get_value_f('calories', "%0.0f"))
if activity.title:
self.record_title.set_text(activity.title)
else:
self.record_title.set_text('')
hrun,mrun,srun = second2time(activity.duration)
hpause,mpause,spause = second2time(activity.time_pause)
self.record_runrest.set_text("%02d:%02d:%02d / %02d:%02d:%02d" %(hrun,mrun,srun,hpause,mpause,spause))
buffer = self.record_comments.get_buffer()
if activity.comments:
buffer.set_text(activity.comments)
else:
buffer.set_text('')
if len(activity.equipment) > 0:
equipment_text = ", ".join([item.description for item in activity.equipment])
self.label_record_equipment.set_text(equipment_text)
else:
self.label_record_equipment.set_markup("<i>None</i>")
if len(activity.Laps)>1:
store = Gtk.ListStore(
GObject.TYPE_INT,
GdkPixbuf.Pixbuf,
GObject.TYPE_FLOAT,
GObject.TYPE_STRING,
GObject.TYPE_FLOAT,
GObject.TYPE_FLOAT,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_INT,
GObject.TYPE_INT,
GObject.TYPE_INT,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
)
for lap in activity.Laps:
t = lap.duration
m = lap.distance
m = self.uc.speed(m)
s = m / float(t) * 3.6
max_speed = lap.max_speed * 3.6
if s > 0:
pace = "%d:%02d" %((3600/s)/60,(3600/s)%60)
if max_speed >0:
max_pace = "%d:%02d" %((3600/max_speed)/60,(3600/max_speed)%60)
else:
max_pace = "0:00"
else:
pace = "0:00"
max_pace = "0:00"
color = {
'active' : '#000000',
'rest' : '#808080',
'resting' : '#808080',
}
pic = GdkPixbuf.Pixbuf.new_from_file(self.data_path+"glade/trigger_%s.png" % lap.laptrigger)
iter = store.append()
store.set(iter,
0, lap.lap_number + 1,
1, pic,
2, m/1000,
3, str(int(float(t))),
4, s,
5, max_speed,
6, pace,
7, max_pace,
8, lap.avg_hr if lap.avg_hr else 0,
9, lap.max_hr if lap.max_hr else 0,
10, lap.calories,
11, color[lap.intensity],
12, '' if not lap.comments else (lap.comments if len(lap.comments)<40 else "%s..." % lap.comments[:40]),
)
self.lapsTreeView.set_model(store)
self.lapsTreeView.set_rules_hint(True)
# Use grey color for "rest" laps
for c in self.lapsTreeView.get_columns():
for cr in c.get_cells():
if type(cr)==Gtk.CellRendererText:
cr.set_property('foreground', 'gray')
def edited_cb(cell, path, new_text, data):
liststore, activity = data
liststore[path][12] = new_text
activity.Laps[int(path)].comments = gtk_str(new_text)
self.pytrainer_main.ddbb.session.commit()
def show_tooltip(widget, x, y, keyboard_mode, tooltip, user_param1):
path = self.lapsTreeView.get_path_at_pos(x,y-20)
if not path: return False
if path[1] != self.lapsTreeView.get_columns()[12]: return False
comments = user_param1[1].laps[path[0][0]]['comments']
if comments and len(comments)>40:
tooltip.set_text(comments)
return True
return False
if getattr(self.lapsTreeView, 'tooltip_handler_id', None):
self.lapsTreeView.disconnect(self.lapsTreeView.tooltip_handler_id)
self.lapsTreeView.tooltip_handler_id = self.lapsTreeView.connect('query-tooltip', show_tooltip, (store, activity))
i = 0
for cr in self.lapsTreeView.get_columns()[12].get_cells():
cr.set_property('editable', True)
if getattr(self, 'lapview_handler_id', None):
cr.disconnect(self.lapview_handler_id)
self.lapview_handler_id = cr.connect('edited', edited_cb, (store, activity))
tooltip = Gtk.Tooltip()
tooltip.set_text(activity.laps[i]['comments'])
# FIXME Use TreePath to set tooltip
#self.lapsTreeView.set_tooltip_cell(tooltip, i, self.lapsTreeView.get_columns()[12], cr)
self.lapsTreeView.set_tooltip_cell(tooltip, None, self.lapsTreeView.get_columns()[12], cr)
i += 1
self.frame_laps.show()
else:
self.frame_laps.hide()
else:
self.recordview.set_current_page(0)
self.recordview.set_sensitive(0)
logging.debug("<<")
def actualize_recordgraph(self,activity):
logging.debug(">>")
self.record_list = activity.tracks
self.laps = activity.laps
if activity.gpx_file is not None:
if not self.pytrainer_main.startup_options.newgraph:
logging.debug("Using the original graphing")
logging.debug("Activity has GPX data")
#Show drop down boxes
self.hbox30.show()
#Hide new graph details
self.graph_data_hbox.hide()
self.hboxGraphOptions.hide()
#Enable graph
self.record_vbox.set_sensitive(1)
self.drawarearecord.drawgraph(self.record_list,self.laps)
else:
#Still just test code....
logging.debug("Using the new TEST graphing approach")
#Hide current drop down boxes
self.hbox30.hide()
self.graph_data_hbox.hide()
#Enable graph
self.record_vbox.set_sensitive(1)
#Create a frame showing data available for graphing
#Remove existing frames
for child in self.graph_data_hbox.get_children():
if isinstance(child, Gtk.Frame):
self.graph_data_hbox.remove(child)
#Build frames and vboxs to hold checkbuttons
xFrame = Gtk.Frame(label=_("Show on X Axis"))
y1Frame = Gtk.Frame(label=_("Show on Y1 Axis"))
y2Frame = Gtk.Frame(label=_("Show on Y2 Axis"))
limitsFrame = Gtk.Frame(label=_("Axis Limits"))
xvbox = Gtk.VBox()
y1box = Gtk.Table()
y2box = Gtk.Table()
limitsbox = Gtk.Table()
#Populate X axis data
#Create x axis items
xdistancebutton = Gtk.RadioButton(label=_("Distance"))
xtimebutton = Gtk.RadioButton(group=xdistancebutton, label=_("Time"))
xlapsbutton = Gtk.CheckButton(label=_("Laps"))
y1gridbutton = Gtk.CheckButton(label=_("Left Axis Grid"))
y2gridbutton = Gtk.CheckButton(label=_("Right Axis Grid"))
xgridbutton = Gtk.CheckButton(label=_("X Axis Grid"))
#Set state of buttons
if activity.x_axis == "distance":
xdistancebutton.set_active(True)
elif activity.x_axis == "time":
xtimebutton.set_active(True)
xlapsbutton.set_active(activity.show_laps)
y1gridbutton.set_active(activity.y1_grid)
y2gridbutton.set_active(activity.y2_grid)
xgridbutton.set_active(activity.x_grid)
#Connect handlers to buttons
xdistancebutton.connect("toggled", self.on_xaxischange, "distance", activity)
xtimebutton.connect("toggled", self.on_xaxischange, "time", activity)
xlapsbutton.connect("toggled", self.on_xlapschange, activity)
y1gridbutton.connect("toggled", self.on_gridchange, "y1", activity)
y2gridbutton.connect("toggled", self.on_gridchange, "y2", activity)
xgridbutton.connect("toggled", self.on_gridchange, "x", activity)
#Add buttons to frame
xvbox.pack_start(xdistancebutton, False, True, 0)
xvbox.pack_start(xtimebutton, False, True, 0)
xvbox.pack_start(xlapsbutton, False, True, 0)
xvbox.pack_start(y1gridbutton, False, True, 0)
xvbox.pack_start(y2gridbutton, False, True, 0)
xvbox.pack_start(xgridbutton, False, True, 0)
xFrame.add(xvbox)
#Populate axis limits frame
#TODO Need to change these to editable objects and redraw graphs if changed....
#Create labels etc
minlabel = Gtk.Label(label="<small>Min</small>")
minlabel.set_use_markup(True)
maxlabel = Gtk.Label(label="<small>Max</small>")
maxlabel.set_use_markup(True)
xlimlabel = Gtk.Label(label="X")
limits = {}
xminlabel = Gtk.Entry(max_length=10)
xmaxlabel = Gtk.Entry(max_length=10)
limits['xminlabel'] = xminlabel
limits['xmaxlabel'] = xmaxlabel
xminlabel.set_width_chars(5)
xminlabel.set_alignment(1.0)
xmaxlabel.set_width_chars(5)
xmaxlabel.set_alignment(1.0)
y1limlabel = Gtk.Label(label="Y1")
y1minlabel = Gtk.Entry(max_length=10)
y1maxlabel = Gtk.Entry(max_length=10)
limits['y1minlabel'] = y1minlabel
limits['y1maxlabel'] = y1maxlabel
y1minlabel.set_width_chars(5)
y1minlabel.set_alignment(1.0)
y1maxlabel.set_width_chars(5)
y1maxlabel.set_alignment(1.0)
y2limlabel = Gtk.Label(label="Y2")
y2minlabel = Gtk.Entry(max_length=10)
y2maxlabel = Gtk.Entry(max_length=10)
limits['y2minlabel'] = y2minlabel
limits['y2maxlabel'] = y2maxlabel
y2minlabel.set_width_chars(5)
y2minlabel.set_alignment(1.0)
y2maxlabel.set_width_chars(5)
y2maxlabel.set_alignment(1.0)
resetbutton = Gtk.Button(_('Reset Limits'))
resetbutton.connect("clicked", self.on_setlimits, activity, True, None)
setbutton = Gtk.Button(_('Set Limits'))
setbutton.connect("clicked", self.on_setlimits, activity, False, limits)
#Add labels etc to table
limitsbox.attach(minlabel, 1, 2, 0, 1, yoptions=Gtk.AttachOptions.SHRINK)
limitsbox.attach(maxlabel, 2, 3, 0, 1, yoptions=Gtk.AttachOptions.SHRINK)
limitsbox.attach(xlimlabel, 0, 1, 1, 2, yoptions=Gtk.AttachOptions.SHRINK)
limitsbox.attach(xminlabel, 1, 2, 1, 2, yoptions=Gtk.AttachOptions.SHRINK, xpadding=5)
limitsbox.attach(xmaxlabel, 2, 3, 1, 2, yoptions=Gtk.AttachOptions.SHRINK, xpadding=5)
limitsbox.attach(y1limlabel, 0, 1, 2, 3, yoptions=Gtk.AttachOptions.SHRINK)
limitsbox.attach(y1minlabel, 1, 2, 2, 3, yoptions=Gtk.AttachOptions.SHRINK, xpadding=5)
limitsbox.attach(y1maxlabel, 2, 3, 2, 3, yoptions=Gtk.AttachOptions.SHRINK, xpadding=5)
limitsbox.attach(y2limlabel, 0, 1, 3, 4, yoptions=Gtk.AttachOptions.SHRINK)
limitsbox.attach(y2minlabel, 1, 2, 3, 4, yoptions=Gtk.AttachOptions.SHRINK, xpadding=5)
limitsbox.attach(y2maxlabel, 2, 3, 3, 4, yoptions=Gtk.AttachOptions.SHRINK, xpadding=5)
limitsbox.attach(setbutton, 0, 3, 4, 5, yoptions=Gtk.AttachOptions.SHRINK)
limitsbox.attach(resetbutton, 0, 3, 5, 6, yoptions=Gtk.AttachOptions.SHRINK)
limitsFrame.add(limitsbox)
row = 0
if activity.x_axis == "distance":
data = activity.distance_data
elif activity.x_axis == "time":
data = activity.time_data
else:
logging.error("x axis is unknown")
#Populate Y axis data
for graphdata in sorted(data.keys()):
#First Y axis...
#Create button
y1button = Gtk.CheckButton(label=data[graphdata].title)
#Make button active if this data is to be displayed...
y1button.set_active(data[graphdata].show_on_y1)
#Connect handler for toggle state changes
y1button.connect("toggled", self.on_y1change, y1box, graphdata, activity)
#Attach button to container
y1box.attach(y1button, 0, 1, row, row+1, xoptions=Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL)
if data[graphdata].linecolor is not None:
#Create a color choser
y1color = Gtk.ColorButton()
#Set color to current activity color
_color = Gdk.color_parse(data[graphdata].linecolor)
y1color.set_color(_color)
#Connect handler for color state changes
y1color.connect("color-set", self.on_y1colorchange, y1box, graphdata, activity)
#Attach to container
y1box.attach(y1color, 1, 2, row, row+1)
else:
blanklabel = Gtk.Label(label="")
y1box.attach(blanklabel, 1, 2, row, row+1)
#Second Y axis
y2button = Gtk.CheckButton(label=data[graphdata].title)
y2button.set_active(data[graphdata].show_on_y2)
y2button.connect("toggled", self.on_y2change, y2box, graphdata, activity)
y2box.attach(y2button, 0, 1, row, row+1, xoptions=Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL)
if data[graphdata].y2linecolor is not None:
y2color = Gtk.ColorButton()
_color = Gdk.color_parse(data[graphdata].y2linecolor)
y2color.set_color(_color)
y2color.connect("color-set", self.on_y2colorchange, y2box, graphdata, activity)
#Attach to container
y2box.attach(y2color, 1, 2, row, row+1)
else:
blanklabel = Gtk.Label(label="")
y2box.attach(blanklabel, 1, 2, row, row+1)
row += 1
y1Frame.add(y1box)
y2Frame.add(y2box)
self.graph_data_hbox.pack_start(xFrame, expand=False, fill=False, padding=5)
self.graph_data_hbox.pack_start(y1Frame, expand=False, fill=False, padding=5)
self.graph_data_hbox.pack_start(y2Frame, expand=False, fill=False, padding=5)
self.graph_data_hbox.pack_start(limitsFrame, expand=False, fill=True, padding=5)
#self.graph_data_hbox.show_all()
self.hboxGraphOptions.show_all()
act = self.grapher.drawActivityGraph(activity=activity, box=self.record_graph_vbox)
if act.x_limits_u[0] is not None:
xmin, xmax = act.x_limits_u
else:
xmin, xmax = act.x_limits
if act.y1_limits_u[0] is not None:
y1min, y1max = act.y1_limits_u
else:
y1min, y1max = act.y1_limits
if act.y2_limits_u[0] is not None:
y2min, y2max = act.y2_limits_u
else:
y2min, y2max = act.y2_limits
#print y1min, y1max, y2min, y2max
if xmin is not None and xmax is not None:
xminlabel.set_text(str(xmin))
xmaxlabel.set_text(str(xmax))
if y1min is not None and y1max is not None:
y1minlabel.set_text(str(y1min))
y1maxlabel.set_text(str(y1max))
if y2min is not None and y2max is not None:
y2minlabel.set_text(str(y2min))
y2maxlabel.set_text(str(y2max))
#Default to showing options
self.buttonGraphShowOptions.hide()
self.scrolledwindowGraphOptions.show()
self.buttonGraphHideOptions.show()
else:
logging.debug("Activity has no GPX data")
#Show drop down boxes
self.hbox30.show()
#Hide new graph details
self.graph_data_hbox.hide()
self.hboxGraphOptions.hide()
#Remove graph
vboxChildren = self.record_graph_vbox.get_children()
logging.debug('Vbox has %d children %s', len(vboxChildren), vboxChildren)
# ToDo: check why vertical container is shared
for child in vboxChildren:
#Remove all FigureCanvasGTK and NavigationToolbar2GTKAgg to stop double ups of graphs
if isinstance(child, matplotlib.backends.backend_gtk3agg.FigureCanvasGTK3Agg) or isinstance(child, matplotlib.backends.backend_gtk3.NavigationToolbar2GTK3):
logging.debug('Removing child: %s', child)
self.record_graph_vbox.remove(child)
self.record_vbox.set_sensitive(0)
logging.debug("<<")
def actualize_heartrategraph(self,activity):
logging.debug(">>")
if activity.tracks is not None and len(activity.tracks)>0:
self.heartrate_vbox_.set_sensitive(1)
self.drawareaheartrate.drawgraph(activity.tracks)
else:
self.heartrate_vbox_.set_sensitive(0)
logging.debug("<<")
def actualize_hrview(self,activity):
logging.debug(">>")
zones = self.pytrainer_main.profile.getZones()
record_list = activity.tracks
is_karvonen_method = self.pytrainer_main.profile.getValue("pytraining","prf_hrzones_karvonen")
if record_list is not None and len(record_list)>0:
record_list=record_list[0]
self.record_zone1.set_text("%s-%s" %(zones[4][0],zones[4][1]))
self.record_zone2.set_text("%s-%s" %(zones[3][0],zones[3][1]))
self.record_zone3.set_text("%s-%s" %(zones[2][0],zones[2][1]))
self.record_zone4.set_text("%s-%s" %(zones[1][0],zones[1][1]))
self.record_zone5.set_text("%s-%s" %(zones[0][0],zones[0][1]))
beats = activity.beats
maxbeats = activity.maxbeats
self.record_beats.set_text("%0.0f" %beats)
self.record_maxbeats.set_text("%0.0f" %maxbeats)
self.record_calories2.set_text("%0.0f" %activity.calories)
if is_karvonen_method=="True":
self.record_zonesmethod.set_text(_("Karvonen method"))
else:
self.record_zonesmethod.set_text(_("Percentages method"))
#else:
# self.recordview.set_sensitive(0)
logging.debug("<<")
def actualize_analytics(self,activity):
logging.debug(">>")
record_list = activity.tracks
def project(d,a):
# avoid divide by zero if distance is 0
try:
return int(a.duration * (d / a.distance)**1.06)
except ZeroDivisionError:
return 0
DISTANCES = {
.8 : _("800 m"),
1.5 : _("1500 m"),
5 : _("5K"),
7 : _("7K"),
10 : _("10K"),
21.1 : _("Half marathon"),
42.195 : _("Marathon"),
100 : _("100K"),
}
projected_store = Gtk.ListStore(
GObject.TYPE_STRING, #id
GObject.TYPE_STRING, #name
GObject.TYPE_STRING, #distance
GObject.TYPE_STRING, #time
)
for d in sorted(DISTANCES.keys()):
v = DISTANCES[d]
iter = projected_store.append()
projected_store.set (
iter,
0, str(d),
1, v,
2, str(d),
3, str(project(d, activity)),
)
self.analyticsTreeView.set_model(projected_store)
self.analytics_activity = activity
self.on_change_rank_percentage()
logging.debug("<<")
def on_change_rank_percentage(self, widget=None):
activity = self.analytics_activity
if widget:
percentage = widget.get_value() / 100
else:
percentage = .05
records = self.pytrainer_main.ddbb.session.query(Activity).filter(and_(Activity.distance.between(activity.distance * (1-percentage), activity.distance * (1+percentage)), Activity.sport == activity.sport)).all()
count = 1
for r in records:
if r.average > activity.average:
count += 1
import numpy
speeds = [r.average for r in records]
self.label_ranking_range.set_text("%.2f - %.2f %s" % (self.uc.distance(activity.distance * (1-percentage)), self.uc.distance(activity.distance * (1+percentage)), self.uc.unit_distance))
self.label_ranking_rank.set_text("%s/%s" % (count, len(records)))
self.label_ranking_avg.set_text("%.2f %s" % (self.uc.speed(numpy.average(speeds)), self.uc.unit_speed))
self.label_ranking_speed.set_text("%.2f %s" % (self.uc.speed(activity.average), self.uc.unit_speed))
self.label_ranking_stddev.set_text("%.4f" % (self.uc.speed(numpy.std(speeds))))
self.label_ranking_dev.set_text("%+.2fσ" % ((activity.average - numpy.average(speeds)) / numpy.std(speeds)))
rank_store = Gtk.ListStore(
GObject.TYPE_INT, #id
GObject.TYPE_INT, #rank
GObject.TYPE_STRING, #date
GObject.TYPE_FLOAT, #distance
GObject.TYPE_INT, #time
GObject.TYPE_FLOAT, #speed
GObject.TYPE_FLOAT, #pace
GObject.TYPE_STRING, #color
)
length = len(records)
rec_set = [0,]
for r in range(max(count-3, 1) if count>1 else count, min(count+3, length-2) if count < length else count):
rec_set.append(r)
if length>1 and count!=length:
rec_set.append(-1)
for i in rec_set:
r = records[i]
iter = rank_store.append()
rank = length if i==-1 else i+1
rank_store.set (
iter,
0, i,
1, rank,
2, str(r.date),
3, r.distance,
4, r.duration,
5, r.average,
6, r.pace,
7, '#3AA142' if rank==count else '#000000',
)
for c in self.rankingTreeView.get_columns()[:-1]:
for cr in c.get_cells():
if type(cr)==Gtk.CellRendererText:
cr.set_property('foreground', 'gray')
self.rankingTreeView.set_model(rank_store)
def actualize_dayview(self, date):
logging.debug(">>")
self.d_distance_unit.set_text(self.uc.unit_distance)
self.d_speed_unit.set_text(self.uc.unit_speed)
self.d_maxspeed_unit.set_text(self.uc.unit_speed)
self.d_pace_unit.set_text(self.uc.unit_pace)
self.d_maxpace_unit.set_text(self.uc.unit_pace)
if self.activeSport:
sport = self._sport_service.get_sport_by_name(self.activeSport)
else:
sport = None
activity_list = self.pytrainer_main.activitypool.get_activities_for_day(date, sport=sport)
tbeats, distance, calories, timeinseconds, beats, maxbeats, maxspeed, average, maxpace, pace, totalascent, totaldescent = self._totals_from_activities(activity_list)
if timeinseconds:
self.dayview.set_sensitive(1)
else:
self.dayview.set_sensitive(0)
self.day_distance.set_text("%0.2f" %distance)
hour,min,sec = second2time(timeinseconds)
self.day_hour.set_text("%d" %hour)
self.day_minute.set_text("%02d" %min)
self.day_second.set_text("%02d" %sec)
if tbeats:
self.day_beats.set_text("%0.0f" %tbeats)
else:
self.day_beats.set_text("")
self.day_maxbeats.set_text("%0.0f" %maxbeats)
if average:
self.day_average.set_text("%0.2f" %average)
else:
self.day_average.set_text("")
self.day_maxspeed.set_text("%0.2f" %maxspeed)
self.day_pace.set_text("%s" %pace)
self.day_maxpace.set_text("%s" %maxpace)
self.day_ascdesc.set_text("%d/%d" %(int(totalascent),int(totaldescent)))
self.day_calories.set_text("%0.0f" %calories)
self.day_topic.set_text(str(date))
logging.debug("<<")
def actualize_daygraph(self,record_list):
logging.debug(">>")
if len(record_list)>0:
self.day_vbox.set_sensitive(1)
else:
self.day_vbox.set_sensitive(0)
self.drawareaday.drawgraph(record_list)
logging.debug("<<")
def actualize_map(self,activity, full_screen=False):
logging.debug(">>")
if self.mapviewer and self.mapviewer_fs:
#Check which type of map viewer to use
if self.radiobuttonOSM.get_active():
#Use OSM to draw map
logging.debug("Using OSM to draw map....")
from pytrainer.extensions.osm import Osm
htmlfile = Osm(data_path=self.data_path, waypoint=self.pytrainer_main.waypoint, pytrainer_main=self.parent).drawMap(activity, self.comboMapLineType.get_active())
elif self.radiobuttonGMap.get_active():
#Use Google to draw map
logging.debug("Using Google to draw map")
from pytrainer.extensions.googlemaps import Googlemaps
htmlfile = Googlemaps(data_path=self.data_path, waypoint=self.pytrainer_main.waypoint, pytrainer_main=self.parent).drawMap(activity, self.comboMapLineType.get_active())
else:
#Unknown map type...
logging.error("Unknown map viewer requested")
htmlfile = self.mapviewer.createErrorHtml()
logging.debug("Displaying htmlfile: %s" % htmlfile)
if full_screen:
logging.debug("Displaying in full screen mode")
self.mapviewer_fs.display_map(htmlfile=htmlfile)
else:
logging.debug("Displaying in embedded mode")
self.mapviewer.display_map(htmlfile=htmlfile)
logging.debug("<<")
def actualize_weekview(self, date_range):
logging.debug(">>")
self.week_date.set_text("%s - %s (%d)" % (date_range.start_date.strftime("%a %d %b"), date_range.end_date.strftime("%a %d %b"), int(date_range.end_date.strftime("%V"))) )
if self.activeSport:
sport = self._sport_service.get_sport_by_name(self.activeSport)
else:
sport = None
activity_list = self.pytrainer_main.activitypool.get_activities_period(date_range, sport=sport)
tbeats, distance, calories, timeinseconds, beats, maxbeats, maxspeed, average, maxpace, pace, totalascent, totaldescent = self._totals_from_activities(activity_list)
if timeinseconds:
self.weekview.set_sensitive(1)
else:
self.weekview.set_sensitive(0)
self.w_distance_unit.set_text(self.uc.unit_distance)
self.w_speed_unit.set_text(self.uc.unit_speed)
self.w_maxspeed_unit.set_text(self.uc.unit_speed)
self.w_pace_unit.set_text(self.uc.unit_pace)
self.w_maxpace_unit.set_text(self.uc.unit_pace)
self.weeka_distance.set_text("%0.2f" %distance)
hour,min,sec = second2time(timeinseconds)
self.weeka_hour.set_text("%d" %hour)
self.weeka_minute.set_text("%02d" %min)
self.weeka_second.set_text("%02d" %sec)
self.weeka_maxbeats.set_text("%0.0f" %(maxbeats))
self.weeka_beats.set_text("%0.0f" %(tbeats))
self.weeka_average.set_text("%0.2f" %average)
self.weeka_maxspeed.set_text("%0.2f" %maxspeed)
self.weeka_pace.set_text(pace)
self.weeka_maxpace.set_text(maxpace)
self.weeka_ascdesc.set_text("%d/%d" %(int(totalascent),int(totaldescent)))
self.weeka_calories.set_text("%0.0f" %calories)
self.weekview.set_sensitive(1)
self.drawareaweek.drawgraph(activity_list, date_range.start_date)
logging.debug("<<")
def actualize_monthview(self, date_range, nameMonth, daysInMonth):
logging.debug(">>")
self.month_date.set_text(nameMonth)
if self.activeSport:
sport = self._sport_service.get_sport_by_name(self.activeSport)
else:
sport = None
activity_list = self.pytrainer_main.activitypool.get_activities_period(date_range, sport=sport)
tbeats, distance, calories, timeinseconds, beats, maxbeats, maxspeed, average, maxpace, pace, totalascent, totaldescent = self._totals_from_activities(activity_list)
if timeinseconds:
self.monthview.set_sensitive(1)
else:
self.monthview.set_sensitive(0)
self.m_distance_unit.set_text(self.uc.unit_distance)
self.m_speed_unit.set_text(self.uc.unit_speed)
self.m_maxspeed_unit.set_text(self.uc.unit_speed)
self.m_pace_unit.set_text(self.uc.unit_pace)
self.m_maxpace_unit.set_text(self.uc.unit_pace)
self.montha_distance.set_text("%0.2f" %distance)
hour,min,sec = second2time(timeinseconds)
self.montha_hour.set_text("%d" %hour)
self.montha_minute.set_text("%02d" %min)
self.montha_second.set_text("%02d" %sec)
self.montha_maxbeats.set_text("%0.0f" %(maxbeats))
self.montha_beats.set_text("%0.0f" %(tbeats))
self.montha_average.set_text("%0.2f" %average)
self.montha_maxspeed.set_text("%0.2f" %maxspeed)
self.montha_pace.set_text(pace)
self.montha_maxpace.set_text(maxpace)
self.montha_ascdesc.set_text("%d/%d" %(int(totalascent),int(totaldescent)))
self.montha_calories.set_text("%0.0f" %calories)
self.drawareamonth.drawgraph(activity_list, daysInMonth)
logging.debug("<<")
def actualize_yearview(self, date_range, year):
logging.debug(">>")
self.year_date.set_text("%d" %int(year))
if self.activeSport:
sport = self._sport_service.get_sport_by_name(self.activeSport)
else:
sport = None
activity_list = self.pytrainer_main.activitypool.get_activities_period(date_range, sport=sport)
tbeats, distance, calories, timeinseconds, beats, maxbeats, maxspeed, average, maxpace, pace, totalascent, totaldescent = self._totals_from_activities(activity_list)
if timeinseconds:
self.yearview.set_sensitive(1)
else:
self.yearview.set_sensitive(0)
self.drawareayear.drawgraph([])
self.y_distance_unit.set_text(self.uc.unit_distance)
self.y_speed_unit.set_text(self.uc.unit_speed)
self.y_maxspeed_unit.set_text(self.uc.unit_speed)
self.y_pace_unit.set_text(self.uc.unit_pace)
self.y_maxpace_unit.set_text(self.uc.unit_pace)
self.yeara_distance.set_text("%0.2f" %distance)
hour,min,sec = second2time(timeinseconds)
self.yeara_hour.set_text("%d" %hour)
self.yeara_minute.set_text("%02d" %min)
self.yeara_second.set_text("%02d" %sec)
self.yeara_beats.set_text("%0.0f" %tbeats)
self.yeara_maxbeats.set_text("%0.0f" %(maxbeats))
self.yeara_average.set_text("%0.2f" %average)
self.yeara_maxspeed.set_text("%0.2f" %maxspeed)
self.yeara_pace.set_text(pace)
self.yeara_maxpace.set_text(maxpace)
self.yeara_ascdesc.set_text("%d/%d " %(totalascent,totaldescent))
self.yeara_calories.set_text("%0.0f" %calories)
self.drawareayear.drawgraph(activity_list)
logging.debug("<<")
def actualize_athleteview(self, athlete):
logging.debug(">>")
self.labelName.set_text(athlete.name)
self.labelDOB.set_text(athlete.age)
self.labelHeight.set_text(athlete.height+" cm")
#Create history treeview
history_store = Gtk.ListStore(
GObject.TYPE_INT, #id
GObject.TYPE_STRING, #date
GObject.TYPE_FLOAT, #weight
GObject.TYPE_FLOAT, #body fat %
GObject.TYPE_INT, #resting HR
GObject.TYPE_INT #max HR
)
for data in athlete.data:
iter = history_store.append()
history_store.set (
iter,
0, data['id_athletestat'],
1, str(data['date']),
2, data['weight'],
3, data['bodyfat'],
4, data['restinghr'],
5, data['maxhr'],
)
self.athleteTreeView.set_model(history_store)
self.grapher.drawAthleteGraph(athlete=athlete, box=self.boxAthleteGraph)
logging.debug("<<")
def actualize_statsview(self, stats, record_list):
logging.debug(">>")
self.labelTotalDistance.set_text(str(round(stats.data['total_distance'])) + " km")
self.labelTotalDuration.set_text(str(round(stats.data['total_duration'] / 3600)) + " hours")
# skip date format if no stats are saved yet
try:
self.labelStartDate.set_text(stats.data['start_date'].strftime('%Y-%m-%d'))
self.labelEndDate.set_text(stats.data['end_date'].strftime('%Y-%m-%d'))
except KeyError:
pass
data = self.parent.stats.data
store = Gtk.ListStore(
GObject.TYPE_INT,
GObject.TYPE_STRING,
GObject.TYPE_INT,
GObject.TYPE_INT,
GObject.TYPE_FLOAT,
GObject.TYPE_FLOAT,
GObject.TYPE_FLOAT,
GObject.TYPE_INT,
GObject.TYPE_INT,
GObject.TYPE_INT,
GObject.TYPE_FLOAT
)
for s in data['sports'].values():
iter = store.append()
c = 0
store.set (iter, c, c)
c += 1
store.set (iter, c, s['name'])
c += 1
store.set (iter, c, s['count'])
for f in data['fields'][3:]:
c += 1
store.set (iter, c, s['total_'+f])
c += 1
if s['total_duration']!=0: # Avoid division by zero if 0 length sport activity exists in DB
store.set (iter, c, s['total_distance'] / s['total_duration'] * 3600.)
for f in data['fields']:
c += 1
store.set (iter, c, s[f])
self.statsTreeView.set_model(store)
self.statsTreeView.set_rules_hint(True)
store.set_sort_column_id(3, Gtk.SortType.DESCENDING)
self.drawareatotal.drawgraph(record_list)
logging.debug("<<")
def actualize_listview(self,record_list):
logging.debug(">>")
store = Gtk.ListStore(
GObject.TYPE_INT,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_FLOAT,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_INT,
GObject.TYPE_INT,
GObject.TYPE_FLOAT,
GObject.TYPE_INT,
object)
for i in record_list:
try:
hour,min,sec = second2time(i.duration)
except (ValueError, TypeError):
hour,min,sec = (0,0,0)
_time = "%2d:%02d:%02d" %(hour,min,sec)
try:
_id = i.id
except (ValueError, TypeError) as e:
logging.debug("Unable to determine id for record: %s. Exception: %s", i, e)
continue
_title = i.title
_date = str(i.date)
try:
_distance = float(i.distance)
except (ValueError, TypeError):
_distance = 0
_sport = i.sport.name
try:
_average = i.average
except (ValueError, TypeError):
_average = 0
try:
_calories = i.calories
except (ValueError, TypeError):
_calories = 0
try:
_beats = round(i.beats)
except (ValueError, TypeError) as e:
logging.debug("Unable to parse beats for record: %s. Exception: %s", i.id, e)
_beats = 0.0
try:
_maxbeats = round(i.maxbeats)
except (ValueError, TypeError) as e:
logging.debug("Unable to parse maxbeats for record: %s. Exception: %s", i.id, e)
_maxbeats = 0.0
iter = store.append()
store.set (
iter,
0, _id,
1, _title,
2, _date,
3, _distance,
4, _sport,
5, _time,
6, _beats,
7, _maxbeats,
8, _average,
9, _calories
)
#self.allRecordTreeView.set_headers_clickable(True)
self.allRecordTreeView.set_model(store)
self.allRecordTreeView.set_rules_hint(True)
logging.debug("<<")
def actualize_waypointview(self,record_list,default_waypoint,redrawmap = 1):
logging.debug(">>")
#redrawmap: indicates whether the map also needs to be refreshed (1 = yes, 0 = no)
#waypoint list contains:
#id_waypoint,lat,lon,ele,comment,time,name,sym
#The columns are:
#column_names=[_("id"),_("Waypoint")]
store = Gtk.ListStore(
GObject.TYPE_INT,
GObject.TYPE_STRING,
object)
iterOne = False
iterDefault = False
counter = 0
default_id = 0
for i in record_list:
iter = store.append()
if not iterOne:
iterOne = iter
if int(i[0])==default_waypoint:
iterDefault = iter
default_id = counter
store.set (
iter,
0, int(i[0]),
1, str(i[6])
)
counter+=1
self.waypointTreeView.set_model(store)
if iterDefault:
self.waypointTreeView.get_selection().select_iter(iterDefault)
elif iterOne:
self.waypointTreeView.get_selection().select_iter(iterOne)
if len(record_list) > 0:
self.waypoint_latitude.set_text(str(record_list[default_id][1]))
self.waypoint_longitude.set_text(str(record_list[default_id][2]))
self.waypoint_name.set_text(str(record_list[default_id][6]))
self.waypoint_description.set_text(str(record_list[default_id][4]))
self.set_waypoint_type(str(record_list[default_id][7]))
if redrawmap == 1 and self.waypointeditor:
self.waypointeditor.createHtml(default_waypoint)
self.waypointeditor.drawMap()
logging.debug("<<")
def set_waypoint_type(self, type):
x = 0
tree_model = self.waypoint_type.get_model()
if tree_model is not None:
#iter = tree_model.get_iter_root()
for item in tree_model:
#if isinstance(item, Gtk.TreeModelRow):
if item[0] == type:
self.waypoint_type.set_active(x)
return
x += 1
self.waypoint_type.insert_text(0, type)
self.waypoint_type.set_active(0)
return
def on_waypointTreeView_button_press(self, treeview, event):
x = int(event.x)
y = int(event.y)
time = event.time
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
treeview.grab_focus()
treeview.set_cursor(path, col, 0)
if event.button == 1:
selected,iter = treeview.get_selection().get_selected()
id_waypoint=selected.get_value(iter,0)
self.parent.refreshWaypointView(id_waypoint)
return False
def on_listareasearch_clicked(self, widget):
lisOpt = {
_("Title"):"title",
_("Date"):"date",
_("Distance"):"distance",
_("Sport"):"sport",
_("Time"):"time",
_("Beats"):"beats",
_("Maxbeats"):"maxbeats",
_("Average"):"average",
_("Calories"):"calories"
}
self.listsearch.title = gtk_str(self.lsa_searchvalue.get_text())
self.listsearch.sport = self.lsa_sport.get_active()
self.listsearch.past = self.lsa_past.get_active()
self.listsearch.duration = self.lsa_duration.get_active()
self.listsearch.distance = self.lsa_distance.get_active()
self.parent.refreshListView(self.listsearch.condition)
def on_listareareset_clicked(self, widget):
self.listsearch.reset_lsa()
self.parent.refreshListView(self.listsearch.condition)
def create_menulist(self,columns):
for i, column_dict in enumerate(columns):
if 'visible' in column_dict and not column_dict['visible']:
pass
else:
item = Gtk.CheckMenuItem(column_dict['name'])
#self.lsa_searchoption.append_text(name)
item.connect("button_press_event", self.on_menulistview_activate, i)
self.menulistviewOptions.append(item)
self.menulistviewOptions.show_all()
def on_menulistview_activate(self,widget,widget2,widget_position):
listMenus = {
0:"title",
1:"date",
2:"distance",
3:"sport",
4:"time",
5:"beats",
6:"maxbeats",
7:"average",
8:"calories" }
items = self.menulistviewOptions.get_children()
if items[widget_position-1].get_active():
newValue = "False"
else:
newValue = "True"
menufile = XMLParser(self.fileconf)
menufile.setValue("listviewmenu",listMenus[widget_position-1],newValue)
self.showAllRecordTreeViewColumns()
def showAllRecordTreeViewColumns(self):
menufile = XMLParser(self.fileconf)
listMenus = {
"id_record":0,
"title":1,
"date":2,
"distance":3,
"sport":4,
"time":5,
"beats":6,
"maxbeats":7,
"average":8,
"calories":9 }
columns = self.allRecordTreeView.get_columns()
menuItems = self.menulistviewOptions.get_children()
for column in listMenus:
visible = menufile.getValue("listviewmenu",column)
if visible == "True":
visible = True
else:
visible = False
numcolumn = listMenus[column]
#show the selected columns
columns[numcolumn].set_visible(visible)
#select the choice in the menu
if numcolumn != 0 and self.menublocking != 1:
menuItems[numcolumn-1].set_active(visible)
self.menublocking = 1
def zoom_graph(self, y1limits=None, y1color=None, y1_linewidth=1):
logging.debug(">>")
logging.debug("Reseting graph Y axis with ylimits: %s", y1limits)
self.drawarearecord.drawgraph(self.record_list,self.laps, y1limits=y1limits, y1color=y1color, y1_linewidth=y1_linewidth)
logging.debug("<<")
def update_athlete_item(self, idx, date, weight, bf, restingHR, maxHR):
logging.debug(">>")
#Prepare vars
idx = str(idx)
date = str(date)
weight = str(weight)
bf = str(bf)
restingHR = str(restingHR)
maxHR = str(maxHR)
#Set vars
self.labelAthleteIdx.set_text(idx)
self.entryAthleteDate.set_text(date)
self.entryAthleteWeight.set_text(weight)
self.entryAthleteBF.set_text(bf)
self.entryAthleteRestingHR.set_text(restingHR)
self.entryAthleteMaxHR.set_text(maxHR)
logging.debug("<<")
####################
## Event handlers ##
####################
def on_xaxischange(self, widget, data=None, activity=None):
'''Handler for record graph axis selection changes'''
if widget.get_active():
activity.x_axis = data
self.actualize_recordgraph(activity)
def on_xlapschange(self, widget, activity=None):
if widget.get_active():
activity.show_laps = True
else:
activity.show_laps = False
self.actualize_recordgraph(activity)
def on_gridchange(self, widget, axis=None, activity=None):
'''Handler for record graph grid selection changes'''
if axis == 'y1':
activity.y1_grid = not activity.y1_grid
elif axis == 'y2':
activity.y2_grid = not activity.y2_grid
elif axis == 'x':
activity.x_grid = not activity.x_grid
self.actualize_recordgraph(activity)
def on_y1colorchange(self, widget, box, graphdata, activity):
'''Handler for changes to y1 color selection'''
logging.debug("Setting %s to color %s", graphdata, widget.get_color())
if activity.x_axis == "distance":
activity.distance_data[graphdata].set_color(str(widget.get_color()))
elif activity.x_axis == "time":
activity.time_data[graphdata].set_color(str(widget.get_color()))
#Replot the activity
self.actualize_recordgraph(activity)
def on_y2colorchange(self, widget, box, graphdata, activity):
'''Handler for changes to y2 color selection'''
logging.debug("Setting %s to color %s", graphdata, widget.get_color())
if activity.x_axis == "distance":
activity.distance_data[graphdata].set_color(None, str(widget.get_color()))
elif activity.x_axis == "time":
activity.time_data[graphdata].set_color(None, str(widget.get_color()))
#Replot the activity
self.actualize_recordgraph(activity)
def on_y1change(self, widget, box, graphdata, activity):
'''Handler for changes to y1 selection'''
logging.debug("Y1 selection toggled: %s", graphdata)
#Loop through all options and set data correctly
for child in box.get_children():
if activity.x_axis == "distance":
for item in activity.distance_data:
if activity.distance_data[item].title == child.get_label():
logging.debug("Setting %s to %s", item, child.get_active())
activity.distance_data[item].show_on_y1 = child.get_active()
elif activity.x_axis == "time":
for item in activity.time_data:
if activity.time_data[item].title == child.get_label():
logging.debug("Setting %s to %s", item, child.get_active())
activity.time_data[item].show_on_y1 = child.get_active()
#Replot the activity
self.actualize_recordgraph(activity)
def on_y2change(self, widget, box, graphdata, activity):
'''Handler for changes to y2 selection'''
logging.debug("Y2 selection toggled: %s", graphdata)
#Loop through all options and set data correctly
for child in box.get_children():
if activity.x_axis == "distance":
for item in activity.distance_data:
if activity.distance_data[item].title == child.get_label():
logging.debug("Setting %s to %s", item, child.get_active())
activity.distance_data[item].show_on_y2 = child.get_active()
elif activity.x_axis == "time":
for item in activity.time_data:
if activity.time_data[item].title == child.get_label():
logging.debug("Setting %s to %s", item, child.get_active())
activity.time_data[item].show_on_y2 = child.get_active()
#Replot the activity
self.actualize_recordgraph(activity)
def on_setlimits(self, widget, activity, reset, data):
'''Handler for setting graph limits buttons'''
if data is None:
logging.debug("Resetting graph limits...")
activity.x_limits_u = (None, None)
activity.y1_limits_u = (None, None)
activity.y2_limits_u = (None, None)
#Replot the activity
self.actualize_recordgraph(activity)
else:
#Setting to limits in boxes
logging.debug("Setting graph limits...")
#Determine contents of boxes...
xmin = self._float_or(gtk_str(data['xminlabel'].get_text()), activity.x_limits[0])
xmax = self._float_or(gtk_str(data['xmaxlabel'].get_text()), activity.x_limits[1])
y1min = self._float_or(gtk_str(data['y1minlabel'].get_text()), activity.y1_limits[0])
y1max = self._float_or(gtk_str(data['y1maxlabel'].get_text()), activity.y1_limits[1])
y2min = self._float_or(gtk_str(data['y2minlabel'].get_text()), activity.y2_limits[0])
y2max = self._float_or(gtk_str(data['y2maxlabel'].get_text()), activity.y2_limits[1])
logging.debug("Setting graph limits x: (%s,%s), y1: (%s,%s), y2: (%s,%s)",
xmin, xmax, y1min, y1max, y2min, y2max)
activity.x_limits_u = (xmin, xmax)
activity.y1_limits_u = (y1min, y1max)
activity.y2_limits_u = (y2min, y2max)
#Replot the activity
self.actualize_recordgraph(activity)
def on_window1_configure_event(self, widget, event):
#print widget #window widget
#print event # resize event
self.size = self.window1.get_size()
def on_buttonShowOptions_clicked(self, widget):
position_set = self.hpaned1.get_property('position-set')
if position_set:
#Currently not showing options - show them
self.hpaned1.set_property('position-set', False)
self.buttonShowOptions.set_tooltip_text(_('Hide graph display options') )
else:
#Hide options
self.hpaned1.set_position(0)
self.buttonShowOptions.set_tooltip_text(_('Show graph display options') )
logging.debug('Position set: %s', self.hpaned1.get_property('position-set'))
def on_buttonGraphHideOptions_clicked(self, widget):
logging.debug('on_buttonGraphHideOptions_clicked')
self.buttonGraphHideOptions.hide()
self.scrolledwindowGraphOptions.hide()
#for child in self.graph_data_hbox.get_children():
# if isinstance(child, Gtk.Frame):
# child.hide()
self.buttonGraphShowOptions.show()
def on_buttonGraphShowOptions_clicked(self, widget):
logging.debug('on_buttonGraphShowOptions_clicked')
self.buttonGraphShowOptions.hide()
#for child in self.graph_data_hbox.get_children():
# if isinstance(child, Gtk.Frame):
# child.show()
self.scrolledwindowGraphOptions.show()
self.buttonGraphHideOptions.show()
def on_buttonRedrawMap_clicked(self, widget):
logging.debug('on_buttonRedrawMap_clicked')
self.parent.refreshMapView()
def on_radiobuttonMap_toggled(self, widget):
#Ignore the deselected toggle event
if widget.get_active() == False:
return
logging.debug('on_radiobuttonMap_toggled %s activated', widget.get_name())
self.parent.refreshMapView()
def on_comboMapLineType_changed(self, widget):
logging.debug('on_comboMapLineType_changed %s = %s', widget.get_name(), widget.get_active())
self.parent.refreshMapView()
def on_hpaned1_move_handle(self, widget):
logging.debug("Handler %s", widget)
def on_spinbuttonY1_value_changed(self, widget):
y1min = self.spinbuttonY1Min.get_value()
y1max = self.spinbuttonY1Max.get_value()
#Check to see if the min and max have the same...
if y1min == y1max:
if widget.get_name() == "spinbuttonY1Min": #User was changing the min spinbutton, so move max up
y1max += 1
else: #Move min down
y1min -= 1
self.y1_limits=(y1min, y1max)
self.zoom_graph(y1limits=self.y1_limits, y1color=self.y1_color, y1_linewidth=self.y1_linewidth)
def on_buttonResetGraph_clicked(self, widget):
#self.zoom_graph()
#Reset stored values
self.y1_limits = None
self.y1_color = None
self.y1_linewidth = 1
self.zoom_graph()
def on_colorbuttonY1LineColor_color_set(self, widget):
y1color = widget.get_color()
cs = y1color.to_string()
self.y1_color = cs[0:3] + cs[5:7] + cs[9:11]
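# Editor's note: Gdk.Color.to_string() typically returns 16-bit-per-channel hex such as
# "#3a3aa1a14242"; the slices keep the high byte of each channel, giving e.g. "#3aa142".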
self.drawarearecord.drawgraph(self.record_list,self.laps, y1limits=self.y1_limits, y1color=self.y1_color, y1_linewidth=self.y1_linewidth)
def on_spinbuttonY1LineWeight_value_changed(self, widget):
self.y1_linewidth = self.spinbuttonY1LineWeight.get_value_as_int()
self.drawarearecord.drawgraph(self.record_list,self.laps, y1limits=self.y1_limits, y1color=self.y1_color, y1_linewidth=self.y1_linewidth)
def on_edit_clicked(self,widget):
selected,iter = self.recordTreeView.get_selection().get_selected()
id_record = selected.get_value(iter,0)
self.parent.editRecord(id_record, self.selected_view)
def on_remove_clicked(self,widget):
selected,iter = self.recordTreeView.get_selection().get_selected()
id_record = selected.get_value(iter,0)
self.parent.removeRecord(id_record)
def on_export_csv_activate(self,widget):
self.parent.exportCsv()
def on_newrecord_clicked(self,widget):
if self.selected_view == 'athlete':
#print 'New athlete'
self.on_athleteTreeView_edit( None, None)
else:
self.parent.newRecord(view=self.selected_view)
def on_edituser_activate(self,widget):
self.parent.editProfile()
def on_calendar_doubleclick(self,widget):
self.parent.newRecord()
def on_sportlist_changed(self,widget):
logging.debug("--")
if gtk_str(self.sportlist.get_active_text()) != self.activeSport:
self.activeSport = gtk_str(self.sportlist.get_active_text())
self.parent.refreshListRecords()
self.parent.refreshGraphView(self.selected_view)
else:
logging.debug("on_sportlist_changed called with no change")
def on_page_change(self,widget,gpointer,page):
logging.debug("--")
if page == 0:
self.selected_view="record"
elif page == 1:
self.selected_view="day"
elif page == 2:
self.selected_view="week"
elif page == 3:
self.selected_view="month"
elif page == 4:
self.selected_view="year"
elif page == 5:
self.selected_view="athlete"
elif page == 6:
self.selected_view="stats"
else:
self.selected_view="record"
self.parent.refreshGraphView(self.selected_view)
def on_recordpage_change(self,widget,gpointer,page):
if page == 0:
selected_view="info"
elif page == 1:
selected_view="graphs"
elif page == 2:
selected_view="map"
elif page == 3:
selected_view="heartrate"
elif page == 4:
selected_view="analytics"
self.parent.refreshRecordGraphView(selected_view)
def on_showmap_clicked(self,widget):
self.infoarea.hide()
self.maparea.show()
self.parent.refreshMapView(full_screen=True)
def on_hidemap_clicked(self,widget):
self.maparea.hide()
self.infoarea.show()
def on_btnShowLaps_toggled(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_day_combovalue_changed(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_week_combovalue_changed(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_month_combovalue_changed(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_year_combovalue_changed(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_total_combovalue_changed(self,widget):
logging.debug("--")
self.parent.refreshGraphView(self.selected_view)
def on_calendar_selected(self, widget):
logging.debug(">>")
logging.debug("Block (%s) | Selected view: %s", self.block, self.selected_view)
if self.block:
self.block = False
else:
if self.selected_view == "record":
self.recordview.set_current_page(0)
self.parent.refreshRecordGraphView("info")
self.parent.refreshListRecords()
self.parent.refreshGraphView(self.selected_view)
logging.debug("<<")
def on_calendar_changemonth(self,widget):
logging.debug("--")
self.block = True
self.notebook.set_current_page(3)
self.selected_view="month"
self.parent.refreshListRecords()
self.parent.refreshGraphView(self.selected_view)
def on_calendar_next_year(self,widget):
logging.debug("--")
self.block = True
self.notebook.set_current_page(4)
self.selected_view="year"
self.parent.refreshListRecords()
self.parent.refreshGraphView(self.selected_view)
def on_classicview_activate(self,widget):
self.waypointarea.hide()
self.listarea.hide()
#self.athletearea.hide()
self.selected_view = "record"
self.classicarea.show()
def on_listview_activate(self,widget):
self.waypointarea.hide()
self.classicarea.hide()
#self.athletearea.hide()
self.selected_view = "listview"
#self.parent.refreshListView()
self.parent.refreshListView(self.listsearch.condition)
self.listarea.show()
def on_athleteview_activate(self,widget=None):
#self.waypointarea.hide()
#self.classicarea.hide()
#self.listarea.hide()
self.parent.refreshAthleteView()
#self.athletearea.show()
def on_statsview_activate(self,widget=None):
self.parent.refreshStatsView()
def on_waypointsview_activate(self,widget):
self.listarea.hide()
self.classicarea.hide()
#self.athletearea.hide()
self.parent.refreshWaypointView()
self.waypointarea.show()
def on_menu_importdata_activate(self,widget):
self.parent.importData()
def on_extensions_activate(self,widget):
self.parent.editExtensions()
def on_gpsplugins_activate(self,widget):
self.parent.editGpsPlugins()
#reviewed up to this point
def on_recordTreeView_button_press_event(self, treeview, event):
''' Handler for clicks on recordTreeview list (all records for a day)
event.button = mouse button pressed (i.e. 1 = left, 3 = right)
'''
logging.debug(">>")
x = int(event.x)
y = int(event.y)
time = event.time
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
treeview.grab_focus()
treeview.set_cursor(path, col, 0)
if event.button == 3:
selected,iter = treeview.get_selection().get_selected()
#In case there is a (bad) record without a date, so it can still be deleted
try:
date = self.parent.date.getDate()
except Exception:
date = None
self.popup.show(selected.get_value(iter,0), event.button, time, str(date))
elif event.button == 1:
self.notebook.set_current_page(0)
self.parent.refreshGraphView("record")
logging.debug("<<")
return False
def on_allRecordTreeView_button_press(self, treeview, event):
''' Handler for clicks on listview list
event.button = mouse button pressed (i.e. 1 = left, 3 = right)
'''
logging.debug(">>")
x = int(event.x)
y = int(event.y)
time = event.time
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
treeview.grab_focus()
treeview.set_cursor(path, col, 0)
if event.button == 3:
selected,iter = treeview.get_selection().get_selected()
#In case there is a (bad) record without a date, so it can still be deleted
try:
date = self.parent.date.getDate()
except Exception:
pass
self.popup.show(selected.get_value(iter,0), event.button, time, selected.get_value(iter,2))
elif event.button == 1:
self.notebook.set_current_page(0)
self.parent.refreshGraphView("record")
logging.debug("<<")
return False
def actualize_recordTreeView(self, date):
logging.debug(">>")
iterOne = False
store = Gtk.TreeStore(
GObject.TYPE_INT, #record_id
GObject.TYPE_STRING, #Time
GObject.TYPE_STRING, #Sport
GObject.TYPE_STRING, #Distance
object)
if self.activeSport:
sport = self._sport_service.get_sport_by_name(self.activeSport)
else:
sport = None
for activity in self.pytrainer_main.activitypool.get_activities_for_day(date, sport=sport):
iter = store.append(None)
if not iterOne:
iterOne = iter
localTime = activity.date_time.strftime("%H:%M")
dist = self.uc.distance(activity.distance)
distance = "%0.2f" % (float(dist) )
store.set (
iter,
0, activity.id,
1, localTime,
2, activity.sport.name,
3, str(distance) #Needs to be US pref aware....
)
for lap in activity.Laps:
lapNumber = "%s %02d" % (_("lap"), lap.lap_number + 1)
dist = self.uc.distance(lap.distance)
distance = "%0.2f" % (float(dist) / 1000.0)
timeHours = int(lap.duration / 3600)
timeMin = int((lap.duration / 3600.0 - timeHours) * 60)
timeSec = lap.duration - (timeHours * 3600) - (timeMin * 60)
if timeHours > 0:
duration = "%d%s%02d%s%02d%s" % (timeHours, _("h"), timeMin, _("m"), timeSec, _("s"))
else:
duration = "%2d%s%02d%s" % (timeMin, _("m"), timeSec, _("s"))
child_iter = store.append(iter)
store.set (
child_iter,
0, activity.id,
1, lapNumber,
2, duration,
3, distance
)
store.set_sort_column_id(1, Gtk.SortType.ASCENDING)
self.recordTreeView.set_model(store)
if iterOne:
self.recordTreeView.get_selection().select_iter(iterOne)
logging.debug("<<")
def parseFloat(self,string):
try:
return float(string)
except (ValueError, TypeError):
return float(0)
def actualize_calendar(self,record_list):
logging.debug(">>")
self.calendar.clear_marks()
#Mark each day that has activity
for i in record_list:
self.calendar.mark_day(i)
#Turn on displaying of week numbers
display_options = self.calendar.get_display_options()
self.calendar.set_display_options(display_options|Gtk.CalendarDisplayOptions.SHOW_WEEK_NUMBERS)
logging.debug("<<")
def on_about_activate(self,widget):
if self.aboutwindow is None:
self.aboutwindow = About(self.data_path, self.version)
self.aboutwindow.run()
else:
self.aboutwindow.present()
def getSportSelected(self):
sport = self.sportlist.get_active()
if (sport > 0):
return gtk_str(self.sportlist.get_active_text())
else:
return None
def quit(self, *args):
window_size = "%d, %d" % self.size
self.pytrainer_main.profile.setValue("pytraining","window_size", window_size)
self.parent.quit()
#sys.exit("Exit!")
#self.parent.webservice.stop()
#self.gtk_main_quit()
def on_yearview_clicked(self,widget):
self.notebook.set_current_page(2)
self.selected_view="year"
self.actualize_yearview()
def on_recordTree_clicked(self,widget,num,num2):
selected,iter = widget.get_selection().get_selected()
self.parent.editRecord(selected.get_value(iter,0), self.selected_view)
### athleteview events ###
def on_athleteTreeView_button_press_event(self, treeview, event):
x = int(event.x)
y = int(event.y)
time = event.time
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
treeview.grab_focus()
treeview.set_cursor(path, col, 0)
selected,iter = treeview.get_selection().get_selected()
if event.button == 3:
#Right mouse button...
idx = selected.get_value(iter,0)
date = selected.get_value(iter,1)
weight = selected.get_value(iter,2)
bf = selected.get_value(iter,3)
restingHR = selected.get_value(iter,4)
maxHR = selected.get_value(iter,5)
#print "show popup etc (clicked on idx %s, date %s)" % (idx, date)
#Show popup menu...
popup = Gtk.Menu()
#Edit Entry Item
menuitem = Gtk.MenuItem(label=_("Edit Entry"))
menuitem.connect("activate", self.on_athleteTreeView_edit, {'id':idx, 'date':date, 'weight':weight, 'bf':bf, 'restingHR':restingHR, 'maxHR':maxHR})
popup.attach(menuitem, 0, 1, 0, 1)
#New Entry Item
menuitem = Gtk.MenuItem(label=_("New Entry"))
menuitem.connect("activate", self.on_athleteTreeView_edit, None)
popup.attach(menuitem, 0, 1, 1, 2)
#Separator
menuitem = Gtk.SeparatorMenuItem()
popup.attach(menuitem, 0, 1, 2, 3)
#Delete Entry Item
menuitem = Gtk.MenuItem(label=_("Delete Entry"))
menuitem.connect("activate", self.on_athleteTreeView_delete, idx)
popup.attach(menuitem, 0, 1, 3, 4)
popup.show_all()
popup.popup_at_pointer(None)
else:
#Left mouse - so display this row
pass
'''
idx = selected.get_value(iter,0)
date = selected.get_value(iter,1)
weight = selected.get_value(iter,2)
bf = selected.get_value(iter,3)
restingHR = selected.get_value(iter,4)
maxHR = selected.get_value(iter,5)
self.update_athlete_item(idx, date, weight, bf, restingHR, maxHR)'''
def on_athleteTreeView_edit(self, widget, data):
logging.debug('>>')
if data is None:
#New entry...
logging.debug('New athlete entry')
title = _('Create Athlete Entry')
data = {'id': None, 'date': Date().getDate().strftime("%Y-%m-%d"),
'weight': None, 'bf': None, 'restingHR': None, 'maxHR': None}
else:
logging.debug('Edit existing athlete entry: %s', data)
title = _('Edit Athlete Entry')
dialog = Gtk.Dialog(title=title, parent=self.pytrainer_main.windowmain.window1, flags= Gtk.DialogFlags.DESTROY_WITH_PARENT,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
Gtk.STOCK_SAVE, Gtk.ResponseType.ACCEPT))
dialog.set_modal(False)
#Get Content area of dialog
vbox = dialog.get_content_area()
#Build data display
table = Gtk.Table(1,2)
self.entryList = []
#Add date
label = Gtk.Label(label=_("<b>Date</b>"))
label.set_use_markup(True)
entry = Gtk.Entry()
entry.set_text(data['date'])
self.entryList.append(entry)
#Date calander widget
cal = Gtk.Image()
cal.set_from_stock(Gtk.STOCK_INDEX, Gtk.IconSize.BUTTON)
calbut = Gtk.Button()
calbut.add(cal)
calbut.connect("clicked", self.on_athletecalendar_clicked)
table.attach(label,0,1,0,1)
table.attach(entry,1,2,0,1)
#table.attach(calbut,2,3,0,1) #TODO
#Add weight
label = Gtk.Label(label=_("<b>Weight</b>"))
label.set_use_markup(True)
entry = Gtk.Entry()
if data['weight']:
entry.set_text(str(round(data['weight'], 2)))
self.entryList.append(entry)
table.attach(label,0,1,1,2)
table.attach(entry,1,2,1,2)
#Add Body fat
label = Gtk.Label(label=_("<b>Body Fat</b>"))
label.set_use_markup(True)
entry = Gtk.Entry()
if data['bf']:
entry.set_text(str(round(data['bf'], 2)))
self.entryList.append(entry)
table.attach(label,0,1,2,3)
table.attach(entry,1,2,2,3)
#Add Resting HR
label = Gtk.Label(label=_("<b>Resting Heart Rate</b>"))
label.set_use_markup(True)
entry = Gtk.Entry()
if data['restingHR']:
entry.set_text(str(data['restingHR']))
self.entryList.append(entry)
table.attach(label,0,1,3,4)
table.attach(entry,1,2,3,4)
#Add Max HR
label = Gtk.Label(label=_("<b>Max Heart Rate</b>"))
label.set_use_markup(True)
entry = Gtk.Entry()
if data['maxHR']:
entry.set_text(str(data['maxHR']))
self.entryList.append(entry)
table.attach(label,0,1,4,5)
table.attach(entry,1,2,4,5)
vbox.add(table)
vbox.show_all()
response = dialog.run()
#dialog.destroy()
if response == Gtk.ResponseType.ACCEPT:
#print "on_athleteTreeView_edit save called", data
data['date'] = gtk_str(self.entryList[0].get_text())
data['weight'] = gtk_str(self.entryList[1].get_text())
data['bf'] = gtk_str(self.entryList[2].get_text())
data['restingHR'] = gtk_str(self.entryList[3].get_text())
data['maxHR'] = gtk_str(self.entryList[4].get_text())
self.on_athleteSave(data)
logging.debug('Athlete data saved: %s', data)
dialog.destroy()
logging.debug('<<')
def on_athleteTreeView_delete(self, widget, data):
'''User has opted to delete entry'''
logging.debug(">>")
msg = _("Delete this database entry?")
md = Gtk.MessageDialog(self.pytrainer_main.windowmain.window1, Gtk.DialogFlags.DESTROY_WITH_PARENT, Gtk.MessageType.QUESTION, Gtk.ButtonsType.OK_CANCEL, msg)
md.set_title(_("Are you sure?"))
response = md.run()
md.destroy()
if response == Gtk.ResponseType.OK:
logging.debug("User confirmed deletion of athlete entry with id: %s", data)
self.pytrainer_main.athlete.delete_record(data)
self.parent.refreshAthleteView()
else:
logging.debug("User canceled athlete record deletion for id %s", data)
logging.debug("<<")
def on_athleteSave(self, data):
#Get data in fields
id_athletestat = data['id']
date = data['date']
#Check if valid date supplied
try:
_date = dateutil.parser.parse(date).date()
except (ValueError) as e:
logging.error("Invalid date %s", e)
return
weight = data['weight'] or None
bodyfat = data['bf'] or None
restinghr = data['restingHR'] or None
maxhr = data['maxHR'] or None
#TODO - are any other fields required?
#Check if an entry has been edited or is a new one
if id_athletestat is None or id_athletestat == "":
#New entry
logging.debug('Creating new entry with values: date %s, weight %s, bodyfat %s, restinghr %s, maxhr %s', date, weight, bodyfat, restinghr, maxhr)
self.parent.athlete.insert_athlete_stats(date, weight, bodyfat, restinghr, maxhr)
else:
#Edited existing entry
logging.debug('Updating id_athletestat:%s with values: date %s, weight %s, bodyfat %s, restinghr %s, maxhr %s', id_athletestat, date, weight, bodyfat, restinghr, maxhr)
self.parent.athlete.update_athlete_stats(id_athletestat, date, weight, bodyfat, restinghr, maxhr)
self.parent.refreshAthleteView()
def on_athletecalendar_clicked(self,widget):
logging.debug(">>")
calendardialog = WindowCalendar(self.data_path,self)
calendardialog.run()
logging.debug("<<")
def setDate(self,date):
logging.debug(date)
#self.entryAthleteDate.set_text(date)
######## waypoints events ##########
def on_savewaypoint_clicked(self,widget):
selected,iter = self.waypointTreeView.get_selection().get_selected()
id_waypoint = selected.get_value(iter,0)
lat = gtk_str(self.waypoint_latitude.get_text())
lon = gtk_str(self.waypoint_longitude.get_text())
name = gtk_str(self.waypoint_name.get_text())
desc = gtk_str(self.waypoint_description.get_text())
sym = gtk_str(self.waypoint_type.get_active_text())
self.parent.updateWaypoint(id_waypoint,lat,lon,name,desc,sym)
def on_removewaypoint_clicked(self,widget):
selected,iter = self.waypointTreeView.get_selection().get_selected()
id_waypoint = selected.get_value(iter,0)
self.parent.removeWaypoint(id_waypoint)
def on_hrpiebutton_clicked(self,widget):
self.heartrate_vbox2.show()
self.heartrate_vbox.hide()
self.heartrate_vbox3.hide()
def on_hrplotbutton_clicked(self,widget):
self.heartrate_vbox.show()
self.heartrate_vbox2.hide()
self.heartrate_vbox3.hide()
def on_hrhistobutton_clicked(self,widget):
self.heartrate_vbox3.show()
self.heartrate_vbox.hide()
self.heartrate_vbox2.hide()
def _totals_from_activities(self, activity_list):
tbeats = 0
distance = 0
calories = 0
timeinseconds = 0
beats = 0
maxbeats = 0
maxspeed = 0
average = 0
maxpace = "0:00"
pace = "0:00"
totalascent = 0
totaldescent = 0
for activity in activity_list:
distance += activity.distance
if activity.calories:
calories += activity.calories
timeinseconds += activity.duration
if activity.upositive:
totalascent += activity.upositive
if activity.unegative:
totaldescent += activity.unegative
if activity.beats:
tbeats += activity.beats*(activity.duration/60/60)
if activity.maxspeed and activity.maxspeed > maxspeed:
maxspeed = activity.maxspeed
if activity.maxbeats and activity.maxbeats > maxbeats:
maxbeats = activity.maxbeats
distance = self.uc.distance(distance)
maxspeed = self.uc.speed(maxspeed)
if tbeats > 0 and timeinseconds > 0:
tbeats = tbeats/(timeinseconds/60/60)
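# Editor's note: tbeats accumulates beats * hours per activity in the loop above, so
# dividing by the total hours here yields a duration-weighted average heart rate rather
# than a plain mean of the per-activity averages.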
if distance > 0 and timeinseconds > 0:
average = distance/(float(timeinseconds)/60/60)
if maxspeed > 0:
maxpace = "%d:%02d" %((3600/maxspeed)/60,(3600/maxspeed)%60)
if average > 0:
pace = "%d:%02d" %((3600/average)/60,(3600/average)%60)
return tbeats, distance, calories, timeinseconds, beats, maxbeats, maxspeed, average, maxpace, pace, totalascent, totaldescent
| gpl-2.0 |
pankajk/MasterThesis | Code/Graph_Kernels/SKG/kroneckerTester.py | 1 | 2542 | from KroneckerInitMatrix import InitMatrix
import KroneckerGenerator
import numpy as np
import networkx as nx
import testgg as test
import matplotlib.pyplot as plt
def get_graph(nxgraph):
x = nxgraph
cc_conn = nx.connected_components(x)
num_cc = nx.number_connected_components(x)
#largest_cc = len(cc_conn[0])
return x, cc_conn, num_cc #, largest_cc
def create_graph_stats(nxgraph):
(x, cc_conn, num_cc) = get_graph(nxgraph) #, largest_cc
cc = nx.closeness_centrality(x)
bc = nx.betweenness_centrality(x)
deg = nx.degree_centrality(x)
dens = nx.density(x)
stats = {'cc':cc, 'bc':bc, 'deg':deg, \
'num_cc':num_cc, 'dens':dens}#, 'largest_cc':largest_cc}
return stats #conn,
#above are methods to make input for histogram
nodes = 2
init = InitMatrix(nodes)
init.make()
#Alpha Beta Method of Testing
init.addEdge(0, 1)
init.addSelfEdges()
init.makeStochasticAB(0.4, 0.2)
#Custom Method of Testing
#p = 15
#c = 6
#probArr = np.array([1, c*p, p/c, 0, 0, c*p, 1, p/c, 0, 0, p/c, p/c, 1, p/c, p/c, 0, 0, p/c, 1, c*p, 0, 0, p/c, c*p, 1])
#init.makeStochasticCustom(probArr)
#Networkx Graph Gen as Seed, Alpha Beta after Testing
#G = nx.watts_strogatz_graph(5, 2, 0.1)
#nx.draw(G)
#plt.show() # if you want to visualize your seed graph first
#init = InitMatrix(nodes)
#init = init.makeStochasticABFromNetworkxGraph(G, 0.75, 0.5)
#Networkx Graph Gen as Seed Testing, not Stochastic after
#G = nx.watts_strogatz_graph(5, 3, 0.1)
#G = nx.hypercube_graph(3)
#nx.draw(G)
#plt.show() # if you want to visualize your seed graph first
#init = InitMatrix(nodes)
#init = init.makeFromNetworkxGraph(G)
#init.addSelfEdges() # if you want to ensure self edges for Kronecker
k = 5
print ("Seed Matrix Nodes:")
print (nodes)
print ("Kronecker Iterations:")
print (k)
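# Editor's note: with the Kronecker product applied k times, the generated graph is
# expected to have nodes**k vertices -- 2**5 = 32 for this 2-node seed and k = 5
# (assuming generateStochasticKron powers the seed matrix k times).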
nxgraph = KroneckerGenerator.generateStochasticKron(init, k, True)
#for line in nx.generate_edgelist(nxgraph, data=False):
# print(line)
print ("Done Creating Network!")
is_bipart = nx.is_bipartite(nxgraph)
print ("is_bipart:")
print (is_bipart)
is_conn = nx.is_connected(nxgraph)
print ("is_conn:")
print (is_conn) #test
#print "Exporting to GML File"
#nx.write_gml(nxgraph,"KronSeed1_75a5b.gml") #export to gml file for gephi
#nx.draw(nxgraph, pos=nx.spring_layout(nxgraph))
#plt.show()
#print "Printing Statistics..."
#stats = create_graph_stats(nxgraph)
#print "Density: "
#print stats['dens']
#print "Creating Histogram..."
#histogramInput = create_graph_stats(nxgraph)
#test.histogram(histogramInput, 30)
| mit |
mmottahedi/neuralnilm_prototype | scripts/e125.py | 2 | 4535 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying layer-by-layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
"""
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
],
layer_changes={
251: {
'remove_from': -3,
'new_layers':
[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(10),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
}
}
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=5000)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
| mit |