repo_name (string, lengths 6-112) | path (string, lengths 4-204) | copies (string, lengths 1-3) | size (string, lengths 4-6) | content (string, lengths 714-810k) | license (string, 15 classes) |
---|---|---|---|---|---|
dhhagan/py-openaq | openaq/decorators.py | 1 | 2862 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from functools import wraps
import warnings
from unittest import SkipTest
from .utils import to_naive_timestamp, clean_encodings
try:
import pandas as pd
_no_pandas = False
except ImportError:
_no_pandas = True
def skipif(skipcondition, msg=""):
"""Skip the decorated callable by raising ``SkipTest(msg)`` when ``skipcondition`` is True."""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if skipcondition:
raise SkipTest(msg)
return f(*args, **kwargs)
return decorated_function
return decorator
def pandasize():
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
df = kwargs.pop('df', False)
index = kwargs.pop('index', 'local')
if df and not _no_pandas:
status, resp = f(*args, **kwargs)
if status == 200:
resp = resp['results']
if f.__name__ == 'latest':
d = []
for i in resp:
for m in i['measurements']:
tmp = m
tmp['country'] = i['country']
tmp['city'] = i['city']
tmp['location'] = i['location']
tmp['lastUpdated'] = pd.to_datetime(tmp['lastUpdated'])
d.append(tmp)
resp = d
data = pd.io.json.json_normalize(resp)
# If there are any datetimes, make them datetimes!
for each in [i for i in data.columns if 'date' in i]:
if 'local' in each:
data[each] = pd.to_datetime(data[each].apply(lambda x: to_naive_timestamp(x)))
else:
data[each] = pd.to_datetime(data[each])
if f.__name__ in ('latest'):
data.index = data['lastUpdated']
del data['lastUpdated']
elif f.__name__ in ('measurements'):
if index == 'utc':
data.index = data['date.utc']
del data['date.utc']
elif index == 'local':
data.index = data['date.local']
del data['date.local']
else:
pass
# Clean up encodings
if 'unit' in data.columns:
data['unit'] = data['unit'].apply(clean_encodings)
return data
return f(*args, **kwargs)
return decorated_function
return decorator
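# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original openaq module): a hypothetical
# client method wrapped by ``pandasize`` to show the calling convention. The
# class name and payload are illustrative only; a decorated method must return
# a (status_code, response_json) tuple.
class _FakeAPI(object):
    @pandasize()
    def latest(self, **kwargs):
        return 200, {'results': [
            {'country': 'US', 'city': 'Boston', 'location': 'BOS-1',
             'measurements': [{'parameter': 'pm25', 'value': 7.0,
                               'lastUpdated': '2017-01-01T00:00:00Z'}]}]}

if __name__ == '__main__':
    # With ``df=True`` (and pandas installed) the decorator returns a DataFrame
    # indexed by ``lastUpdated``; otherwise the raw (status, json) tuple comes back.
    print(_FakeAPI().latest(df=True))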
| mit |
fmv1992/data_utilities | data_utilities/sklearn_utilities/grid_search.py | 1 | 7907 | """Grid search utilities for scikit-learn models.
This module:
(1) Do one thing and do it well.
(2) Maximum flexibility, sane defaults.
(3) Enable parallelism.
(4) Enable persistence.
"""
import pickle
import os
import hashlib
import io
import zipfile
import multiprocessing as mp
import functools
from data_utilities.sklearn_utilities import get_estimator_name
class BasePersistentGrid(object):
"""Base class for persistent grids.
Its characteristics are:
* Simple usage. Instantiate it once per project (same call), feed it into
the function, and let it take care of the work.
* Store combinations of dataset + classifier + grid.
* Parallelization.
"""
def __new__(cls, **kwargs):
"""Allow single interface for instantiation and creation of objects.
All objects can be created with `cls.load_from_path()`.
"""
if os.path.isfile(kwargs['persistent_grid_path']):
loaded_object = cls.load_from_path(**kwargs)
return loaded_object
else:
return super(BasePersistentGrid, cls).__new__(cls)
def __init__(self,
persistent_grid_path=None,
dataset_path=None,
hash_function=hashlib.md5,
save_every_n_iterations=10):
# Create creation date if needed.
self.temporary_results = {}
# These attributes are constant even between runs.
self.hash_function = hash_function
self.persistent_grid_path = persistent_grid_path
self.save_every_n_iterations = save_every_n_iterations
# These two attributes can change between interactions.
self.dataset_path = dataset_path
if not hasattr(self, 'dataset_hash'):
self._update_dataset_hash(self.dataset_path)
# Initialize multiprocessing attributes: manager, lock, data and
# _n_counter.
self._instantiate_shared_attributes()
# Zip file object to be written to disk from time to time.
# self._data_zip = zipfile.ZipFile(io.BytesIO(), mode='x')
# Unpacked dictionary: hash -> value
# self.data = self._load_data_from_bytesio()
# These attributes change every session.
# self._n_counter = 0
@staticmethod
def load_from_path(*args, **kwargs):
if os.path.isfile(kwargs['persistent_grid_path']):
with open(kwargs['persistent_grid_path'], 'rb') as f:
loaded_object = pickle.load(f)
# Refresh data set hash.
print(kwargs['dataset_path'])
loaded_object._update_dataset_hash(kwargs['dataset_path'])
# All other attributes are kept.
loaded_object._instantiate_shared_attributes()
return loaded_object
else:
pg = PersistentGrid(**kwargs)
return pg
def get_multiprocessing_manager(self):
return self.mp_manager
def _instantiate_shared_attributes(self):
# Common manager (for other common attributes).
try:
self.mp_manager
except AttributeError:
self.mp_manager = mp.Manager()
# Common lock.
self.mp_lock = self.mp_manager.Lock()
# Shared computed values (data).
self.mp_data = self.mp_manager.dict()
# Common attributes update counter (_n_counter).
self._mp_n_counter_value = self.mp_manager.Value(int, 0)
def _update_dataset_hash(self, dataset_path):
self.dataset_hash = self.get_hash(dataset_path)
def update(self, estimator, grid, results):
request_hash = self.compute_request_hash(estimator, grid)
with self.mp_lock:
self.mp_data[request_hash] = results
self._mp_n_counter_value.value += 1
if self._mp_n_counter_value.value % self.save_every_n_iterations == 0:
self.save()
def _get_multiprocessing_shared_attributes(self):
pass
def save(self):
# Store values.
(_store_manager, _store_lock, _store_data, _store_counter) = (
self.mp_manager, self.mp_lock, self.mp_data,
self._mp_n_counter_value)
# Delete values.
del (self.mp_manager, self.mp_lock, self._mp_n_counter_value)
# Make sure data is picklable.
self.mp_data = dict(self.mp_data)
with open(self.persistent_grid_path, 'wb') as f:
pickle.dump(self, f)
# Store saved values.
(self.mp_manager, self.mp_lock, self.mp_data, self._mp_n_counter_value) = (_store_manager, _store_lock, _store_data, _store_counter)
def compute_request_hash(self, estimator, grid):
estimator_hash = self.get_hash(get_estimator_name(estimator))
grid_hash = self.get_hash(grid)
final_hash = self.get_hash(
self.dataset_hash + estimator_hash + grid_hash)
return final_hash
def retrieve(self, estimator, grid):
# If already stored just return.
request_hash = self.compute_request_hash(estimator, grid)
return self.mp_data.get(request_hash, None)
def get_hash(self, x):
# TODO: cached for strings and paths.
# Need to generalize the reading of:
# 1) file paths
# 2) small strings
# 3) python_objects
if isinstance(x, (dict, bytes)) or not os.path.isfile(x): # use function cache.
return self._get_hash_from_hashable(self._transform_to_hashable(x))
# For files.
hash_obj = self.hash_function()
iter_of_bytes = open(x, 'rb')
try:
data = iter_of_bytes.read(io.DEFAULT_BUFFER_SIZE)
while data:
hash_obj.update(data)
data = iter_of_bytes.read(io.DEFAULT_BUFFER_SIZE)
finally:
iter_of_bytes.close()
return hash_obj.digest()
def _transform_to_hashable(self, x):
if isinstance(x, str):
bytesobj = x.encode()
elif isinstance(x, bytes):
bytesobj = x
else:
bytesobj = pickle.dumps(x)
return bytesobj
@functools.lru_cache(maxsize=2**12)
def _get_hash_from_hashable(self, hashable):
hash_obj = self.hash_function()
hash_obj.update(hashable)
return hash_obj.digest()
def _load_data_from_bytesio(self, zipf):
"""Return a dictionary of hexdigest -> results."""
keys = map(lambda x: x[:-len('.pickle')] if x.endswith('.pickle') else x, zipf.namelist())
values = map(zipf.read, zipf.namelist())
# TODO: use a shared dictionary
return dict(zip(keys, values))
class PersistentGrid(BasePersistentGrid):
"""Allow easy persistence between interrupted grid searches.
Example:
>>> import pandas as pd, numpy as np
>>> from sklearn.tree import DecisionTreeClassifier as DTC
>>> from sklearn.datasets import load_breast_cancer
>>> pg_path = '/tmp/pg.pickle'
>>> dset_path = '/tmp/data.csv'
>>> dataset = load_breast_cancer()
>>> df = pd.DataFrame(np.column_stack((dataset.data, dataset.target)))
>>> df.to_csv(dset_path)
>>> dt_grid = {'splitter': ['random', 'best'],
... 'max_depth': [2, 4]}
>>> clf = DTC()
>>> dset_path = '/tmp/data.csv'
>>> persistent_grid = PersistentGrid.load_from_path(persistent_grid_path='/tmp/pg.pickle', dataset_path='/tmp/data.csv')
>>> persistent_grid
"""
pass
if __name__ == '__main__':
bpg1 = PersistentGrid(
persistent_grid_path='../../__/persistent_grid.pickle',
dataset_path='../../__/iris_dataset.csv')
bpg2 = PersistentGrid.load_from_path(
persistent_grid_path='../../__/persistent_grid.pickle',
dataset_path='../../__/iris_dataset.csv')
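    # Editor's sketch (not in the original file): a typical cache round trip
    # with the grid created above. The toy estimator, grid and result dict are
    # arbitrary placeholders and assume scikit-learn is installed.
    from sklearn.tree import DecisionTreeClassifier
    toy_clf = DecisionTreeClassifier()
    toy_grid = {'max_depth': [2, 4]}
    if bpg2.retrieve(toy_clf, toy_grid) is None:  # None signals a cache miss.
        bpg2.update(toy_clf, toy_grid, {'best_score': 0.9})
    print(bpg2.retrieve(toy_clf, toy_grid))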
| gpl-3.0 |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/gnmt-tpuv3-8/code/gnmt/model/t2t/tensor2tensor/utils/video/prediction2gif.py | 3 | 6570 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generates gifs out of a video checkpoint.
Usage:
prediction2gif \
--problem="gym_pong_deterministic-v4_random" \
--model="next_frame_sv2p" \
--hparams_set="next_frame_sv2p" \
--output_dir=$CHECKPOINT_DIRECTORY \
--data_dir=$DATA_DIRECTORY \
--output_gif=$USER/out.gif \
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import matplotlib as mpl
import numpy as np
from queue import Queue
from tensor2tensor.bin import t2t_trainer # pylint: disable=unused-import
from tensor2tensor.layers import common_video
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import usr_dir
import tensorflow as tf
mpl.use("Agg")
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_steps", 100, "Number of prediction steps.")
flags.DEFINE_integer("fps", 10, "Generated gif FPS.")
flags.DEFINE_string("output_gif", None, "Output path to save the gif.")
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
trainer_lib.set_random_seed(FLAGS.random_seed)
usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
# Create hparams
hparams = trainer_lib.create_hparams(
FLAGS.hparams_set,
FLAGS.hparams,
data_dir=os.path.expanduser(FLAGS.data_dir),
problem_name=FLAGS.problem)
hparams.force_full_predict = True
hparams.scheduled_sampling_k = -1
# Params
num_agents = 1 # TODO(mbz): fix the code for more agents
num_steps = FLAGS.num_steps
if hasattr(hparams.problem, "num_actions"):
num_actions = hparams.problem.num_actions
else:
num_actions = None
frame_shape = hparams.problem.frame_shape
resized_frame = hparams.preprocess_resize_frames is not None
if resized_frame:
frame_shape = hparams.preprocess_resize_frames
frame_shape += [hparams.problem.num_channels]
dataset = registry.problem(FLAGS.problem).dataset(
tf.estimator.ModeKeys.TRAIN,
shuffle_files=True,
data_dir=os.path.expanduser(FLAGS.data_dir),
hparams=hparams)
dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(num_agents))
data = dataset.make_one_shot_iterator().get_next()
# Setup input placeholders
input_size = [num_agents, hparams.video_num_input_frames]
if num_actions is None:
placeholders = {
"inputs": tf.placeholder(tf.float32, input_size + frame_shape)
}
else:
placeholders = {
"inputs": tf.placeholder(tf.float32, input_size + frame_shape),
"input_action": tf.placeholder(tf.int64, input_size + [1]),
"input_reward": tf.placeholder(tf.int64, input_size + [1]),
"reset_internal_states": tf.placeholder(tf.float32, []),
}
# Create model.
model_cls = registry.model(FLAGS.model)
model = model_cls(hparams, tf.estimator.ModeKeys.PREDICT)
prediction_ops = model.infer(placeholders)
states_q = Queue(maxsize=hparams.video_num_input_frames)
actions_q = Queue(maxsize=hparams.video_num_input_frames)
rewards_q = Queue(maxsize=hparams.video_num_input_frames)
if num_actions is not None:
all_qs = [states_q, actions_q, rewards_q]
else:
all_qs = [states_q]
writer = common_video.WholeVideoWriter(
fps=FLAGS.fps, output_path=FLAGS.output_gif)
saver = tf.train.Saver(tf.trainable_variables())
with tf.train.SingularMonitoredSession() as sess:
# Load latest checkpoint
ckpt = tf.train.get_checkpoint_state(FLAGS.output_dir).model_checkpoint_path
saver.restore(sess.raw_session(), ckpt)
# get init frames from the dataset
data_np = sess.run(data)
frames = np.split(data_np["inputs"], hparams.video_num_input_frames, 1)
for frame in frames:
frame = np.squeeze(frame, 1)
states_q.put(frame)
writer.write(frame[0].astype(np.uint8))
if num_actions is not None:
actions = np.split(data_np["input_action"],
hparams.video_num_input_frames, 1)
for action in actions:
actions_q.put(np.squeeze(action, 1))
rewards = np.split(data_np["input_reward"],
hparams.video_num_input_frames, 1)
for reward in rewards:
rewards_q.put(np.squeeze(reward, 1))
for step in range(num_steps):
print(">>>>>>> ", step)
if num_actions is not None:
random_actions = np.random.randint(num_actions-1)
random_actions = np.expand_dims(random_actions, 0)
random_actions = np.tile(random_actions, (num_agents, 1))
# Shape inputs and targets
inputs, input_action, input_reward = (
np.stack(list(q.queue), axis=1) for q in all_qs)
else:
assert len(all_qs) == 1
q = all_qs[0]
elems = list(q.queue)
# Need to adjust shapes sometimes.
for i, e in enumerate(elems):
if len(e.shape) < 4:
elems[i] = np.expand_dims(e, axis=0)
inputs = np.stack(elems, axis=1)
# Predict next frames
if num_actions is None:
feed = {placeholders["inputs"]: inputs}
else:
feed = {
placeholders["inputs"]: inputs,
placeholders["input_action"]: input_action,
placeholders["input_reward"]: input_reward,
placeholders["reset_internal_states"]: float(step == 0),
}
predictions = sess.run(prediction_ops, feed_dict=feed)
if num_actions is None:
predicted_states = predictions[:, 0]
else:
predicted_states = predictions["targets"][:, 0]
predicted_reward = predictions["target_reward"][:, 0]
# Update queues
if num_actions is None:
new_data = (predicted_states)
else:
new_data = (predicted_states, random_actions, predicted_reward)
for q, d in zip(all_qs, new_data):
q.get()
q.put(d.copy())
writer.write(np.round(predicted_states[0]).astype(np.uint8))
writer.finish_to_disk()
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
avmarchenko/exa | exa/util/mpl.py | 2 | 6838 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Matplotlib Utilities
###############################
"""
import seaborn as sns
import numpy as np
#from mpl_toolkits.mplot3d import Axes3D
legend = {'legend.frameon': True, 'legend.facecolor': 'white',
'legend.fancybox': True, 'patch.facecolor': 'white',
'patch.edgecolor': 'black'}
axis = {'axes.formatter.useoffset': False}
mpl_legend = {'legend.frameon': True, 'legend.facecolor': 'white',
'legend.edgecolor': 'black'}
mpl_mathtext = {'mathtext.default': 'rm'}
mpl_save = {'savefig.format': 'pdf', 'savefig.bbox': 'tight',
'savefig.transparent': True, 'savefig.pad_inches': 0.1,
'pdf.compression': 9}
mpl_rc = mpl_legend
mpl_rc.update(mpl_mathtext)
mpl_rc.update(mpl_save)
sns.set(context='poster', style='white', palette='colorblind', font_scale=1.3,
font='serif', rc=mpl_rc)
def _gen_projected(nxplot, nyplot, projection, figargs):
total = nxplot * nyplot
fig = sns.mpl.pyplot.figure(**figargs)
kwargs = {'projection': projection}
axs = [fig.add_subplot(nxplot, nyplot, i, **kwargs) for i in range(1, total + 1)]
return fig, axs
def _gen_shared(nxplot, nyplot, sharex, sharey, figargs):
fig, axs = sns.mpl.pyplot.subplots(nxplot, nyplot, sharex=sharex,
sharey=sharey, **figargs)
axs = fig.get_axes()
return fig, axs
def _gen_figure(nxplot=1, nyplot=1, figargs=None, projection=None,
sharex='none', joinx=False, sharey='none', joiny=False,
x=None, nxlabel=None, xlabels=None, nxdecimal=None, xmin=None, xmax=None,
y=None, nylabel=None, ylabels=None, nydecimal=None, ymin=None, ymax=None,
z=None, nzlabel=None, zlabels=None, nzdecimal=None, zmin=None, zmax=None,
r=None, nrlabel=None, rlabels=None, nrdecimal=None, rmin=None, rmax=None,
t=None, ntlabel=None, tlabels=None, fontsize=20):
"""
Returns a figure object with as much customization as provided.
"""
figargs = {} if figargs is None else figargs
if projection is not None:
fig, axs = _gen_projected(nxplot, nyplot, projection, figargs)
else:
fig, axs = _gen_shared(nxplot, nyplot, sharex, sharey, figargs)
adj = {}
if joinx: adj.update({'hspace': 0})
if joiny: adj.update({'wspace': 0})
fig.subplots_adjust(**adj)
data = {}
if projection is None:
data = {'x': x, 'y': y}
elif projection == '3d':
data = {'x': x, 'y': y, 'z': z}
elif projection == 'polar':
data = {'r': r, 't': t}
methods = {}
for ax in axs:
if 'x' in data:
methods['x'] = (ax.set_xlim, ax.set_xticks, ax.set_xticklabels,
nxlabel, xlabels, nxdecimal, xmin, xmax)
if 'y' in data:
methods['y'] = (ax.set_ylim, ax.set_yticks, ax.set_yticklabels,
nylabel, ylabels, nydecimal, ymin, ymax)
if 'z' in data:
methods['z'] = (ax.set_zlim, ax.set_zticks, ax.set_zticklabels,
nzlabel, zlabels, nzdecimal, zmin, zmax)
if 'r' in data:
methods['r'] = (ax.set_rlim, ax.set_rticks, ax.set_rgrids,
nrlabel, rlabels, nrdecimal, rmin, rmax)
if 't' in data:
methods['t'] = (ax.set_thetagrids, ntlabel, tlabels)
for dim, arr in data.items():
if dim == 't':
grids, nlabel, labls = methods[dim]
if ntlabel is not None:
theta = np.arange(0, 2 * np.pi, 2 * np.pi / ntlabel)
if labls is not None:
grids(np.degrees(theta), labls, fontsize=fontsize)
else:
grids(np.degrees(theta), fontsize=fontsize)
else:
lim, ticks, labels, nlabel, labls, decs, mins, maxs = methods[dim]
if arr is not None:
amin = mins if mins is not None else arr.min()
amax = maxs if maxs is not None else arr.max()
lim((amin, amax))
elif mins is not None and maxs is not None:
if nlabel is not None:
ticks(np.linspace(amin, amax, nlabel))
if decs is not None:
sub = "{{:.{}f}}".format(decs).format
labels([sub(i) for i in np.linspace(amin, amax, nlabel)])
if labls is not None:
labels(labls)
ax.tick_params(axis=dim, labelsize=fontsize)
return fig
def _plot_surface(x, y, z, nxlabel, nylabel, nzlabel, method,
figargs, axargs):
fig = _gen_figure(x=x, y=y, z=z, nxlabel=nxlabel,
nylabel=nylabel, nzlabel=nzlabel,
figargs=figargs, projection='3d')
ax = fig.get_axes()[0]
convenience = {'wireframe': ax.plot_wireframe,
'contour': ax.contour,
'contourf': ax.contourf,
'trisurf': ax.plot_trisurf,
'scatter': ax.scatter,
'line': ax.plot}
if method not in convenience.keys():
raise Exception('Method must be in {}.'.format(convenience.keys()))
sx, sy = np.meshgrid(x, y)
if method in ['trisurf', 'scatter', 'line']:
if method == 'line':
axargs = {key: val for key, val in axargs.items() if key != 'cmap'}
convenience[method](sx.flatten(), sy.flatten(), z.flatten(), **axargs)
else:
convenience[method](sx, sy, z, **axargs)
return fig
def _plot_contour(x, y, z, vmin, vmax, cbarlabel, ncbarlabel, ncbardecimal,
nxlabel, nylabel, method, colorbar, figargs, axargs):
fig = _gen_figure(x=x, y=y, nxlabel=nxlabel, nylabel=nylabel, figargs=figargs)
ax = fig.get_axes()[0]
convenience = {'contour': ax.contour,
'contourf': ax.contourf,
'pcolormesh': ax.pcolormesh,
'pcolor': ax.pcolor}
if method not in convenience.keys():
raise Exception('method must be in {}'.format(convenience.keys()))
t = convenience[method](x, y, z, **axargs)
cbar = fig.colorbar(t) if colorbar else None
if cbar is not None and cbarlabel is not None:
cbar.set_label(cbarlabel)
if cbar is not None and ncbarlabel is not None:
newticks = np.linspace(vmin, vmax, ncbarlabel)
cbar.set_ticks(newticks)
if ncbardecimal is not None:
fmt = '{{:.{}f}}'.format(ncbardecimal).format
cbar.set_ticklabels([fmt(i) for i in newticks])
return fig, cbar
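# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal call of
# ``_plot_surface`` drawing a wireframe of z = sin(x) * cos(y). The grid sizes,
# label counts and output filename are arbitrary illustrative choices.
if __name__ == '__main__':
    x = np.linspace(-2, 2, 30)
    y = np.linspace(-2, 2, 40)
    # z must be shaped like np.meshgrid(x, y), i.e. (len(y), len(x)).
    z = np.sin(x[np.newaxis, :]) * np.cos(y[:, np.newaxis])
    fig = _plot_surface(x, y, z, nxlabel=5, nylabel=5, nzlabel=5,
                        method='wireframe', figargs={}, axargs={})
    fig.savefig('surface_example.pdf')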
| apache-2.0 |
SANDAG/urbansim | urbansim/models/supplydemand.py | 5 | 7150 | """
Tools for modeling how supply and demand affect real estate prices.
"""
import logging
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def _calculate_adjustment(
lcm, choosers, alternatives, alt_segmenter,
clip_change_low, clip_change_high, multiplier_func=None):
"""
Calculate adjustments to prices to compensate for
supply and demand effects.
Parameters
----------
lcm : LocationChoiceModel
Used to calculate the probability of agents choosing among
alternatives. Must be fully configured and fitted.
choosers : pandas.DataFrame
alternatives : pandas.DataFrame
alt_segmenter : pandas.Series
Will be used to segment alternatives and probabilities to do
comparisons of supply and demand by submarket.
clip_change_low : float
The minimum amount by which to multiply prices each iteration.
clip_change_high : float
The maximum amount by which to multiply prices each iteration.
multiplier_func : function (returns Series, boolean)
A function which takes separate demand and supply Series
and returns a tuple where the first item is a Series with the
ratio of new price to old price (all indexes should be the same) -
by default the ratio of demand to supply is the ratio of the new
price to the old price. The second return value is a
boolean which when True tells this module to stop looping (that
convergence has been satisfied)
Returns
-------
alts_muliplier : pandas.Series
Same index as `alternatives`, values clipped to `clip_change_low`
and `clip_change_high`.
submarkets_multiplier : pandas.Series
Index is unique values from `alt_segmenter`, values are the ratio
of demand / supply for each segment in `alt_segmenter`.
finished : boolean
boolean indicator that this adjustment should be considered the
final adjustment (if True). If false, the iterative algorithm
should continue.
"""
logger.debug('start: calculate supply and demand price adjustment ratio')
# probabilities of agents choosing * number of agents = demand
demand = lcm.summed_probabilities(choosers, alternatives)
# group by submarket
demand = demand.groupby(alt_segmenter.loc[demand.index].values).sum()
# number of alternatives
supply = alt_segmenter.value_counts()
if multiplier_func is not None:
multiplier, finished = multiplier_func(demand, supply)
else:
multiplier, finished = (demand / supply), False
multiplier = multiplier.clip(clip_change_low, clip_change_high)
# broadcast multiplier back to alternatives index
alts_muliplier = multiplier.loc[alt_segmenter]
alts_muliplier.index = alt_segmenter.index
logger.debug(
('finish: calculate supply and demand price adjustment multiplier '
'with mean multiplier {}').format(multiplier.mean()))
return alts_muliplier, multiplier, finished
def supply_and_demand(
lcm, choosers, alternatives, alt_segmenter, price_col,
base_multiplier=None, clip_change_low=0.75, clip_change_high=1.25,
iterations=5, multiplier_func=None):
"""
Adjust real estate prices to compensate for supply and demand effects.
Parameters
----------
lcm : LocationChoiceModel
Used to calculate the probability of agents choosing among
alternatives. Must be fully configured and fitted.
choosers : pandas.DataFrame
alternatives : pandas.DataFrame
alt_segmenter : str, array, or pandas.Series
Will be used to segment alternatives and probabilities to do
comparisons of supply and demand by submarket.
If a string, it is expected to be the name of a column
in `alternatives`. If a Series it should have the same index
as `alternatives`.
price_col : str
The name of the column in `alternatives` that corresponds to price.
This column is what is adjusted by this model.
base_multiplier : pandas.Series, optional
A series describing a starting multiplier for submarket prices.
Index should be submarket IDs.
clip_change_low : float, optional
The minimum amount by which to multiply prices each iteration.
clip_change_high : float, optional
The maximum amount by which to multiply prices each iteration.
iterations : int, optional
Number of times to update prices based on supply/demand comparisons.
multiplier_func : function (returns Series, boolean)
A function which takes separate demand and supply Series
and returns a tuple where the first item is a Series with the
ratio of new price to old price (all indexes should be the same) -
by default the ratio of demand to supply is the ratio of the new
price to the old price. The second return value is a
boolean which when True tells this module to stop looping (that
convergence has been satisfied)
Returns
-------
new_prices : pandas.Series
Equivalent of the `price_col` in `alternatives`.
submarkets_ratios : pandas.Series
Price adjustment ratio for each submarket. If `base_multiplier` is
given this will be a cumulative multiplier including the
`base_multiplier` and the multipliers calculated for this year.
"""
logger.debug('start: calculating supply and demand price adjustment')
# copy alternatives so we don't modify the user's original
alternatives = alternatives.copy()
# if alt_segmenter is a string, get the actual column for segmenting demand
if isinstance(alt_segmenter, str):
alt_segmenter = alternatives[alt_segmenter]
elif isinstance(alt_segmenter, np.ndarray):
alt_segmenter = pd.Series(alt_segmenter, index=alternatives.index)
choosers, alternatives = lcm.apply_predict_filters(choosers, alternatives)
alt_segmenter = alt_segmenter.loc[alternatives.index]
# check base ratio and apply it to prices if given
if base_multiplier is not None:
bm = base_multiplier.loc[alt_segmenter]
bm.index = alt_segmenter.index
alternatives[price_col] = alternatives[price_col] * bm
base_multiplier = base_multiplier.copy()
for _ in range(iterations):
alts_muliplier, submarkets_multiplier, finished = _calculate_adjustment(
lcm, choosers, alternatives, alt_segmenter,
clip_change_low, clip_change_high, multiplier_func=multiplier_func)
alternatives[price_col] = alternatives[price_col] * alts_muliplier
# might need to initialize this for holding cumulative multiplier
if base_multiplier is None:
base_multiplier = pd.Series(
np.ones(len(submarkets_multiplier)),
index=submarkets_multiplier.index)
base_multiplier *= submarkets_multiplier
if finished:
break
logger.debug('finish: calculating supply and demand price adjustment')
return alternatives[price_col], base_multiplier
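# Editor's sketch (not part of the original module): one possible
# ``multiplier_func`` matching the contract documented above -- it takes the
# per-submarket demand and supply Series and returns (ratio, finished). The
# 1% convergence tolerance is an arbitrary illustrative choice.
def _example_multiplier_func(demand, supply, tol=0.01):
    ratio = demand / supply
    finished = bool((ratio - 1.0).abs().max() < tol)
    return ratio, finished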
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 41 | 3668 | from __future__ import unicode_literals
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import FeatureHasher
from sklearn.utils.testing import assert_raises, assert_true, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
{"foo": "baz", "gaga": u"string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_feature_hasher_pairs_with_string_values():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
{"baz": u"abc", "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 1], x1_nz)
assert_equal([1, 1, 4], x2_nz)
raw_X = (iter(d.items()) for d in [{"bax": "abc"},
{"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert_equal([1], x1_nz)
assert_equal([1], x2_nz)
assert_array_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
wheeler-microfluidics/dmf_control_board | docs/rename.py | 3 | 2578 | import sys
import pandas as pd
from path_helpers import path
def main(root, old_name, new_name):
names = pd.Series([old_name, new_name], index=['old', 'new'])
underscore_names = names.map(lambda v: v.replace('-', '_'))
camel_names = names.str.split('-').map(lambda x: ''.join([y.title()
for y in x]))
# Replace all occurrences of provided original name with new name, and all
# occurrences where dashes (i.e., '-') are replaced with underscores.
#
# Dashes are used in Python package names, but underscores are used in
# Python module names.
for p in path(root).walkfiles():
data = p.bytes()
if '.git' not in p and (names.old in data or
underscore_names.old in data or
camel_names.old in data):
p.write_bytes(data.replace(names.old, names.new)
.replace(underscore_names.old, underscore_names.new)
.replace(camel_names.old, camel_names.new))
def rename_path(p):
if '.git' in p:
return
if underscore_names.old in p.name:
p.rename(p.parent.joinpath(p.name.replace(underscore_names.old,
underscore_names.new)))
if camel_names.old in p.name:
p.rename(p.parent.joinpath(p.name.replace(camel_names.old,
camel_names.new)))
# Rename all files/directories containing original name with new name, and
# all occurrences where dashes (i.e., '-') are replaced with underscores.
#
# Process list of paths in *reverse order* to avoid renaming parent
# directories before children.
for p in sorted(list(path(root).walkdirs()))[-1::-1]:
rename_path(p)
for p in path(root).walkfiles():
rename_path(p)
def parse_args(args=None):
"""Parses arguments, returns (options, args)."""
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='Rename template project with '
'hyphen-separated <new name> (path names and in '
'files).')
parser.add_argument('new_name', help='New project name (e.g., '
' `my-new-project`)')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
main('.', 'dmf-control-board-firmware', args.new_name)
| gpl-3.0 |
kubeflow/kfserving | docs/samples/v1alpha2/transformer/image_transformer/setup.py | 1 | 1396 | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
tests_require = [
'pytest',
'pytest-tornasync',
'mypy'
]
setup(
name='transformer',
version='0.1.0',
author_email='[email protected]',
license='../../LICENSE.txt',
url='https://github.com/kubeflow/kfserving/docs/sameples/transformer',
description='Transformer',
long_description=open('README.md').read(),
python_requires='>=3.6',
packages=find_packages("transformer"),
install_requires=[
"kfserving>=0.6.0",
"argparse>=1.4.0",
"requests>=2.22.0",
"joblib>=0.13.2",
"pandas>=0.24.2",
"numpy>=1.16.3",
"kubernetes >= 9.0.0",
"torchvision>=0.4.1",
"pillow==8.1.1"
],
tests_require=tests_require,
extras_require={'test': tests_require}
)
| apache-2.0 |
freegnu/spacemacs-portable | elpa/ein-20170201.751/ein.py | 2 | 3105 | """
Python utilities to use it from ein.el
Copyright (C) 2012- Takafumi Arakaki
Author: Takafumi Arakaki <aka.tkf at gmail.com>
ein.py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ein.py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ein.py. If not, see <http://www.gnu.org/licenses/>.
"""
def export_nb(nb_json, format):
import IPython.nbconvert as nbconvert
import IPython.nbformat as nbformat
nb = nbformat.reads(nb_json, nbformat.NO_CONVERT)
output = nbconvert.export_by_name(format, nb)
print(output[0])
def _find_edit_target_012(*args, **kwds):
from IPython.core.interactiveshell import InteractiveShell
inst = InteractiveShell.instance()
return inst._find_edit_target(*args, **kwds)
def _find_edit_target_013(*args, **kwds):
from IPython.core.interactiveshell import InteractiveShell
inst = InteractiveShell.instance()
return CodeMagics._find_edit_target(inst, *args, **kwds)
def _find_edit_target_python(name):
from inspect import getsourcefile, getsourcelines
try:
obj = eval(name)
except NameError:
return False
else:
sfile = getsourcefile(obj)
sline = getsourcelines(obj)[-1]
if sfile and sline:
return(sfile, sline, False)
else:
return False
try:
from IPython.core.magics import CodeMagics
_find_edit_target = _find_edit_target_013
except ImportError:
_find_edit_target = _find_edit_target_012
def set_figure_size(*dim):
try:
from matplotlib.pyplot import rcParams
rcParams['figure.figsize'] = dim
except:
raise RuntimeError("Matplotlib not installed in this instance of python!")
def find_source(name):
"""Given an object as string, `name`, print its place in source code."""
# FIXME: use JSON display object instead of stdout
ret = _find_edit_target_python(name) or _find_edit_target(name, {}, [])
if ret:
(filename, lineno, use_temp) = ret
if not use_temp:
print(filename)
print(lineno)
return
raise RuntimeError("Source code for {0} cannot be found".format(name))
def run_docstring_examples(obj, verbose=True):
from IPython.core.interactiveshell import InteractiveShell
import doctest
inst = InteractiveShell.instance()
globs = inst.user_ns
return doctest.run_docstring_examples(obj, globs, verbose=verbose)
def print_object_info_for(obj):
import IPython.core.oinspect
import json
inspector = IPython.core.oinspect.Inspector()
try:
print(json.dumps(inspector.info(obj)))
except NameError:
print(json.dumps(inspector.noinfo()))
| agpl-3.0 |
rrohan/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 140 | 6926 | """Testing for the voting classifier module (sklearn.ensemble.voting_classifier)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
| bsd-3-clause |
nvoron23/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
dbendet/sklearn_pycon2015 | notebooks/fig_code/ML_flow_chart.py | 61 | 4970 | """
Tutorial Diagrams
-----------------
This script plots the flow-charts used in the scikit-learn tutorials.
"""
import numpy as np
import pylab as pl
from matplotlib.patches import Circle, Rectangle, Polygon, Arrow, FancyArrow
def create_base(box_bg = '#CCCCCC',
arrow1 = '#88CCFF',
arrow2 = '#88FF88',
supervised=True):
fig = pl.figure(figsize=(9, 6), facecolor='w')
ax = pl.axes((0, 0, 1, 1),
xticks=[], yticks=[], frameon=False)
ax.set_xlim(0, 9)
ax.set_ylim(0, 6)
patches = [Rectangle((0.3, 3.6), 1.5, 1.8, zorder=1, fc=box_bg),
Rectangle((0.5, 3.8), 1.5, 1.8, zorder=2, fc=box_bg),
Rectangle((0.7, 4.0), 1.5, 1.8, zorder=3, fc=box_bg),
Rectangle((2.9, 3.6), 0.2, 1.8, fc=box_bg),
Rectangle((3.1, 3.8), 0.2, 1.8, fc=box_bg),
Rectangle((3.3, 4.0), 0.2, 1.8, fc=box_bg),
Rectangle((0.3, 0.2), 1.5, 1.8, fc=box_bg),
Rectangle((2.9, 0.2), 0.2, 1.8, fc=box_bg),
Circle((5.5, 3.5), 1.0, fc=box_bg),
Polygon([[5.5, 1.7],
[6.1, 1.1],
[5.5, 0.5],
[4.9, 1.1]], fc=box_bg),
FancyArrow(2.3, 4.6, 0.35, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.75, 4.2, 0.5, -0.2, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(5.5, 2.4, 0, -0.4, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(2.0, 1.1, 0.5, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.3, 1.1, 1.3, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(6.2, 1.1, 0.8, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2)]
if supervised:
patches += [Rectangle((0.3, 2.4), 1.5, 0.5, zorder=1, fc=box_bg),
Rectangle((0.5, 2.6), 1.5, 0.5, zorder=2, fc=box_bg),
Rectangle((0.7, 2.8), 1.5, 0.5, zorder=3, fc=box_bg),
FancyArrow(2.3, 2.9, 2.0, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
Rectangle((7.3, 0.85), 1.5, 0.5, fc=box_bg)]
else:
patches += [Rectangle((7.3, 0.2), 1.5, 1.8, fc=box_bg)]
for p in patches:
ax.add_patch(p)
pl.text(1.45, 4.9, "Training\nText,\nDocuments,\nImages,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.6, 4.9, "Feature\nVectors",
ha='left', va='center', fontsize=14)
pl.text(5.5, 3.5, "Machine\nLearning\nAlgorithm",
ha='center', va='center', fontsize=14)
pl.text(1.05, 1.1, "New Text,\nDocument,\nImage,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.3, 1.7, "Feature\nVector",
ha='left', va='center', fontsize=14)
pl.text(5.5, 1.1, "Predictive\nModel",
ha='center', va='center', fontsize=12)
if supervised:
pl.text(1.45, 3.05, "Labels",
ha='center', va='center', fontsize=14)
pl.text(8.05, 1.1, "Expected\nLabel",
ha='center', va='center', fontsize=14)
pl.text(8.8, 5.8, "Supervised Learning Model",
ha='right', va='top', fontsize=18)
else:
pl.text(8.05, 1.1,
"Likelihood\nor Cluster ID\nor Better\nRepresentation",
ha='center', va='center', fontsize=12)
pl.text(8.8, 5.8, "Unsupervised Learning Model",
ha='right', va='top', fontsize=18)
def plot_supervised_chart(annotate=False):
create_base(supervised=True)
if annotate:
fontdict = dict(color='r', weight='bold', size=14)
pl.text(1.9, 4.55, 'X = vec.fit_transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(3.7, 3.2, 'clf.fit(X, y)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(1.7, 1.5, 'X_new = vec.transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(6.1, 1.5, 'y_new = clf.predict(X_new)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
def plot_unsupervised_chart():
create_base(supervised=False)
if __name__ == '__main__':
plot_supervised_chart(False)
plot_supervised_chart(True)
plot_unsupervised_chart()
pl.show()
| bsd-3-clause |
mhdella/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
girving/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py | 39 | 20233 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
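# Illustrative sketch of how the two helpers above compose (not part of the
# original tests; the numbers are arbitrary): three well-separated centers in
# two dimensions and fifty noisy points drawn around them.
#
#   centers = make_random_centers(num_centers=3, num_dims=2)          # (3, 2)
#   points, assignments, sq_offsets = make_random_points(centers, 50)
#   # points: (50, 2), assignments: (50,), sq_offsets: squared noise per point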
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
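  # Usage sketch (mirrors the tests below; the argument values are examples
  # only): the callable returned by input_fn is handed to Estimator.fit/score.
  #
  #   kmeans.fit(input_fn=self.input_fn(batch_size=32, num_epochs=1), steps=10)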
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
steps = 10 * self.num_points // self.batch_size
kmeans.fit(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=learn.RunConfig(tf_random_seed=14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.fit(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertNear(self.true_score, score, self.true_score * 0.01)
def _infer_helper(self, kmeans, clusters, num_points):
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
# Test predict
assignments = list(
kmeans.predict_cluster_idx(input_fn=self.input_fn(
batch_size=num_points, points=points, num_epochs=1)))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(
input_fn=lambda: (constant_op.constant(points), None))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1,
keepdims=True) - 2 * np.dot(points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.clusters()
# Run inference on small datasets.
self._infer_helper(kmeans, clusters, num_points=10)
self._infer_helper(kmeans, clusters, num_points=1)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.fit(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0, keepdims=True))[
0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0, keepdims=True))[
0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.fit(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.clusters())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = self.kmeans.transform(input_fn=self.input_fn(
batch_size=self.num_points))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
assignments = list(
self.kmeans.predict_cluster_idx(input_fn=self.input_fn(
num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[4:, :], axis=0, keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.clusters())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=run_config.RunConfig(tf_random_seed=3))
tf_kmeans.fit(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.clusters()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None),
steps=1))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
morrisonwudi/zipline | zipline/protocol.py | 15 | 17562 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from six import iteritems, iterkeys
import pandas as pd
from pandas.tseries.tools import normalize_date
import numpy as np
from . utils.protocol_utils import Enum
from . utils.math_utils import nanstd, nanmean, nansum
from zipline.finance.trading import with_environment
from zipline.utils.algo_instance import get_algo_instance
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = Enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION',
'CLOSE_POSITION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
'declared_date',
'ex_date',
'gross_amount',
'net_amount',
'pay_date',
'payment_sid',
'ratio',
'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = [
'id',
'payment_sid',
'cash_amount',
'share_count',
]
def dividend_payment(data=None):
"""
Take a dictionary whose values are in DIVIDEND_PAYMENT_FIELDS and return a
series representing the payment of a dividend.
Ids are assigned to each historical dividend in
PerformanceTracker.update_dividends. They are guaranteed to be unique
    integers within the context of a single simulation. If @data is non-empty,
    an id is required to identify the historical dividend associated with this
payment.
Additionally, if @data is non-empty, either data['cash_amount'] should be
nonzero or data['payment_sid'] should be an asset identifier and
data['share_count'] should be nonzero.
The returned Series is given its id value as a name so that concatenating
payments results in a DataFrame indexed by id. (Note, however, that the
name value is not used to construct an index when this series is returned
by function passed to `DataFrame.apply`. In such a case, pandas preserves
the index of the DataFrame on which `apply` is being called.)
"""
return pd.Series(
data=data,
name=data['id'] if data is not None else None,
index=DIVIDEND_PAYMENT_FIELDS,
dtype=object,
)
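# Illustrative usage sketch (not part of the original module; the field values
# below are invented): build one payment Series, or stack several of them into
# the id-indexed DataFrame described above.
#
#   payment = dividend_payment({'id': 1, 'payment_sid': 24,
#                               'cash_amount': 100.0, 'share_count': 0})
#   frame = pd.concat([payment], axis=1).T   # rows indexed by payment id
#
# Calling dividend_payment() with no data yields an all-NaN Series with the
# DIVIDEND_PAYMENT_FIELDS index, which is convenient as an empty payment.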
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__ = initial_values
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
class Order(Event):
pass
class Portfolio(object):
def __init__(self):
self.capital_used = 0.0
self.starting_cash = 0.0
self.portfolio_value = 0.0
self.pnl = 0.0
self.returns = 0.0
self.cash = 0.0
self.positions = Positions()
self.start_date = None
self.positions_value = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
# Have to convert to primitive dict
state_dict['positions'] = dict(self.positions)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Portfolio saved state is too old.")
self.positions = Positions()
self.positions.update(state.pop('positions'))
self.__dict__.update(state)
class Account(object):
'''
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
'''
def __init__(self):
self.settled_cash = 0.0
self.accrued_interest = 0.0
self.buying_power = float('inf')
self.equity_with_loan = 0.0
self.total_positions_value = 0.0
self.regt_equity = 0.0
self.regt_margin = float('inf')
self.initial_margin_requirement = 0.0
self.maintenance_margin_requirement = 0.0
self.available_funds = 0.0
self.excess_liquidity = 0.0
self.cushion = 0.0
self.day_trades_remaining = float('inf')
self.leverage = 0.0
self.net_leverage = 0.0
self.net_liquidation = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Account({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Account saved state is too old.")
self.__dict__.update(state)
class Position(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Position({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Protocol Position saved state is too old.")
self.__dict__.update(state)
class Positions(dict):
def __missing__(self, key):
pos = Position(key)
self[key] = pos
return pos
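# Positions acts like a defaultdict: looking up an unknown sid lazily creates
# an empty Position for it. For example (hypothetical sid):
#
#   positions = Positions()
#   positions[24].amount   # -> 0; Position(24) was created on first access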
class SIDData(object):
# Cache some data on the class so that this is shared for all instances of
# siddata.
# The dt where we cached the history.
_history_cache_dt = None
    # _history_cache is a dict mapping field names to tuples of
    # (DataFrame, DataFrame.values, DataFrame.columns). This is the most data
    # we have for a given field for the _history_cache_dt.
_history_cache = {}
# This is the cache that is used for returns. This will have a different
# structure than the other history cache as this is always daily.
_returns_cache_dt = None
_returns_cache = None
# The last dt that we needed to cache the number of minutes.
_minute_bar_cache_dt = None
# If we are in minute mode, there is some cost associated with computing
# the number of minutes that we need to pass to the bar count of history.
# This will remain constant for a given bar and day count.
# This maps days to number of minutes.
_minute_bar_cache = {}
def __init__(self, sid, initial_values=None):
self._sid = sid
self._freqstr = None
# To check if we have data, we use the __len__ which depends on the
        # __dict__. Because we are forward defining the attributes needed, we
        # need to account for their entries in the __dict__.
# We will add 1 because we need to account for the _initial_len entry
# itself.
self._initial_len = len(self.__dict__) + 1
if initial_values:
self.__dict__.update(initial_values)
@property
def datetime(self):
"""
Provides an alias from data['foo'].datetime -> data['foo'].dt
        `datetime` was previously provided by adding a separate `datetime`
member of the SIDData object via a generator that wrapped the incoming
data feed and added the field to each equity event.
This alias is intended to be temporary, to provide backwards
compatibility with existing algorithms, but should be considered
deprecated, and may be removed in the future.
"""
return self.dt
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __getitem__(self, name):
return self.__dict__[name]
def __setitem__(self, name, value):
self.__dict__[name] = value
def __len__(self):
return len(self.__dict__) - self._initial_len
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "SIDData({0})".format(self.__dict__)
def _get_buffer(self, bars, field='price', raw=False):
"""
Gets the result of history for the given number of bars and field.
This will cache the results internally.
"""
cls = self.__class__
algo = get_algo_instance()
now = algo.datetime
if now != cls._history_cache_dt:
# For a given dt, the history call for this field will not change.
# We have a new dt, so we should reset the cache.
cls._history_cache_dt = now
cls._history_cache = {}
if field not in self._history_cache \
or bars > len(cls._history_cache[field][0].index):
# If we have never cached this field OR the amount of bars that we
# need for this field is greater than the amount we have cached,
# then we need to get more history.
hst = algo.history(
bars, self._freqstr, field, ffill=True,
)
# Assert that the column holds ints, not security objects.
if not isinstance(self._sid, str):
hst.columns = hst.columns.astype(int)
self._history_cache[field] = (hst, hst.values, hst.columns)
        # Slice off only the bars needed. This is because we store the LARGEST
# amount of history for the field, and we might request less than the
# largest from the cache.
buffer_, values, columns = cls._history_cache[field]
if raw:
sid_index = columns.get_loc(self._sid)
return values[-bars:, sid_index]
else:
return buffer_[self._sid][-bars:]
def _get_bars(self, days):
"""
Gets the number of bars needed for the current number of days.
        Figures this out based on the algo data frequency and caches the result.
This caches the result by replacing this function on the object.
This means that after the first call to _get_bars, this method will
point to a new function object.
"""
def daily_get_max_bars(days):
return days
def minute_get_max_bars(days):
            # Max number of minutes, regardless of the current day or short
            # sessions.
return days * 390
def daily_get_bars(days):
return days
@with_environment()
def minute_get_bars(days, env=None):
cls = self.__class__
now = get_algo_instance().datetime
if now != cls._minute_bar_cache_dt:
cls._minute_bar_cache_dt = now
cls._minute_bar_cache = {}
if days not in cls._minute_bar_cache:
# Cache this calculation to happen once per bar, even if we
# use another transform with the same number of days.
prev = env.previous_trading_day(now)
ds = env.days_in_range(
env.add_trading_days(-days + 2, prev),
prev,
)
# compute the number of minutes in the (days - 1) days before
# today.
                # 210 minutes in an early close and 390 in a full day.
ms = sum(210 if d in env.early_closes else 390 for d in ds)
# Add the number of minutes for today.
ms += int(
(now - env.get_open_and_close(now)[0]).total_seconds() / 60
)
cls._minute_bar_cache[days] = ms + 1 # Account for this minute
return cls._minute_bar_cache[days]
if get_algo_instance().sim_params.data_frequency == 'daily':
self._freqstr = '1d'
# update this method to point to the daily variant.
self._get_bars = daily_get_bars
self._get_max_bars = daily_get_max_bars
else:
self._freqstr = '1m'
# update this method to point to the minute variant.
self._get_bars = minute_get_bars
self._get_max_bars = minute_get_max_bars
# Not actually recursive because we have already cached the new method.
return self._get_bars(days)
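    # Behaviour sketch for the self-replacing method above (illustrative only,
    # no additional API): the first call picks the daily or minute variant and
    # rebinds it on the instance, so later calls skip the frequency check.
    #
    #   sid_data._get_bars(5)   # chooses variant, caches it on the instance
    #   sid_data._get_bars(5)   # now dispatches straight to the cached variant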
def mavg(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
return nanmean(prices)
def stddev(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
return nanstd(prices, ddof=1)
def vwap(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
vols = self._get_buffer(max_bars, field='volume', raw=True)[-bars:]
vol_sum = nansum(vols)
try:
ret = nansum(prices * vols) / vol_sum
except ZeroDivisionError:
ret = np.nan
return ret
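    # Worked example for vwap (hypothetical numbers): prices [10., 11.] with
    # volumes [100., 300.] give (10*100 + 11*300) / (100 + 300) = 10.75.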
def returns(self):
algo = get_algo_instance()
now = algo.datetime
if now != self._returns_cache_dt:
self._returns_cache_dt = now
self._returns_cache = algo.history(2, '1d', 'price', ffill=True)
hst = self._returns_cache[self._sid]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
class BarData(object):
"""
Holds the event data for all sids for a given dt.
This is what is passed as `data` to the `handle_data` function.
    Note: Many methods are analogues of dictionary methods for historical
    reasons: this class replaced what used to be a dictionary subclass.
"""
def __init__(self, data=None):
self._data = data or {}
self._contains_override = None
self._factor_matrix = None
self._factor_matrix_expires = pd.Timestamp(0, tz='UTC')
@property
def factors(self):
algo = get_algo_instance()
today = normalize_date(algo.get_datetime())
if today > self._factor_matrix_expires:
self._factor_matrix, self._factor_matrix_expires = \
algo.compute_factor_matrix(today)
return self._factor_matrix.loc[today]
def __contains__(self, name):
if self._contains_override:
if self._contains_override(name):
return name in self._data
else:
return False
else:
return name in self._data
def has_key(self, name):
"""
DEPRECATED: __contains__ is preferred, but this method is for
compatibility with existing algorithms.
"""
return name in self
def __setitem__(self, name, value):
self._data[name] = value
def __getitem__(self, name):
return self._data[name]
def __delitem__(self, name):
del self._data[name]
def __iter__(self):
for sid, data in iteritems(self._data):
# Allow contains override to filter out sids.
if sid in self:
if len(data):
yield sid
def iterkeys(self):
# Allow contains override to filter out sids.
return (sid for sid in iterkeys(self._data) if sid in self)
def keys(self):
# Allow contains override to filter out sids.
return list(self.iterkeys())
def itervalues(self):
return (value for _sid, value in self.iteritems())
def values(self):
return list(self.itervalues())
def iteritems(self):
return ((sid, value) for sid, value
in iteritems(self._data)
if sid in self)
def items(self):
return list(self.iteritems())
def __len__(self):
return len(self.keys())
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self._data)
| apache-2.0 |
alexblaessle/PyFRAP | pyfrp/modules/pyfrp_plot_module.py | 1 | 26786 | #=====================================================================================================================================
#Copyright
#=====================================================================================================================================
#Copyright (C) 2014 Alexander Blaessle, Patrick Mueller and the Friedrich Miescher Laboratory of the Max Planck Society
#This software is distributed under the terms of the GNU General Public License.
#This file is part of PyFRAP.
#PyFRAP is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#===========================================================================================================================================================================
#Module Description
#===========================================================================================================================================================================
"""Plotting module for PyFRAP toolbox.
Contains functions and classes that are often used by the PyFRAP toolbox and that simplify plot creation and management.
"""
#===========================================================================================================================================================================
#Importing necessary modules
#===========================================================================================================================================================================
#numpy
import numpy as np
import scipy.interpolate
#Plotting
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import matplotlib.patches as ptc
#Misc
import sys
import os
#PyFRAP
import pyfrp_img_module
from pyfrp_term_module import *
from pyfrp.modules import pyfrp_idx_module
#===========================================================================================================================================================================
#Module Functions
#===========================================================================================================================================================================
class FRAPBoundarySelector():
"""Simple GUI widget to select circular FRAP boundaries.
Has useful center marker that is activatable that helps finding the
center of the image.
Mouse Input:
* Left: Set center.
* Right: Set Radius.
* Middle: Activate center marker.
Keyboard Input:
* Left Arrow: Move center to the left.
* Right Arrow: Move center to the right.
* Up Arrow: Move center upwards.
* Down Arrow: Move center downwards.
* Control + Up Arrow: Increase circle radius.
* Control + Down Arrow: Decrease circle radius.
.. note:: If ``embryo`` is given at initiation, will use first image specified
in embryo's ``fileList`` as background image.
Example Usage:
>>> sel=FRAPBoundarySelector(fn="path/to/img/file")
Use mouse/keyboard to define circular boundary.
>>> center,radius=sel.getResults()
Keyword Args:
embryo (pyfrp.subclasses.pyfrp_embryo.embryo): PyFRAP embryo instance.
fn (str): Filepath to image file taken for boundary selection.
"""
def __init__(self,embryo=None,fn=None,img=None):
#Passing embryo to class
self.embryo=embryo
self.fn=fn
#Img
self.img=img
#Plot resolution
self.dpi = 100
#Some bookkeeping variables
self.radiusPt=[]
self.centerPt=[]
self.centerMarker=[]
#Results
self.radius=None
self.center=None
#Creating figure and canvas
self.createCanvas()
		#Close everything down if input is faulty
self.checkInput()
#Plot image if existent
self.showFirstDataImg()
plt.show()
def checkInput(self):
"""Checks if at least one of the two keyword arguments, ``embryo`` or ``fn``, is given.
If not, prints error message and closes down widget.
"""
if self.fn==None and self.embryo==None and self.img==None:
printError("No Embryo, image or fn defined. Going to exit.")
plt.close(self.fig)
return
def createCanvas(self):
"""Creates figure and canvas used for plotting."""
h=500/self.dpi
v=500/self.dpi
self.fig = plt.figure()
self.fig.show()
self.fig.set_size_inches(h,v,forward=True)
self.canvas=self.fig.canvas
self.ax = self.fig.add_subplot(111)
if self.embryo!=None:
self.ax.set_xlim([0, self.embryo.dataResPx])
self.ax.set_ylim([0, self.embryo.dataResPx])
#Connect to mouseclick event
self.fig.canvas.mpl_connect('close_event', self.closeFigure)
self.canvas.mpl_connect('button_press_event', self.getMouseInput)
self.canvas.mpl_connect('key_press_event', self.keyPressed)
self.canvas.draw()
return
def keyPressed(self,event):
"""Directs all key press events to the respective functions."""
if event.key=='left':
self.moveLeft()
elif event.key=='right':
self.moveRight()
elif event.key=='up':
self.moveUp()
elif event.key=='down':
self.moveDown()
elif event.key=='ctrl+up':
self.increaseRadius()
elif event.key=='ctrl+down':
self.decreaseRadius()
def moveLeft(self):
"""Moves center 1 px to the left."""
if self.center!=None:
self.center=[self.center[0]-1,self.center[1]]
			self.redraw()
def moveRight(self):
"""Moves center 1 px to the right."""
if self.center!=None:
self.center=[self.center[0]+1,self.center[1]]
self.redraw()
def moveUp(self):
"""Moves center 1 px up."""
if self.center!=None:
self.center=[self.center[0],self.center[1]+1]
self.redraw()
def moveDown(self):
"""Moves center 1 px down."""
if self.center!=None:
self.center=[self.center[0],self.center[1]-1]
self.redraw()
def redraw(self):
"""Redraws both center and radius if available."""
if self.center!=None:
self.drawCenter()
if self.radius!=None:
self.drawRadius()
def increaseRadius(self):
"""Increases radius by 1 px."""
if self.radius!=None:
self.radius=self.radius+1
self.redraw()
def decreaseRadius(self):
"""Decreases radius by 1 px."""
if self.radius!=None:
self.radius=self.radius-1
self.redraw()
def closeFigure(self,event):
"""Returns center and radius at close ``event``."""
return self.center,self.radius
def getEmbryo(self):
"""Returns ``embryo`` object if given.``"""
return self.embryo
def getResults(self):
"""Returns center and radius."""
return self.center,self.radius
def drawCenterMarker(self):
"""Draws a yellow marker in center of the image, making it
easier to find image center when selecting center of boundary."""
centerImg=[self.img.shape[0]/2.,self.img.shape[1]/2.]
if len(self.centerMarker)>0:
self.clearCenterMarker()
else:
pt=ptc.Circle(centerImg,radius=3,fill=True,color='y')
self.centerMarker.append(self.ax.add_patch(pt))
self.fig.canvas.draw()
return self.centerMarker
def clearCenterMarker(self):
"""Removes center maker from canvas."""
for pt in self.centerMarker:
pt.remove()
self.fig.canvas.draw()
self.centerMarker=[]
def drawCenter(self):
"""Draws a red marker at selected center on canvas."""
if len(self.centerPt)>0:
self.clearCenter()
pt=ptc.Circle(self.center,radius=3,fill=True,color='r')
self.centerPt.append(self.ax.add_patch(pt))
self.fig.canvas.draw()
return self.centerPt
def clearCenter(self):
"""Removes center marker from canvas."""
for pt in self.centerPt:
pt.remove()
self.centerPt=[]
self.fig.canvas.draw()
def drawRadius(self):
"""Draws a red circle around selected center with selected radius on canvas."""
if len(self.radiusPt)>0:
self.clearRadius()
pt=ptc.Circle(self.center,radius=self.radius,fill=False,color='r')
self.radiusPt.append(self.ax.add_patch(pt))
self.fig.canvas.draw()
return self.radiusPt
def clearRadius(self):
"""Removes circle from canvas."""
for pt in self.radiusPt:
pt.remove()
self.radiusPt=[]
self.fig.canvas.draw()
return self.radiusPt
def showFirstDataImg(self):
"""Shows either first data image defined in ``embryo.fileList`` or
image specified by ``fn``.
.. note:: If both are given, will use the embryo option.
"""
if self.embryo!=None:
self.embryo.updateFileList()
fnImg=self.embryo.fnDatafolder
if fnImg[-1]!='/':
fnImg=fnImg+'/'
fnImg=fnImg+self.embryo.fileList[0]
self.img=pyfrp_img_module.loadImg(fnImg,self.embryo.dataEnc)
elif self.fn!=None:
self.img=pyfrp_img_module.loadImg(self.fn,'uint16')
self.ax.set_xlim([0, self.img.shape[0]])
self.ax.set_ylim([0, self.img.shape[1]])
self.showImg(self.img)
def showImg(self,img):
"""Shows image on canvas.
Args:
img (numpy.ndarray): Image to be shown.
"""
self.ax.imshow(img)
self.fig.canvas.draw()
return self.canvas
def computeRadiusFromCoordinate(self,x,y):
"""Computes radius from given cordinate ``(x,y)``.
"""
return np.sqrt((x-self.center[0])**2+(y-self.center[1])**2)
def getMouseInput(self,event):
"""Directs mouse input to the right actions."""
#Check if click in axes
if event.xdata==None:
return
#Left click to define center
if event.button==1:
self.center=[event.xdata,event.ydata]
self.drawCenter()
if len(self.radiusPt)>0:
self.drawRadius()
#Right click to define radius
elif event.button==3:
if len(self.centerPt)>0:
self.radius=self.computeRadiusFromCoordinate(event.xdata,event.ydata)
self.drawRadius()
#Middle click to activate center marker
if event.button==2:
self.drawCenterMarker()
self.fig.canvas.draw()
return
def makeSubplot(size,titles=None,tight=False,sup=None,proj=None,fig=None,show=True):
"""Generates matplotlib figure with (x,y) subplots.
.. note:: List of ``titles`` needs to be same size as desired number of axes.
Otherwise will turn off titles.
.. note:: List of ``proj`` needs to be same size as desired number of axes.
Otherwise will turn off projections.
Example:
>>> makeSubplot([2,2],titles=["Axes 1", "Axes 2", "Axes 3", "Axes 4"],proj=[None,None,'3d',None])
will result in
.. image:: ../imgs/pyfrp_plot_module/makeSubplot.png
Args:
size (list): Size of subplot arrangement.
Keyword Args:
titles (list): List of axes titles.
tight (bool): Use tight layout.
sup (str): Figure title.
proj (list): List of projections.
fig (matplotlib.figure): Figure used for axes.
show (bool): Show figure right away.
Returns:
tuple: Tuple containing:
* fig (matplotlib.figure): Figure.
* axes (list): List of Matplotlib axes.
"""
#How many axes need to be created
n_ax=size[0]*size[1]
if proj==None:
proj=n_ax*[None]
#Creating figure
if fig==None:
fig=plt.figure()
if show:
fig.show()
fig.set_tight_layout(tight)
#Suptitle
if sup!=None:
fig.suptitle(sup)
#Add axes
axes=[]
for i in range(n_ax):
try:
if proj[i]!=None:
ax=fig.add_subplot(size[0],size[1],i+1,projection=proj[i])
else:
ax=fig.add_subplot(size[0],size[1],i+1)
except IndexError:
printWarning("Axes " + str(i) + "does not have a projection specified. Will create normal 2D plot.")
ax=fig.add_subplot(size[0],size[1],i+1)
axes.append(ax)
#Print titles
if titles!=None:
try:
ax.set_title(titles[i])
except IndexError:
printWarning("Axes " + str(i) + " does not have a title specified.")
#Draw
plt.draw()
#Return axes handle
return fig,axes
def makeGeometryPlot(titles=None,tight=False,sup=None,fig=None,show=True,unit="px"):
"""Generates matplotlib figure and single axes optimized for geometry plots.
See also :py:func:`pyfrp.modules.pyfrp_plot_module.makeSubplot`.
Keyword Args:
titles (list): List of axes titles.
tight (bool): Use tight layout.
sup (str): Figure title.
fig (matplotlib.figure): Figure used for axes.
show (bool): Show figure right away.
unit (str): Unit displayed in axes label.
Returns:
tuple: Tuple containing:
* fig (matplotlib.figure): Figure.
* axes (list): List of Matplotlib axes.
"""
fig,axes=makeSubplot([1,1],proj=['3d'],titles=titles,tight=tight,sup=sup,fig=fig,show=show)
if unit==None:
axes[0].set_xlabel("x")
axes[0].set_ylabel("y")
axes[0].set_zlabel("z")
else:
axes[0].set_xlabel("x ("+unit+")")
axes[0].set_ylabel("y ("+unit+")")
axes[0].set_zlabel("z ("+unit+")")
return fig,axes
def adjustImshowRange(axes,vmin=None,vmax=None):
"""Adjust display range of ``matplotlib.pyplot.imshow`` plots in
list of axes.
Finds first image artist in each axes in ``axes`` list and then
sets display range to ``[vmin,vmax]``.
Args:
axes (list): List of matplotlib axes.
Keyword Args:
vmin (float): Minimum value of display range.
vmax (float): Maximum value of display range.
Returns:
list: Updated list of matplotlib axes.
"""
#Loop through axes
for ax in axes:
#Grab plot
implot=findArtist(ax,"Image")
#Rescale
if implot!=None:
implot.set_clim(vmin,vmax)
#Draw
redraw(ax)
return axes
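# Illustrative usage sketch (variable names img1/img2 are assumed): put two
# images on a common display range after plotting them with imshow.
#
#   fig, axes = makeSubplot([1, 2])
#   axes[0].imshow(img1)
#   axes[1].imshow(img2)
#   adjustImshowRange(axes, vmin=0, vmax=255)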
def findArtist(ax,key):
"""Finds ``matplotlib.artist`` which name contains ``key``.
.. note:: Will stop searching after first artist is found.
Will return ``None`` if no artist can be found.
Args:
ax (matplotlib.axes): Matplotlib axes.
key (str): Key used for search.
Returns:
matplotlib.artist: Matplotlib artist.
"""
c=ax.get_children()
for x in c:
if key in str(x):
return x
return None
def redraw(ax):
"""Redraws axes's figure's canvas.
Makes sure that current axes content is visible.
Args:
ax (matplotlib.axes): Matplotlib axes.
Returns:
matplotlib.axes: Matplotlib axes
"""
ax.get_figure().canvas.draw()
return ax
def plotTS(xvec,yvec,label='',title='',sup='',ax=None,color=None,linewidth=1,legend=True,linestyle='-',show=True,alpha=1.,legLoc=-1):
"""Plot timeseries all-in-one function.
Args:
xvec (numpy.ndarray): x-data to be plotted.
yvec (numpy.ndarray): y-data to be plotted.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes used for plotting. If not specified, will generate new one.
color (str): Color of plot.
linestyle (str): Linestyle of plot.
linewidth (float): Linewidth of plot.
legend (bool): Show legend.
sup (str): Figure title.
title (str): Axes title.
label (str): Label for legend.
show (bool): Show figure.
alpha (float): Transparency of line.
legLoc (int): Location of legend.
Returns:
matplotlib.axes: Axes used for plotting.
"""
if len(xvec)!=len(yvec):
printWarning('len(xvec) != len (yvec). This could be due to incomplete simulation/analysis/pinning. Will not plot.')
return None
if ax==None:
fig,axes = makeSubplot([1,1],titles=[title],sup=sup,tight=False,show=show)
ax=axes[0]
else:
ax.set_title(title)
ax.plot(xvec,yvec,color=color,label=label,linestyle=linestyle,alpha=alpha)
if legend:
if legLoc==-1:
ax.get_figure().subplots_adjust(right=0.7)
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
ax.legend(loc=legLoc)
redraw(ax)
return ax
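# Illustrative usage sketch for plotTS (the data below is made up): plot two
# curves into the same axes with a shared legend.
#
#   tvec = np.linspace(0, 100, 50)
#   ax = plotTS(tvec, 1 - np.exp(-tvec / 20.), label='recovery', title='FRAP')
#   plotTS(tvec, np.exp(-tvec / 20.), ax=ax, color='r', label='depletion')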
def plotSolutionVariable(x,y,val,ax=None,vmin=None,vmax=None,nlevels=25,colorbar=True,plane='xy',zs=None,zdir=None,title="Solution Variable",sup="",dThresh=None,nPts=1000,mode='normal',typ='contour'):
"""Plots simulation solution variable as 2D contour plot or 3D surface plot.
.. note:: If no ``ax`` is given, will create new one.
.. note:: ``x`` and ``y`` do not necessarily have to be coordinates in x/y-direction, but rather correspond to
the two directions defined in ``plane``.
``plane`` variable controls in which plane the solution variable is supposed to be plotted.
Acceptable input variables are ``"xy","xz","yz"``. See also
:py:func:`pyfrp.subclasses.pyfrp_ROI.ROI.getMaxExtendPlane`.
``mode`` controls which contour/surface function is used:
	* ``normal``: Will create rectangular grid and use ``scipy.interpolate.griddata`` and interpolate solution onto it,
then plot using ``matplotlib.pyplot.contourf``, see also http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.contourf and
http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.interpolate.griddata.html#scipy.interpolate.griddata .
* ``tri``: Will plot irregular grid using ``matplotlib.pyplot.tricontourf``. See also
http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.tricontourf .
``typ`` controls which plot type is used:
* ``contour``: Produces contour plots using either ``matplotlib.pyplot.contourf`` or ``matplotlib.pyplot.tricontourf``.
* ``surface``: Produces contour plots using either ``matplotlib.Axed3D.plot_surface`` or ``matplotlib.pyplot.plot_trisurf``.
.. warning:: ``matplotlib.pyplot.tricontourf`` has problems when ``val`` only is in a single level of contour plot.
To avoid this, we currently add some noise in this case just to make it plottable. This is not the most elegant
solution. (only in case of 3D plotting)
Args:
x (numpy.ndarray): x-coordinates.
y (numpy.ndarray): y-coordinates.
val (numpy.ndarray): Solution variable values.
Keyword Args:
ax (matplotlib.axes): Axes used for plotting.
vmin (float): Minimum value displayed in contour plot.
vmax (float): Maximum value displayed in contour plot.
nlevels (int): Number of contour levels.
colorbar (bool): Display color bar.
plane (str): Plane in which solution variable is supposed to be plotted.
zs (float): In case of a 3D plot, height in direction zdir where to put contour.
zdir (str): Orthogonal direction to plane.
nPts (int): Number of points used for interpolating (only if ``mode=normal``).
mode (str): Which contour function to use.
title (str): Title of plot.
typ (str): Type of plot.
Returns:
matplotlib.axes: Axes used for plotting.
"""
#Check of entered plot type makes sense
if typ not in ['contour','surface']:
printError("Unknown plot type "+ typ)
return ax
#Make axes if necessary
if ax==None:
if (zs!=None and zdir!=None) or typ=='surface':
fig,axes = makeSubplot([1,1],titles=[title],sup=sup,proj=['3d'])
else:
fig,axes = makeSubplot([1,1],titles=[title],sup=sup)
ax=axes[0]
else:
ax.set_title(title)
#vmin/vmax/levels
vmin,vmax,levels=makeFittingLevels(vmin,vmax,val,nlevels)
#Make grid
grid = np.meshgrid(np.linspace(min(x), max(x), nPts),np.linspace(min(y), max(y), nPts))
#Interpolate
xy=np.vstack((x,y))
valPlot = scipy.interpolate.griddata(xy.T, val, tuple(grid), 'linear')
#If dThresh is given, filter all nodes that are further apart from their neighbor than dThresh
if dThresh!=None:
idxs=pyfrp_idx_module.maskMeshByDistance(x,y,dThresh,grid)
valPlot[idxs]=np.nan
#Stupid fix for tricontourf 3D problem
if zs!=None and zdir!=None and mode=='tri':
if min(val)==max(val):
"""IDEA:
Add a little bit of noise to make it work. Not best solution ever. Needs to be changed.
"""
add=(1.01*vmax-vmin)/(nlevels)*np.random.randn(np.shape(val)[0])
val=val+add
#Plot
if mode=='tri':
if typ=='surface':
solPlot=ax.plot_trisurf(x,y,val,cmap='jet',vmin=vmin,vmax=vmax)
elif typ=='contour':
solPlot=ax.tricontourf(x,y,val,vmin=vmin,vmax=vmax,levels=levels,offset=zs,zdir=zdir,extend='both')
else:
if typ=='surface':
solPlot=ax.plot_surface(grid[0],grid[1],valPlot,cmap='jet',vmin=vmin,vmax=vmax)
elif typ=='contour':
solPlot=ax.contourf(grid[0],grid[1],valPlot,vmin=vmin,vmax=vmax,levels=levels,offset=zs,zdir=zdir)
#Label
if len(plane)!=2:
printError("Don't understand plane="+plane+". Will not plot.")
return ax
else:
ax.set_xlabel(plane[0])
ax.set_ylabel(plane[1])
ax.autoscale(enable=True, axis='both', tight=True)
if colorbar:
cb=plt.colorbar(solPlot,orientation='horizontal',pad=0.05)
#plt.draw()
ax.get_figure().canvas.draw()
return ax
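# Illustrative usage sketch (assumes x, y, val are equally long 1D arrays, e.g.
# mesh node coordinates and the simulated concentration at those nodes):
#
#   ax = plotSolutionVariable(x, y, val, plane='xy', nlevels=50, typ='contour')
#   plotSolutionVariable(x, y, val, plane='xy', typ='surface', colorbar=False)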
def makeFittingLevels(vmin,vmax,val,nlevels,buff=0.01):
"""Generates array with fitting levels for contour plots.
.. note:: If ``vmin=None`` or ``vmax=None``, will pick the minimum/maximum
value of ``val``.
Args:
val (numpy.ndarray): Array to be plotted.
vmin (float): Minimum value displayed in contour plot.
vmax (float): Maximum value displayed in contour plot.
nlevels (int): Number of contour levels.
Keyword Args:
buff (float): Percentage buffer to be added to both sides of the array.
Returns:
tuple: Tuple containing:
* vmin (float): Minimum value displayed in contour plot.
* vmax (float): Maximum value displayed in contour plot.
* levels (numpy.ndarray): Level array to be handed to contour function.
"""
if vmin==None:
vmin=min(val)
if vmax==None:
vmax=max(val)
levels=np.linspace((1-buff)*vmin,(1+buff)*vmax,nlevels)
return vmin,vmax,levels
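# Worked example (hypothetical values): for val ranging from 0 to 10,
# makeFittingLevels(None, None, val, 5) returns vmin=0, vmax=10 and
# levels = np.linspace(0.99*0, 1.01*10, 5), i.e. 5 levels spanning [0, 10.1].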
def set3DAxesEqual(ax):
"""Make axes of 3D plot have equal scale.
This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Modified from http://stackoverflow.com/questions/13685386/matplotlib-equal-unit-length-with-equal-aspect-ratio-z-axis-is-not-equal-to .
Args:
ax (matplotlib.axes): A matplotlib axes.
Returns:
matplotlib.axes: Modified matplotlib axes.
"""
xLimits = ax.get_xlim3d()
yLimits = ax.get_ylim3d()
zLimits = ax.get_zlim3d()
xRange = abs(xLimits[1] - xLimits[0])
xMiddle = np.mean(xLimits)
yRange = abs(yLimits[1] - yLimits[0])
yMiddle = np.mean(yLimits)
zRange = abs(zLimits[1] - zLimits[0])
zMiddle = np.mean(zLimits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plotRadius = 0.5*max([xRange, yRange, zRange])
ax.set_xlim3d([xMiddle - plotRadius, xMiddle + plotRadius])
ax.set_ylim3d([yMiddle - plotRadius, yMiddle + plotRadius])
ax.set_zlim3d([zMiddle - plotRadius, zMiddle + plotRadius])
return ax
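# Illustrative usage sketch (X, Y, Z stand for some 3D geometry data and are
# assumed to exist):
#
#   fig, axes = makeGeometryPlot()
#   axes[0].plot_surface(X, Y, Z)
#   set3DAxesEqual(axes[0])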
def getPubParms(fontSize=10,labelFontSize=10,tickFontSize=10,legendFontSize=10):
"""Returns dictionary with good parameters for nice
publication figures.
Resulting ``dict`` can be loaded via ``plt.rcParams.update()``.
.. note:: Use this if you want to include LaTeX symbols in the figure.
Keyword Args:
fontSize (int): Font size.
labelFontSize (int): Font size.
tickFontSize (int): Font size.
legendFontSize (int): Font size.
Returns:
dict: Parameter dictionary.
"""
params = {'backend': 'ps',
'font.size': fontSize,
'axes.labelsize': labelFontSize,
'legend.fontsize': legendFontSize,
'xtick.labelsize': tickFontSize,
'ytick.labelsize': tickFontSize,
'text.usetex': True,
'font.family': 'sans-serif',
'font.sans-serif': 'Bitstream Vera Sans, Lucida Grande, Verdana, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif',
#'ytick.direction': 'out',
'text.latex.preamble': [r'\usepackage{helvet}', r'\usepackage{sansmath}',r'\usepackage{siunitx}'] , #r'\sansmath',
'ytick.direction' : 'out',
'xtick.direction' : 'out'
}
return params
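# Illustrative usage sketch (font sizes are example values): load the
# parameters globally before building any figure.
#
#   plt.rcParams.update(getPubParms(fontSize=8, labelFontSize=8))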
def turnAxesForPub(ax,adjustFigSize=True,figWidthPt=180.4,figHeightPt=None,ptPerInches=72.27,fontSize=10,labelFontSize=10,tickFontSize=10,legendFontSize=10,is3D=False):
"""Turns axes nice for publication.
	If ``adjustFigSize=True``, will also adjust the size of the figure.
Args:
ax (matplotlib.axes): A matplotlib axes.
Keyword Args:
adjustFigSize (bool): Adjust the size of the figure.
figWidthPt (float): Width of the figure in pt.
figHeightPt (float): Height of the figure in pt.
ptPerInches (float): Resolution in pt/inches.
fontSize (int): Font size.
labelFontSize (int): Font size.
tickFontSize (int): Font size.
legendFontSize (int): Font size.
is3D (bool): Flag indicating if 3D figure.
Returns:
matplotlib.axes: Modified matplotlib axes.
"""
params=getPubParms(fontSize=fontSize,labelFontSize=labelFontSize,tickFontSize=tickFontSize,legendFontSize=legendFontSize)
plt.rcParams.update(params)
if not is3D:
ax=setPubAxis(ax)
setPubFigSize(ax.get_figure(),figWidthPt=figWidthPt,figHeightPt=figHeightPt,ptPerInches=ptPerInches)
ax=closerLabels(ax,padx=3,pady=1)
return ax
def setPubAxis(ax):
"""Gets rid of top and right axis.
Args:
ax (matplotlib.axes): A matplotlib axes.
Returns:
matplotlib.axes: Modified matplotlib axes.
"""
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines["left"].axis.axes.tick_params(direction="outward")
ax.spines["bottom"].axis.axes.tick_params(direction="outward")
return ax
def setPubFigSize(fig,figWidthPt=180.4,figHeightPt=None,ptPerInches=72.27):
"""Adjusts figure size/aspect.
If ``figHeightPt`` is not given, will use golden ratio to
compute it.
Keyword Args:
figWidthPt (float): Width of the figure in pt.
figHeightPt (float): Height of the figure in pt.
ptPerInches (float): Resolution in pt/inches.
Returns:
matplotlib.figure: Adjust figure.
"""
inchesPerPt = 1.0/ptPerInches
goldenMean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
figWidth = figWidthPt*inchesPerPt # width in inches
if figHeightPt==None:
figHeight = figWidth*goldenMean # height in inches
else:
figHeight = figHeightPt*inchesPerPt
figSize = [figWidth,figHeight]
fig.set_size_inches(figSize[0],figSize[1])
fig.subplots_adjust(bottom=0.25)
fig.subplots_adjust(left=0.2)
fig.subplots_adjust(top=0.9)
return fig
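# Worked example with the defaults: figWidthPt=180.4 at 72.27 pt/inch gives a
# width of about 2.50 inches; the golden ratio (sqrt(5)-1)/2 ~ 0.618 then
# yields a height of about 1.54 inches.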
def closerLabels(ax,padx=10,pady=10):
"""Moves x/y labels closer to axis."""
ax.xaxis.labelpad = padx
ax.yaxis.labelpad = pady
return ax
def getRandomColor():
"""Returns triplet defining a random color.
"""
return np.random.rand(3,1)
def is3DAxes(ax):
"""Returns if an axes is a 3D axis.
Args:
ax (matplotlib.axes): A matplotlib axes.
Returns:
bool: True if 3d axis.
"""
if hasattr(ax, 'get_zlim'):
return True
else:
return False
| gpl-3.0 |
moutai/scikit-learn | setup.py | 25 | 11732 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
import subprocess
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
cython_hash_file = os.path.join(cwd, 'cythonize.dat')
if os.path.exists(cython_hash_file):
os.unlink(cython_hash_file)
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable, os.path.join(cwd,
'build_tools',
'cythonize.py'),
'sklearn'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required, nor Cythonization
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
if len(sys.argv) >= 2 and sys.argv[1] not in 'config':
# Cythonize if needed
print('Generating cython files')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
# Clean left-over .so file
for dirpath, dirnames, filenames in os.walk(
os.path.join(cwd, 'sklearn')):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension in (".so", ".pyd", ".dll"):
pyx_file = str.replace(filename, extension, '.pyx')
print(pyx_file)
if not os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
jejjohnson/manifold_learning | src/python/utils/naive_classification.py | 1 | 2272 | from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, recall_score, f1_score
from time import time
# Naive Random forest classification scheme
def rf_naive(X, y, img, train_prct=0.10):
"""Implements a Naive RF Classifier on a Hyperspectral Image
Parameters:
-----------
    X : (N x D) data matrix, with N samples (pixels) and D features
        (spectral bands)
    y : (N,) vector of class labels
    img : (M x D) array of image pixels to classify after training
    train_prct : fraction of the data used for training (default 0.10)
"""
#--------------------------------
# Training and Testing Data Split
#--------------------------------
train_sample_size = train_prct
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=train_sample_size,
random_state=1234)
#--------------------------------------------------
# Implement the Random Forest Classification Method
#--------------------------------------------------
# Initialize our model with 500 trees
rf_param = RandomForestClassifier(
n_estimators=500, oob_score=True)
print('Using Random Forest Parameters to train the model...')
t0 = time()
# Fit our model to training Data
rf_model = rf_param.fit(X_train, y_train)
print('Done in {s:0.3f}s.'.format(
s=time()-t0))
print ('Our OOB Prediction accuracy is: {oob}'.format(
oob=rf_model.oob_score_*100))
print('Using Random Forest model on the testing data...')
t0 = time()
# Test the model on the testing data
y_pred = rf_model.predict(X_test)
print('Done in {s:0.3f}s.'.format(
s=time()-t0))
#--------------------
# Accuracy Statistics
#--------------------
cm = confusion_matrix(y_test, y_pred)
cr = classification_report(y_test, y_pred)
#--------------------------------
# Classification for entire image
#--------------------------------
print('Using the RF model to predict the rest of the image classes...')
t0 = time()
rf_class_img = rf_model.predict(img)
print('Done in {s:0.3f}s.'.format(
s=time()-t0))
return rf_class_img, cm, cr | mit |
fabioticconi/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 42 | 20925 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = assert_warns(DeprecationWarning, estimator.decision_function, X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However for the middle part, the comparison of coefficient values
    # for a range of alphas, we had to make an adaptation. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
appapantula/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
abantam/pmtud | src/flow-monitor/examples/wifi-olsr-flowmon.py | 108 | 7439 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 |
CrazyGuo/bokeh | bokeh/compat/mpl.py | 32 | 2834 | "Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from warnings import warn
import matplotlib.pyplot as plt
from .bokeh_exporter import BokehExporter
from .bokeh_renderer import BokehRenderer
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def to_bokeh(fig=None, name=None, server=None, notebook=None, pd_obj=True, xkcd=False):
""" Uses bokeh to display a Matplotlib Figure.
You can store a bokeh plot in a standalone HTML file, as a document in
a Bokeh plot server, or embedded directly into an IPython Notebook
output cell.
Parameters
----------
fig: matplotlib.figure.Figure
The figure to display. If None or not specified, then the current figure
will be used.
name: str (default=None)
If this option is provided, then the Bokeh figure will be saved into
this HTML file, and then a web browser will be used to display it.
server: str (default=None)
Fully specified URL of bokeh plot server. Default bokeh plot server
URL is "http://localhost:5006" or simply "deault"
notebook: bool (default=False)
Return an output value from this function which represents an HTML
object that the IPython notebook can display. You can also use it with
a bokeh plot server just specifying the URL.
pd_obj: bool (default=True)
        The implementation assumes you are plotting using pandas.
        You have the option to turn it off (False) to plot the datetime x-axis
        with other non-pandas interfaces.
xkcd: bool (default=False)
        If this option is True, then the Bokeh figure will be saved with an
xkcd style.
"""
if name is not None:
warn("Use standard output_file(...) from bokeh.io")
if server is not None:
warn("Use standard output_server(...) from bokeh.io")
if notebook is not None:
warn("Use standard output_notebook() from bokeh.io")
if fig is None:
fig = plt.gcf()
renderer = BokehRenderer(pd_obj, xkcd)
exporter = BokehExporter(renderer)
exporter.run(fig)
return renderer.fig
| bsd-3-clause |
kcavagnolo/astroML | book_figures/chapter10/fig_matchedfilt_chirp2.py | 3 | 5123 | """
Matched Filter Chirp Search
---------------------------
Figure 10.27
A ten-parameter chirp model (see eq. 10.87) fit to a time series. Seven of the
parameters can be considered nuisance parameters, and we marginalize over
them in the likelihood contours shown here.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
# Hack to fix import issue in older versions of pymc
import scipy
import scipy.misc
scipy.derivative = scipy.misc.derivative
import pymc
from astroML.decorators import pickle_results
from astroML.plotting.mcmc import plot_mcmc
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#----------------------------------------------------------------------
# Set up toy dataset
def chirp(t, T, A, phi, omega, beta):
"""chirp signal"""
signal = A * np.sin(phi + omega * (t - T) + beta * (t - T) ** 2)
signal[t < T] = 0
return signal
def background(t, b0, b1, Omega1, Omega2):
"""background signal"""
return b0 + b1 * np.sin(Omega1 * t) * np.sin(Omega2 * t)
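# For reference, the full generative model implied by the two pieces above is
#
#     y(t) = b0 + b1 * sin(Omega1 * t) * sin(Omega2 * t)              (background)
#            + A * sin(phi + omega * (t - T) + beta * (t - T)**2)     (chirp, t >= T)
#
# with the chirp term set to zero for t < T.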
np.random.seed(0)
N = 500
T_true = 30
A_true = 0.8
phi_true = np.pi / 2
omega_true = 0.1
beta_true = 0.02
b0_true = 0.5
b1_true = 1.0
Omega1_true = 0.3
Omega2_true = 0.4
sigma = 0.1
t = 100 * np.random.random(N)
signal = chirp(t, T_true, A_true, phi_true, omega_true, beta_true)
bg = background(t, b0_true, b1_true, Omega1_true, Omega2_true)
y_true = signal + bg
y_obs = np.random.normal(y_true, sigma)
t_fit = np.linspace(0, 100, 1000)
y_fit = (chirp(t_fit, T_true, A_true, phi_true, omega_true, beta_true) +
background(t_fit, b0_true, b1_true, Omega1_true, Omega2_true))
#----------------------------------------------------------------------
# Set up MCMC sampling
T = pymc.Uniform('T', 0, 100, value=T_true)
A = pymc.Uniform('A', 0, 100, value=A_true)
phi = pymc.Uniform('phi', -np.pi, np.pi, value=phi_true)
log_omega = pymc.Uniform('log_omega', -4, 0, value=np.log(omega_true))
log_beta = pymc.Uniform('log_beta', -6, 0, value=np.log(beta_true))
b0 = pymc.Uniform('b0', 0, 100, value=b0_true)
b1 = pymc.Uniform('b1', 0, 100, value=b1_true)
log_Omega1 = pymc.Uniform('log_Omega1', -3, 0, value=np.log(Omega1_true))
log_Omega2 = pymc.Uniform('log_Omega2', -3, 0, value=np.log(Omega2_true))
omega = pymc.Uniform('omega', 0.001, 1, value=omega_true)
beta = pymc.Uniform('beta', 0.001, 1, value=beta_true)
# uniform prior on log(Omega1)
@pymc.deterministic
def Omega1(log_Omega1=log_Omega1):
return np.exp(log_Omega1)
# uniform prior on log(Omega2)
@pymc.deterministic
def Omega2(log_Omega2=log_Omega2):
return np.exp(log_Omega2)
@pymc.deterministic
def y_model(t=t, T=T, A=A, phi=phi, omega=omega, beta=beta,
b0=b0, b1=b1, Omega1=Omega1, Omega2=Omega2):
return (chirp(t, T, A, phi, omega, beta)
+ background(t, b0, b1, Omega1, Omega2))
y = pymc.Normal('y', mu=y_model, tau=sigma ** -2, observed=True, value=y_obs)
model = dict(T=T, A=A, phi=phi, b0=b0, b1=b1,
log_omega=log_omega, omega=omega,
log_beta=log_beta, beta=beta,
log_Omega1=log_Omega1, Omega1=Omega1,
log_Omega2=log_Omega2, Omega2=Omega2,
y_model=y_model, y=y)
#----------------------------------------------------------------------
# Run the MCMC sampling (saving the results to a pickle file)
@pickle_results('matchedfilt_chirp2.pkl')
def compute_MCMC(niter=30000, burn=2000):
S = pymc.MCMC(model)
    S.sample(iter=niter, burn=burn)
traces = [S.trace(s)[:] for s in ['T', 'A', 'omega', 'beta']]
return traces
traces = compute_MCMC()
labels = ['$T$', '$A$', r'$\omega$', r'$\beta$']
limits = [(29.75, 30.25), (0.75, 0.83), (0.085, 0.115), (0.0197, 0.0202)]
true = [T_true, A_true, omega_true, beta_true]
#------------------------------------------------------------
# Plot results
fig = plt.figure(figsize=(5, 5))
# This function plots multiple panels with the traces
axes_list = plot_mcmc(traces, labels=labels, limits=limits,
true_values=true, fig=fig,
bins=30, colors='k',
bounds=[0.14, 0.08, 0.95, 0.95])
for ax in axes_list:
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_locator(plt.MaxNLocator(5))
ax = fig.add_axes([0.52, 0.7, 0.43, 0.25])
ax.scatter(t, y_obs, s=9, lw=0, c='k')
ax.plot(t_fit, y_fit, '-k')
ax.set_xlim(0, 100)
ax.set_xlabel('$t$')
ax.set_ylabel(r'$h_{\rm obs}$')
plt.show()
| bsd-2-clause |
Haleyo/spark-tk | regression-tests/sparktkregtests/testcases/graph/graph_lbp_test.py | 13 | 8859 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests LBP Graphx implementation by comparing results agains graphlab """
import unittest
from sparktkregtests.lib import sparktk_test
class LbpPottsModel(sparktk_test.SparkTKTestCase):
def test_lbp_cross_3_state(self):
"""Test 3 state Potts model"""
vertex_frame = self.context.frame.create(
[[1, "1.0 0.0 0.0"],
[2, ".3 .3 .3"],
[3, "1.0 0.0 0.0"],
[4, "0.0 1.0 0.0"],
[5, "0.0 0.0 1.0"]],
[("id", int),
("vertex_weight", str)])
edge_frame = self.context.frame.create(
[[2, 3, 1.0],
[2, 1, 1.0],
[2, 4, 1.0],
[2, 5, 1.0]],
[("src", int),
("dst", int),
("weight", float)])
graph = self.context.graph.create(vertex_frame, edge_frame)
potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
known_vals = {1: (1.0, 0.0, 0.0),
2: (0.57611688, 0.2119415576, 0.2119415576),
3: (1.0, 0.0, 0.0),
4: (0.0, 1.0, 0.0),
5: (0.0, 0.0, 1.0)}
potts_vals = potts.to_pandas(potts.count())
for _, row in potts_vals.iterrows():
values = map(float, row["posterior"][1:-1].split(","))
self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
self.assertAlmostEqual(known_vals[row["id"]][2], values[2])
def test_lbp_cross_50(self):
"""Test a balanced graph"""
vertex_frame = self.context.frame.create(
[[1, "1.0 0.0"],
[2, ".5 .5"],
[3, "1.0 0.0"],
[4, "0.0 1.0"],
[5, "0.0 1.0"]],
[("id", int),
("vertex_weight", str)])
edge_frame = self.context.frame.create(
[[2, 3, 1.0],
[2, 1, 1.0],
[2, 4, 1.0],
[2, 5, 1.0]],
[("src", int),
("dst", int),
("weight", float)])
graph = self.context.graph.create(vertex_frame, edge_frame)
potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
known_vals = {1: (1.0, 0.0),
2: (0.5, 0.5),
3: (1.0, 0.0),
4: (0.0, 1.0),
5: (0.0, 1.0)}
potts_vals = potts.to_pandas(potts.count())
for _, row in potts_vals.iterrows():
values = map(float, row["posterior"][1:-1].split(","))
self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
def test_lbp_cross_3_1(self):
"""Test LBP on a cross with a 3-1 split on the distribution"""
vertex_frame = self.context.frame.create(
[[1, "1.0 0.0"],
[2, "0.5 0.5"],
[3, "1.0 0.0"],
[4, "0.0 1.0"],
[5, "1.0 0.0"]],
[("id", int),
("vertex_weight", str)])
edge_frame = self.context.frame.create(
[[2, 3, 1.0],
[2, 1, 1.0],
[2, 4, 1.0],
[2, 5, 1.0]],
[("src", int),
("dst", int),
("weight", float)])
graph = self.context.graph.create(vertex_frame, edge_frame)
potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
known_vals = {1: (1.0, 0.0),
2: (0.88079707798, 0.119202922),
3: (1.0, 0.0),
4: (0.0, 1.0),
5: (1.0, 0.0)}
potts_vals = potts.to_pandas(potts.count())
for _, row in potts_vals.iterrows():
values = map(float, row["posterior"][1:-1].split(","))
self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
def test_lbp_cross(self):
"""Test lbp on a basic cross with a 4-0 split"""
vertex_frame = self.context.frame.create(
[["1", "1.0 0.0"],
["2", ".5 .5"],
["3", "1.0 0.0"],
["4", "1.0 0.0"],
["5", "1.0 0.0"]],
[("id", str), ("vertex_weight", str)])
edge_frame = self.context.frame.create(
[["2", "3", 0.5],
["2", "1", 0.5],
["2", "4", 0.5],
["2", "5", 0.5]],
[("src", str),
("dst", str),
("weight", float)])
graph = self.context.graph.create(vertex_frame, edge_frame)
potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
known_vals = {"1": (1.0, 0.0),
"2": (0.88079707797, 0.11920292202),
"3": (1.0, 0.0),
"4": (1.0, 0.0),
"5": (1.0, 0.0)}
potts_vals = potts.to_pandas(potts.count())
for _, row in potts_vals.iterrows():
values = map(float, row["posterior"][1:-1].split(","))
self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
def test_lbp_double_cross(self):
"""Test lbp on a double cross"""
vertex_frame = self.context.frame.create(
[["1", "1.0 0.0", 1, "1.0 0.0"],
["2", "0.5 0.5", 0, ""],
["3", "1.0 0.0", 1, "1.0 0.0"],
["4", "0.0 1.0", 1, "0.0 1.0"],
["5", "0.5 0.5", 0, ""],
["6", "0.0 1.0", 1, "0.0 1.0"],
["7", "0.0 1.0", 1, "0.0 1.0"],
["8", "1.0 0.0", 1, "1.0 0.0"]],
[("id", str),
("vertex_weight", str),
("is_observed",int), ("label", str)])
edge_frame = self.context.frame.create(
[["2", "3", 1.0],
["2", "1", 1.0],
["2", "4", 1.0],
["2", "5", 1.0],
["6", "5", 1.0],
["7", "5", 1.0],
["8", "5", 1.0]],
[("src", str),
("dst", str),
("weight", float)])
graph = self.context.graph.create(vertex_frame, edge_frame)
potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
known_vals = {"1": (1.0, 0.0),
"2": (0.6378903114, 0.36210968),
"3": (1.0, 0.0),
"4": (0.0, 1.0),
"5": (0.36210968, 0.6378903114),
"6": (0.0, 1.0),
"7": (0.0, 1.0),
"8": (1.0, 0.0)}
potts_vals = potts.to_pandas(potts.count())
for _, row in potts_vals.iterrows():
values = map(float, row["posterior"][1:-1].split(","))
self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
anki1909/peach | tutorial/demo07.py | 6 | 2711 | # -*- coding: utf-8 -*-
#####################################################################
# Peach - Python for Computational Intelligence
# José Alexandre Nalon
#
# This file: demo07.py
# Demonstration and test: mapping of a nonlinear function.
#####################################################################
from numpy import *
import random
import peach as p
# Explanation of this demo.
#
# It is possible to use a neural network to map a nonlinear function,
# such as a sinusoid or a similar curve. The technique requires a more
# complex neural network, with one input, but with a relatively large
# hidden layer. The output layer must use the identity as its
# activation function, to sum the mappings that were performed.
# Here we create the neural network. The number of inputs is kept in a
# variable to allow easy adaptation. The activation function of the
# output layer is the identity, and the learning method is
# back-propagation.
# We use several outputs, evenly distributed around the evaluation
# point, so that the error obtained is more significant where it
# exists. In this case, the evaluation point is int(inputs/2). Using a
# larger neighborhood makes better results possible.
inputs = 7
nn = p.FeedForward((inputs, 200, inputs), lrule=p.BackPropagation(0.01), bias=True)
nn.phi = (p.Sigmoid, p.Linear)
delta = linspace(-0.1, 0.1, inputs)
elog = [ ]
error = 1
i = 0
while i < 2000:
    # We generate a value xo and the desired response. From xo we build
    # the vector x, which will be the input to the neural network:
xo = random.uniform(-1.0, 1.0)
x = xo + delta
d = sin(pi*x)
    # We make the prediction, compute the error and perform one
    # learning step of the network.
y = nn(x)
error = nn.learn(x, d)
elog.append(error)
    # Increment the attempt counter.
i = i + 1
# If the system has the matplotlib graphics package installed, the demo
# tries to create a plot of the original function contrasted with the
# predicted function. The plot is saved to the file demo07.eps.
try:
from matplotlib import *
from matplotlib.pylab import *
x = linspace(-1, 1, 200)
y = sin(pi*x)
ye = [ ]
for xo in x:
yn = nn(delta + xo)
ye.append(yn[int(inputs/2)])
ye = array(ye)
subplot(211)
hold(True)
grid(True)
plot(x, y, 'b--')
plot(x, ye, 'g')
xlim([ -1, 1 ])
legend([ "$y$", "$\hat{y}$" ])
subplot(212)
grid(True)
plot(arange(0, 2000, 10), array(elog, dtype=float)[::10])
savefig("demo07.eps")
except ImportError:
pass
| lgpl-2.1 |
licco/zipline | zipline/utils/cli.py | 3 | 6201 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
from copy import copy
from six import print_
from six.moves import configparser
import pandas as pd
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except ImportError:
PYGMENTS = False
import zipline
DEFAULTS = {
'start': '2012-01-01',
'end': '2012-12-31',
'data_frequency': 'daily',
'capital_base': '10e6',
'source': 'yahoo',
'symbols': 'AAPL'
}
def parse_args(argv, ipython_mode=False):
"""Parse list of arguments.
If a config file is provided (via -c), it will read in the
supplied options and overwrite any global defaults.
All other directly supplied arguments will overwrite the config
file settings.
Arguments:
* argv : list of strings
List of arguments, e.g. ['-c', 'my.conf']
    * ipython_mode : bool <default=False>
Whether to parse IPython specific arguments
like --local_namespace
Notes:
Default settings can be found in zipline.utils.cli.DEFAULTS.
"""
# Parse any conf_file specification
# We make this parser with add_help=False so that
# it doesn't parse -h and print help.
conf_parser = argparse.ArgumentParser(
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file",
metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(argv)
defaults = copy(DEFAULTS)
if args.conf_file:
config = configparser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("Defaults")))
# Parse rest of arguments
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
description="Zipline version %s." % zipline.__version__,
parents=[conf_parser]
)
parser.set_defaults(**defaults)
parser.add_argument('--algofile', '-f')
parser.add_argument('--data-frequency',
choices=('minute', 'daily'))
parser.add_argument('--start', '-s')
parser.add_argument('--end', '-e')
parser.add_argument('--capital_base')
parser.add_argument('--source', choices=('yahoo',))
parser.add_argument('--symbols')
parser.add_argument('--output', '-o')
if ipython_mode:
parser.add_argument('--local_namespace', action='store_true')
args = parser.parse_args(remaining_argv)
return(vars(args))
def parse_cell_magic(line, cell):
"""Parse IPython magic
"""
args_list = line.split(' ')
args = parse_args(args_list, ipython_mode=True)
local_namespace = args.pop('local_namespace', False)
# By default, execute inside IPython namespace
if not local_namespace:
args['namespace'] = get_ipython().user_ns # flake8: noqa
# If we are running inside NB, do not output to file but create a
# variable instead
output_var_name = args.pop('output', None)
perf = run_pipeline(print_algo=False, algo_text=cell, **args)
if output_var_name is not None:
get_ipython().user_ns[output_var_name] = perf # flake8: noqa
def run_pipeline(print_algo=True, **kwargs):
"""Runs a full zipline pipeline given configuration keyword
arguments.
    1. Load data (start and end dates can be provided as strings, as
       can the source and symbols).
2. Instantiate algorithm (supply either algo_text or algofile
kwargs containing initialize() and handle_data() functions). If
algofile is supplied, will try to look for algofile_analyze.py and
append it.
3. Run algorithm (supply capital_base as float).
4. Return performance dataframe.
:Arguments:
* print_algo : bool <default=True>
Whether to print the algorithm to command line. Will use
pygments syntax coloring if pygments is found.
"""
start = pd.Timestamp(kwargs['start'], tz='UTC')
end = pd.Timestamp(kwargs['end'], tz='UTC')
symbols = kwargs['symbols'].split(',')
if kwargs['source'] == 'yahoo':
source = zipline.data.load_bars_from_yahoo(
stocks=symbols, start=start, end=end)
else:
raise NotImplementedError(
'Source %s not implemented.' % kwargs['source'])
algo_text = kwargs.get('algo_text', None)
if algo_text is None:
# Expect algofile to be set
algo_fname = kwargs['algofile']
with open(algo_fname, 'r') as fd:
algo_text = fd.read()
analyze_fname = os.path.splitext(algo_fname)[0] + '_analyze.py'
if os.path.exists(analyze_fname):
with open(analyze_fname, 'r') as fd:
# Simply append
algo_text += fd.read()
if print_algo:
if PYGMENTS:
highlight(algo_text, PythonLexer(), TerminalFormatter(),
outfile=sys.stdout)
else:
print_(algo_text)
algo = zipline.TradingAlgorithm(script=algo_text,
namespace=kwargs.get('namespace', {}),
capital_base=float(kwargs['capital_base']))
perf = algo.run(source)
output_fname = kwargs.get('output', None)
if output_fname is not None:
perf.to_pickle(output_fname)
return perf
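# Illustrative usage sketch of run_pipeline ('buy_apple.py' is a hypothetical algofile
# containing initialize() and handle_data()):
#
#   >>> kwargs = dict(DEFAULTS, algofile='buy_apple.py')
#   >>> perf = run_pipeline(print_algo=False, **kwargs)
#   >>> perf.portfolio_value.tail()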
| apache-2.0 |
camisatx/pySecMaster | pySecMaster/query_database.py | 1 | 3842 | import pandas as pd
import psycopg2
import time
__author__ = 'Josh Schertz'
__copyright__ = 'Copyright (C) 2018 Josh Schertz'
__description__ = 'An automated system to store and maintain financial data.'
__email__ = 'josh[AT]joshschertz[DOT]com'
__license__ = 'GNU AGPLv3'
__maintainer__ = 'Josh Schertz'
__status__ = 'Development'
__url__ = 'https://joshschertz.com/'
__version__ = '1.5.0'
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
def query_entire_table(database, user, password, host, port, table):
""" Query all of the active tsid values from the specified database.
:param database: String of the database name
:param user: String of the username used to login to the database
:param password: String of the password used to login to the database
:param host: String of the database address (localhost, url, ip, etc.)
:param port: Integer of the database port number (5432)
:param table: String of the table whose values should be returned
:return: DataFrame of the returned values
"""
conn = psycopg2.connect(database=database, user=user, password=password,
host=host, port=port)
try:
with conn:
cur = conn.cursor()
query = ("""SELECT sym.source_id AS tsid
FROM symbology AS sym,
LATERAL (
SELECT source_id
FROM %s
WHERE source_id = sym.source_id
ORDER BY source_id ASC NULLS LAST
LIMIT 1) AS prices""" %
(table,))
cur.execute(query)
rows = cur.fetchall()
if rows:
df = pd.DataFrame(rows)
else:
raise SystemExit('No data returned from query_entire_table')
return df
    except psycopg2.OperationalError:
        raise SystemError('Unable to connect to the %s database in '
                          'query_entire_table. Make sure the database '
                          'address/name are correct.' % database)
    except psycopg2.Error as e:
        print(
            'Error when trying to retrieve data from the %s database in '
            'query_entire_table' % database)
        print(e)
    except Exception as e:
        print(e)
        raise SystemError('Error: Unknown issue occurred in query_entire_table')
if __name__ == '__main__':
from utilities.user_dir import user_dir
userdir = user_dir()
test_database = userdir['postgresql']['pysecmaster_db']
test_user = userdir['postgresql']['pysecmaster_user']
test_password = userdir['postgresql']['pysecmaster_password']
test_host = userdir['postgresql']['pysecmaster_host']
test_port = userdir['postgresql']['pysecmaster_port']
test_table = 'daily_prices' # daily_prices, minute_prices, quandl_codes
start_time = time.time()
table_df = query_entire_table(test_database, test_user, test_password,
test_host, test_port, test_table)
print('Query took %0.2f seconds' % (time.time() - start_time))
# table_df.to_csv('%s.csv' % test_table)
print(table_df)
| agpl-3.0 |
mistercrunch/panoramix | superset/examples/multiformat_time_series.py | 3 | 4438 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional, Tuple
import pandas as pd
from sqlalchemy import BigInteger, Date, DateTime, String
from superset import db
from superset.models.slice import Slice
from superset.utils.core import get_example_database
from .helpers import (
config,
get_example_data,
get_slice_json,
merge_slice,
misc_dash_slices,
TBL,
)
def load_multiformat_time_series(
only_metadata: bool = False, force: bool = False
) -> None:
"""Loading time series data from a zip file in the repo"""
tbl_name = "multiformat_time_series"
database = get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
data = get_example_data("multiformat_time_series.json.gz")
pdf = pd.read_json(data)
# TODO(bkyryliuk): move load examples data into the pytest fixture
if database.backend == "presto":
pdf.ds = pd.to_datetime(pdf.ds, unit="s")
pdf.ds = pdf.ds.dt.strftime("%Y-%m-%d")
pdf.ds2 = pd.to_datetime(pdf.ds2, unit="s")
            pdf.ds2 = pdf.ds2.dt.strftime("%Y-%m-%d %H:%M:%S")
else:
pdf.ds = pd.to_datetime(pdf.ds, unit="s")
pdf.ds2 = pd.to_datetime(pdf.ds2, unit="s")
pdf.to_sql(
tbl_name,
database.get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={
"ds": String(255) if database.backend == "presto" else Date,
"ds2": String(255) if database.backend == "presto" else DateTime,
"epoch_s": BigInteger,
"epoch_ms": BigInteger,
"string0": String(100),
"string1": String(100),
"string2": String(100),
"string3": String(100),
},
index=False,
)
print("Done loading table!")
print("-" * 80)
print(f"Creating table [{tbl_name}] reference")
obj = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not obj:
obj = TBL(table_name=tbl_name)
obj.main_dttm_col = "ds"
obj.database = database
dttm_and_expr_dict: Dict[str, Tuple[Optional[str], None]] = {
"ds": (None, None),
"ds2": (None, None),
"epoch_s": ("epoch_s", None),
"epoch_ms": ("epoch_ms", None),
"string2": ("%Y%m%d-%H%M%S", None),
"string1": ("%Y-%m-%d^%H:%M:%S", None),
"string0": ("%Y-%m-%d %H:%M:%S.%f", None),
"string3": ("%Y/%m/%d%H:%M:%S.%f", None),
}
for col in obj.columns:
dttm_and_expr = dttm_and_expr_dict[col.column_name]
col.python_date_format = dttm_and_expr[0]
col.dbatabase_expr = dttm_and_expr[1]
col.is_dttm = True
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
print("Creating Heatmap charts")
for i, col in enumerate(tbl.columns):
slice_data = {
"metrics": ["count"],
"granularity_sqla": col.column_name,
"row_limit": config["ROW_LIMIT"],
"since": "2015",
"until": "2016",
"viz_type": "cal_heatmap",
"domain_granularity": "month",
"subdomain_granularity": "day",
}
slc = Slice(
slice_name=f"Calendar Heatmap multiformat {i}",
viz_type="cal_heatmap",
datasource_type="table",
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
misc_dash_slices.add("Calendar Heatmap multiformat 0")
| apache-2.0 |
alexsavio/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | 20 | 12566 | """Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2,
RBF(length_scale=[2.0]), Matern(length_scale=[2.0])]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
# Compare analytic and numeric gradient of kernels.
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
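        # _approx_fprime numerically differentiates the kernel w.r.t. theta, so the
        # finite-difference gradient below should match the analytic eval_gradient
        # output to about 4 decimal places, which is what the assertion checks.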
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
# Check that parameter vector theta of kernel is set correctly.
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s.rstrip("_bounds"),
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i + 1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i + 1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
# Auto-correlation and cross-correlation should be consistent.
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
# Test that diag method of kernel returns consistent results.
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
# Adding kernels and multiplying kernels should be commutative.
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
# Anisotropic kernel should be consistent with isotropic kernels.
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
# Test stationarity of kernels.
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def check_hyperparameters_equal(kernel1, kernel2):
# Check that hyperparameters of two kernels are equal
for attr in set(dir(kernel1) + dir(kernel2)):
if attr.startswith("hyperparameter_"):
attr_value1 = getattr(kernel1, attr)
attr_value2 = getattr(kernel2, attr)
assert_equal(attr_value1, attr_value2)
def test_kernel_clone():
# Test that sklearn's clone works correctly on kernels.
bounds = (1e-5, 1e5)
for kernel in kernels:
kernel_cloned = clone(kernel)
# XXX: Should this be fixed?
# This differs from the sklearn's estimators equality check.
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
# Check that all constructor parameters are equal.
assert_equal(kernel.get_params(), kernel_cloned.get_params())
# Check that all hyperparameters are equal.
yield check_hyperparameters_equal, kernel, kernel_cloned
# This test is to verify that using set_params does not
# break clone on kernels.
# This used to break because in kernels such as the RBF, non-trivial
# logic that modified the length scale used to be in the constructor
# See https://github.com/scikit-learn/scikit-learn/issues/6961
# for more details.
params = kernel.get_params()
# RationalQuadratic kernel is isotropic.
isotropic_kernels = (ExpSineSquared, RationalQuadratic)
if 'length_scale' in params and not isinstance(kernel,
isotropic_kernels):
length_scale = params['length_scale']
if np.iterable(length_scale):
params['length_scale'] = length_scale[0]
params['length_scale_bounds'] = bounds
else:
params['length_scale'] = [length_scale] * 2
params['length_scale_bounds'] = bounds * 2
kernel_cloned.set_params(**params)
kernel_cloned_clone = clone(kernel_cloned)
assert_equal(kernel_cloned_clone.get_params(),
kernel_cloned.get_params())
assert_not_equal(id(kernel_cloned_clone), id(kernel_cloned))
yield (check_hyperparameters_equal, kernel_cloned,
kernel_cloned_clone)
def test_matern_kernel():
# Test consistency of Matern kernel for special values of nu.
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
# Check that GP kernels can also be used as pairwise kernels.
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
# Check that set_params()/get_params() is consistent with kernel.theta.
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value] * size})
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
[value] * size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
def test_repr_kernels():
# Smoke-test for repr in kernels.
for kernel in kernels:
repr(kernel)
| bsd-3-clause |
yebrahim/pydatalab | google/datalab/bigquery/_table.py | 4 | 32766 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements Table, and related Table BigQuery APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from builtins import str
from past.utils import old_div
from builtins import object
import calendar
import codecs
import csv
import datetime
import pandas
import time
import traceback
import uuid
import sys
import google.datalab
import google.datalab.utils
from . import _api
from . import _csv_options
from . import _job
from . import _parser
from . import _schema
from . import _utils
class TableMetadata(object):
"""Represents metadata about a BigQuery table."""
def __init__(self, table, info):
"""Initializes a TableMetadata instance.
Args:
table: the Table object this belongs to.
info: The BigQuery information about this table as a Python dictionary.
"""
self._table = table
self._info = info
@property
def created_on(self):
"""The creation timestamp."""
timestamp = self._info.get('creationTime')
return _parser.Parser.parse_timestamp(timestamp)
@property
def description(self):
"""The description of the table if it exists."""
return self._info.get('description', '')
@property
def expires_on(self):
"""The timestamp for when the table will expire, or None if unknown."""
timestamp = self._info.get('expirationTime', None)
if timestamp is None:
return None
return _parser.Parser.parse_timestamp(timestamp)
@property
def friendly_name(self):
"""The friendly name of the table if it exists."""
return self._info.get('friendlyName', '')
@property
def modified_on(self):
"""The timestamp for when the table was last modified."""
timestamp = self._info.get('lastModifiedTime')
return _parser.Parser.parse_timestamp(timestamp)
@property
def rows(self):
"""The number of rows within the table, or -1 if unknown. """
return int(self._info['numRows']) if 'numRows' in self._info else -1
@property
def size(self):
"""The size of the table in bytes, or -1 if unknown. """
return int(self._info['numBytes']) if 'numBytes' in self._info else -1
def refresh(self):
""" Refresh the metadata. """
self._info = self._table._load_info()
class Table(object):
"""Represents a Table object referencing a BigQuery table. """
# Allowed characters in a BigQuery table column name
_VALID_COLUMN_NAME_CHARACTERS = '_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
# When fetching table contents for a range or iteration, use a small page size per request
_DEFAULT_PAGE_SIZE = 1024
# When fetching the entire table, use the maximum number of rows. The BigQuery service
# will always return fewer rows than this if their encoded JSON size is larger than 10MB
_MAX_PAGE_SIZE = 100000
# Milliseconds per week
_MSEC_PER_WEEK = 7 * 24 * 3600 * 1000
def __init__(self, name, context=None):
"""Initializes an instance of a Table object. The Table need not exist yet.
Args:
name: the name of the table either as a string or a 3-part tuple (projectid, datasetid, name).
If a string, it must have the form '<project>.<dataset>.<table>' or '<dataset>.<table>'.
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
Raises:
Exception if the name is invalid.
"""
if context is None:
context = google.datalab.Context.default()
self._context = context
self._api = _api.Api(context)
self._name_parts = _utils.parse_table_name(name, self._api.project_id)
self._full_name = '%s.%s.%s%s' % self._name_parts
self._info = None
self._cached_page = None
self._cached_page_index = 0
self._schema = None
@property
def name(self):
"""The TableName named tuple (project_id, dataset_id, table_id, decorator) for the table."""
return self._name_parts
@property
def full_name(self):
"""The full name of the table in the form of project.dataset.table."""
return self._full_name
@property
def job(self):
""" For tables resulting from executing queries, the job that created the table.
Default is None for a Table object; this is overridden by QueryResultsTable.
"""
return None
@property
def is_temporary(self):
""" Whether this is a short-lived table or not. Always False for non-QueryResultsTables. """
return False
def _load_info(self):
"""Loads metadata about this table."""
if self._info is None:
try:
self._info = self._api.tables_get(self._name_parts)
except Exception as e:
raise e
@property
def metadata(self):
"""Retrieves metadata about the table.
Returns:
A TableMetadata object.
Raises
Exception if the request could not be executed or the response was malformed.
"""
self._load_info()
return TableMetadata(self, self._info)
def exists(self):
"""Checks if the table exists.
Returns:
True if the table exists; False otherwise.
Raises:
Exception if there was an error requesting information about the table.
"""
try:
info = self._api.tables_get(self._name_parts)
except google.datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
self._info = info
return True
def delete(self):
""" Delete the table.
Returns:
True if the Table no longer exists; False otherwise.
"""
try:
self._api.table_delete(self._name_parts)
except google.datalab.utils.RequestException:
# TODO(gram): May want to check the error reasons here and if it is not
# because the file didn't exist, return an error.
pass
except Exception as e:
raise e
return not self.exists()
def create(self, schema, overwrite=False):
""" Create the table with the specified schema.
Args:
schema: the schema to use to create the table. Should be a list of dictionaries, each
containing at least a pair of entries, 'name' and 'type'.
See https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
overwrite: if True, delete the table first if it exists. If False and the table exists,
creation will fail and raise an Exception.
Returns:
The Table instance.
Raises:
      Exception if the table couldn't be created or already exists and overwrite was False.
"""
if overwrite and self.exists():
self.delete()
if not isinstance(schema, _schema.Schema):
# Convert to a Schema object
schema = _schema.Schema(schema)
try:
response = self._api.tables_insert(self._name_parts, schema=schema._bq_schema)
except Exception as e:
raise e
if 'selfLink' in response:
self._schema = schema
return self
raise Exception("Table %s could not be created as it already exists" % self._full_name)
@staticmethod
def _encode_dict_as_row(record, column_name_map):
""" Encode a dictionary representing a table row in a form suitable for streaming to BQ.
This includes encoding timestamps as ISO-compatible strings and removing invalid
characters from column names.
Args:
record: a Python dictionary representing the table row.
column_name_map: a dictionary mapping dictionary keys to column names. This is initially
empty and built up by this method when it first encounters each column, then used as a
cache subsequently.
Returns:
The sanitized dictionary.
"""
for k in list(record.keys()):
v = record[k]
# If the column is a date, convert to ISO string.
if isinstance(v, (pandas.Timestamp, datetime.datetime, datetime.date, datetime.time)):
v = record[k] = record[k].isoformat()
# If k has invalid characters clean it up
if k not in column_name_map:
column_name_map[k] = ''.join(c for c in k if c in Table._VALID_COLUMN_NAME_CHARACTERS)
new_k = column_name_map[k]
if k != new_k:
record[new_k] = v
del record[k]
return record
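  # Illustrative sketch of what _encode_dict_as_row does to one (hypothetical) row:
  #
  #   >>> Table._encode_dict_as_row({'my col!': 3, 'ts': datetime.datetime(2016, 1, 1)}, {})
  #   {'mycol': 3, 'ts': '2016-01-01T00:00:00'}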
def insert(self, data, include_index=False, index_name=None):
""" Insert the contents of a Pandas DataFrame or a list of dictionaries into the table.
The insertion will be performed using at most 500 rows per POST, and at most 10 POSTs per
second, as BigQuery has some limits on streaming rates.
Args:
data: the DataFrame or list to insert.
include_index: whether to include the DataFrame or list index as a column in the BQ table.
index_name: for a list, if include_index is True, this should be the name for the index.
If not specified, 'Index' will be used.
Returns:
The table.
Raises:
Exception if the table doesn't exist, the table's schema differs from the data's schema,
or the insert failed.
"""
    # TODO(gram): we could create the Table here if it doesn't exist, using a schema derived
    # from the data. IIRC we decided not to, but doing so seems less unwieldy than having to
    # create it first and then validate the schema against it.
# There are BigQuery limits on the streaming API:
#
# max_rows_per_post = 500
# max_bytes_per_row = 20000
# max_rows_per_second = 10000
# max_bytes_per_post = 1000000
# max_bytes_per_second = 10000000
#
# It is non-trivial to enforce these here, and the max bytes per row is not something we
# can really control. As an approximation we enforce the 500 row limit
# with a 0.05 sec POST interval (to enforce the 10,000 rows per sec limit).
max_rows_per_post = 500
post_interval = 0.05
# TODO(gram): add different exception types for each failure case.
if not self.exists():
raise Exception('Table %s does not exist.' % self._full_name)
data_schema = _schema.Schema.from_data(data)
if isinstance(data, list):
if include_index:
if not index_name:
index_name = 'Index'
data_schema._add_field(index_name, 'INTEGER')
table_schema = self.schema
# Do some validation of the two schema to make sure they are compatible.
for data_field in data_schema:
name = data_field.name
table_field = table_schema[name]
if table_field is None:
raise Exception('Table does not contain field %s' % name)
data_type = data_field.type
table_type = table_field.type
if table_type != data_type:
raise Exception('Field %s in data has type %s but in table has type %s' %
(name, data_type, table_type))
total_rows = len(data)
total_pushed = 0
job_id = uuid.uuid4().hex
rows = []
column_name_map = {}
is_dataframe = isinstance(data, pandas.DataFrame)
if is_dataframe:
# reset_index creates a new dataframe so we don't affect the original. reset_index(drop=True)
# drops the original index and uses an integer range.
gen = data.reset_index(drop=not include_index).iterrows()
else:
gen = enumerate(data)
for index, row in gen:
if is_dataframe:
row = row.to_dict()
elif include_index:
row[index_name] = index
rows.append({
'json': self._encode_dict_as_row(row, column_name_map),
'insertId': job_id + str(index)
})
total_pushed += 1
if (total_pushed == total_rows) or (len(rows) == max_rows_per_post):
try:
response = self._api.tabledata_insert_all(self._name_parts, rows)
except Exception as e:
raise e
if 'insertErrors' in response:
raise Exception('insertAll failed: %s' % response['insertErrors'])
time.sleep(post_interval) # Streaming API is rate-limited
rows = []
# Block until data is ready
while True:
self._info = self._api.tables_get(self._name_parts)
if 'streamingBuffer' not in self._info or \
'estimatedRows' not in self._info['streamingBuffer'] or \
int(self._info['streamingBuffer']['estimatedRows']) > 0:
break
time.sleep(2)
return self
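  # Illustrative usage sketch of insert() with a DataFrame (names are hypothetical; the
  # table must already exist with a schema that matches the frame):
  #
  #   >>> df = pandas.DataFrame({'ts': pandas.to_datetime(['2016-01-01']), 'value': [1.5]})
  #   >>> Table('my-project.my_dataset.readings').insert(df)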
def _init_job_from_response(self, response):
""" Helper function to create a Job instance from a response. """
job = None
if response and 'jobReference' in response:
job = _job.Job(job_id=response['jobReference']['jobId'], context=self._context)
return job
def extract_async(self, destination, format='csv', csv_delimiter=None, csv_header=True,
compress=False):
"""Starts a job to export the table to GCS.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the export Job if it was started successfully; else None.
"""
format = format.upper()
if format == 'JSON':
format = 'NEWLINE_DELIMITED_JSON'
if format == 'CSV' and csv_delimiter is None:
csv_delimiter = ','
try:
response = self._api.table_extract(self._name_parts, destination, format, compress,
csv_delimiter, csv_header)
return self._init_job_from_response(response)
except Exception as e:
raise google.datalab.JobError(location=traceback.format_exc(), message=str(e),
reason=str(type(e)))
def extract(self, destination, format='csv', csv_delimiter=None, csv_header=True, compress=False):
"""Exports the table to GCS; blocks until complete.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the completed export Job if it was started successfully; else None.
"""
job = self.extract_async(destination, format=format, csv_delimiter=csv_delimiter,
csv_header=csv_header, compress=compress)
if job is not None:
job.wait()
return job
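  # Illustrative usage sketch of extract() (the GCS path is hypothetical):
  #
  #   >>> Table('my-project.my_dataset.readings').extract('gs://my-bucket/readings-*.csv',
  #   ...                                                 format='csv', compress=True)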
def load_async(self, source, mode='create', source_format='csv', csv_options=None,
ignore_unknown_values=False, max_bad_records=0):
""" Starts importing a table from GCS and return a Future.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the import if it was started successfully or None if not.
Raises:
Exception if the load job failed to be started or invalid arguments were supplied.
"""
if source_format == 'csv':
source_format = 'CSV'
elif source_format == 'json':
source_format = 'NEWLINE_DELIMITED_JSON'
else:
raise Exception("Invalid source format %s" % source_format)
if not(mode == 'create' or mode == 'append' or mode == 'overwrite'):
raise Exception("Invalid mode %s" % mode)
if csv_options is None:
csv_options = _csv_options.CSVOptions()
try:
response = self._api.jobs_insert_load(source, self._name_parts,
append=(mode == 'append'),
overwrite=(mode == 'overwrite'),
create=(mode == 'create'),
source_format=source_format,
field_delimiter=csv_options.delimiter,
allow_jagged_rows=csv_options.allow_jagged_rows,
allow_quoted_newlines=csv_options.allow_quoted_newlines,
encoding=csv_options.encoding.upper(),
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records,
quote=csv_options.quote,
skip_leading_rows=csv_options.skip_leading_rows)
except Exception as e:
raise e
return self._init_job_from_response(response)
def load(self, source, mode='create', source_format='csv', csv_options=None,
ignore_unknown_values=False, max_bad_records=0):
""" Load the table from GCS.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: if True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the completed load Job if it was started successfully; else None.
"""
job = self.load_async(source,
mode=mode,
source_format=source_format,
csv_options=csv_options,
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records)
if job is not None:
job.wait()
return job
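  # Illustrative usage sketch of load() (the GCS path is hypothetical, and CSVOptions is
  # assumed to accept skip_leading_rows as shown):
  #
  #   >>> opts = _csv_options.CSVOptions(skip_leading_rows=1)
  #   >>> Table('my-project.my_dataset.readings').load('gs://my-bucket/readings-*.csv',
  #   ...                                              mode='append', csv_options=opts)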
def _get_row_fetcher(self, start_row=0, max_rows=None, page_size=_DEFAULT_PAGE_SIZE):
""" Get a function that can retrieve a page of rows.
The function returned is a closure so that it can have a signature suitable for use
by Iterator.
Args:
start_row: the row to start fetching from; default 0.
max_rows: the maximum number of rows to fetch (across all calls, not per-call). Default
is None which means no limit.
page_size: the maximum number of results to fetch per page; default 1024.
Returns:
A function that can be called repeatedly with a page token and running count, and that
will return an array of rows and a next page token; when the returned page token is None
the fetch is complete.
"""
if not start_row:
start_row = 0
elif start_row < 0: # We are measuring from the table end
if self.length >= 0:
start_row += self.length
else:
raise Exception('Cannot use negative indices for table of unknown length')
schema = self.schema._bq_schema
name_parts = self._name_parts
def _retrieve_rows(page_token, count):
page_rows = []
if max_rows and count >= max_rows:
page_token = None
else:
if max_rows and page_size > (max_rows - count):
max_results = max_rows - count
else:
max_results = page_size
try:
if page_token:
response = self._api.tabledata_list(name_parts, page_token=page_token,
max_results=max_results)
else:
response = self._api.tabledata_list(name_parts, start_index=start_row,
max_results=max_results)
except Exception as e:
raise e
page_token = response['pageToken'] if 'pageToken' in response else None
if 'rows' in response:
page_rows = response['rows']
rows = []
for row_dict in page_rows:
rows.append(_parser.Parser.parse_row(schema, row_dict))
return rows, page_token
return _retrieve_rows
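  # Note on the closure above: the returned fetcher has the (page_token, count) ->
  # (rows, next_page_token) signature expected by google.datalab.utils.Iterator, so
  # paging state stays out of the Table object itself. Illustrative use, where tbl is
  # a hypothetical Table instance:
  #
  #   >>> fetch = tbl._get_row_fetcher(start_row=0, max_rows=10)
  #   >>> rows, token = fetch(None, 0)            # first page
  #   >>> more, token = fetch(token, len(rows))   # next pages until token is None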
def range(self, start_row=0, max_rows=None):
""" Get an iterator to iterate through a set of table rows.
Args:
start_row: the row of the table at which to start the iteration (default 0)
max_rows: an upper limit on the number of rows to iterate through (default None)
Returns:
A row iterator.
"""
fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)
return iter(google.datalab.utils.Iterator(fetcher))
def to_dataframe(self, start_row=0, max_rows=None):
""" Exports the table to a Pandas dataframe.
Args:
start_row: the row of the table at which to start the export (default 0)
max_rows: an upper limit on the number of rows to export (default None)
Returns:
A Pandas dataframe containing the table data.
"""
fetcher = self._get_row_fetcher(start_row=start_row,
max_rows=max_rows,
page_size=self._MAX_PAGE_SIZE)
count = 0
page_token = None
# Collect results of page fetcher in separate dataframe objects, then
# concatenate them to reduce the amount of copying
df_list = []
df = None
while True:
page_rows, page_token = fetcher(page_token, count)
if len(page_rows):
count += len(page_rows)
df_list.append(pandas.DataFrame.from_records(page_rows))
if not page_token:
break
if df_list:
df = pandas.concat(df_list, ignore_index=True, copy=False)
# Need to reorder the dataframe to preserve column ordering
ordered_fields = [field.name for field in self.schema]
return df[ordered_fields] if df is not None else pandas.DataFrame()
def to_file(self, destination, format='csv', csv_delimiter=',', csv_header=True):
"""Save the results to a local file in CSV format.
Args:
destination: path on the local filesystem for the saved results.
format: the format to use for the exported data; currently only 'csv' is supported.
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
Raises:
An Exception if the operation failed.
"""
f = codecs.open(destination, 'w', 'utf-8')
fieldnames = []
for column in self.schema:
fieldnames.append(column.name)
if sys.version_info[0] == 2:
csv_delimiter = csv_delimiter.encode('unicode_escape')
writer = csv.DictWriter(f, fieldnames=fieldnames, delimiter=csv_delimiter)
if csv_header:
writer.writeheader()
for row in self:
writer.writerow(row)
f.close()
@property
def schema(self):
"""Retrieves the schema of the table.
Returns:
A Schema object containing a list of schema fields and associated metadata.
Raises
Exception if the request could not be executed or the response was malformed.
"""
if not self._schema:
try:
self._load_info()
self._schema = _schema.Schema(self._info['schema']['fields'])
except KeyError:
raise Exception('Unexpected table response: missing schema')
return self._schema
def update(self, friendly_name=None, description=None, expiry=None, schema=None):
""" Selectively updates Table information.
Any parameters that are omitted or None are not updated.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
expiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.
schema: if not None, the new schema: either a list of dictionaries or a Schema.
"""
self._load_info()
if friendly_name is not None:
self._info['friendlyName'] = friendly_name
if description is not None:
self._info['description'] = description
if expiry is not None:
if isinstance(expiry, datetime.datetime):
expiry = calendar.timegm(expiry.utctimetuple()) * 1000
self._info['expirationTime'] = expiry
if schema is not None:
if isinstance(schema, _schema.Schema):
schema = schema._bq_schema
self._info['schema'] = {'fields': schema}
try:
self._api.table_update(self._name_parts, self._info)
except google.datalab.utils.RequestException:
# The cached metadata is out of sync now; abandon it.
self._info = None
except Exception as e:
raise e
def _repr_sql_(self):
"""Returns a representation of the table for embedding into a SQL statement.
Returns:
A formatted table name for use within SQL statements.
"""
return '`' + self._full_name + '`'
def __repr__(self):
"""Returns a representation for the table for showing in the notebook.
"""
return 'BigQuery Table - name: %s' % self._full_name
@property
def length(self):
""" Get the length of the table (number of rows). We don't use __len__ as this may
return -1 for 'unknown'.
"""
return self.metadata.rows
def __iter__(self):
""" Get an iterator for the table.
"""
return self.range(start_row=0)
def __getitem__(self, item):
""" Get an item or a slice of items from the table. This uses a small cache
to reduce the number of calls to tabledata.list.
Note: this is a useful function to have, and supports some current usage like
query.execute().result()[0], but should be used with care.
"""
if isinstance(item, slice):
# Just treat this as a set of calls to __getitem__(int)
result = []
i = item.start
step = item.step if item.step else 1
while i < item.stop:
result.append(self[i])
i += step
return result
# Handle the integer index case.
if item < 0:
if self.length >= 0:
item += self.length
else:
raise Exception('Cannot use negative indices for table of unknown length')
if not self._cached_page \
or self._cached_page_index > item \
or self._cached_page_index + len(self._cached_page) <= item:
# cache a new page. To get the start row we round to the nearest multiple of the page
# size.
first = old_div(item, self._DEFAULT_PAGE_SIZE) * self._DEFAULT_PAGE_SIZE
count = self._DEFAULT_PAGE_SIZE
if self.length >= 0:
remaining = self.length - first
if count > remaining:
count = remaining
fetcher = self._get_row_fetcher(start_row=first, max_rows=count, page_size=count)
self._cached_page_index = first
self._cached_page, _ = fetcher(None, 0)
return self._cached_page[item - self._cached_page_index]
@staticmethod
def _convert_decorator_time(when):
if isinstance(when, datetime.datetime):
value = 1000 * (when - datetime.datetime.utcfromtimestamp(0)).total_seconds()
elif isinstance(when, datetime.timedelta):
value = when.total_seconds() * 1000
if value > 0:
raise Exception("Invalid snapshot relative when argument: %s" % str(when))
else:
raise Exception("Invalid snapshot when argument type: %s" % str(when))
if value < -Table._MSEC_PER_WEEK:
raise Exception("Invalid snapshot relative when argument: must be within 7 days: %s"
% str(when))
if value > 0:
now = 1000 * (datetime.datetime.utcnow() -
datetime.datetime.utcfromtimestamp(0)).total_seconds()
# Check that an abs value is not more than 7 days in the past and is
# not in the future
if not ((now - Table._MSEC_PER_WEEK) < value < now):
raise Exception("Invalid snapshot absolute when argument: %s" % str(when))
return int(value)
def snapshot(self, at):
""" Return a new Table which is a snapshot of this table at the specified time.
Args:
at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
          than seven days in the past. Passing None will get a reference to the oldest snapshot.
Note that using a datetime will get a snapshot at an absolute point in time, while
a timedelta will provide a varying snapshot; any queries issued against such a Table
will be done against a snapshot that has an age relative to the execution time of the
query.
Returns:
A new Table object referencing the snapshot.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use snapshot() on an already decorated table")
value = Table._convert_decorator_time(at)
return Table("%s@%s" % (self._full_name, str(value)), context=self._context)
def window(self, begin, end=None):
""" Return a new Table limited to the rows added to this Table during the specified time range.
Args:
begin: the start time of the window. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past.
Note that using a relative value will provide a varying snapshot, not a fixed
snapshot; any queries issued against such a Table will be done against a snapshot
that has an age relative to the execution time of the query.
end: the end time of the snapshot; if None, then the current time is used. The types and
          interpretation of values are the same as for begin.
Returns:
A new Table object referencing the window.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use window() on an already decorated table")
start = Table._convert_decorator_time(begin)
if end is None:
if isinstance(begin, datetime.timedelta):
end = datetime.timedelta(0)
else:
end = datetime.datetime.utcnow()
stop = Table._convert_decorator_time(end)
# Both values must have the same sign
if (start > 0 >= stop) or (stop > 0 >= start):
raise Exception("window: Between arguments must both be absolute or relative: %s, %s" %
(str(begin), str(end)))
# start must be less than stop
if start > stop:
raise Exception("window: Between arguments: begin must be before end: %s, %s" %
(str(begin), str(end)))
return Table("%s@%s-%s" % (self._full_name, str(start), str(stop)), context=self._context)
| apache-2.0 |
Obus/scikit-learn | sklearn/preprocessing/__init__.py | 31 | 1235 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
| bsd-3-clause |
lucidfrontier45/scikit-learn | sklearn/tests/test_preprocessing.py | 1 | 25156 | import warnings
import numpy as np
import numpy.linalg as la
import scipy.sparse as sp
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.sparsefuncs import mean_variance_axis0
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import KernelCenterer
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import add_dummy_feature
from sklearn.preprocessing import balance_weights
from sklearn import datasets
from sklearn.linear_model.stochastic_gradient import SGDClassifier
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_scaler_1d():
"""Test scaling of dataset along single axis"""
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
def test_scaler_2d_arrays():
"""Test scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_equal(X_trans.min(axis=0), 0)
assert_array_equal(X_trans.min(axis=0), 0)
assert_array_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_equal(X_trans.min(axis=0), 1)
assert_array_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
"""Check min max scaler on toy data with zero variance features"""
X = [[0., 1., 0.5],
[0., 1., -0.1],
[0., 1., 1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
"""Check that StandardScaler.fit does not change input"""
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sp.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sp.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sp.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sp.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_warning_scaling_integers():
"""Check warning when scaling integer data"""
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
with warnings.catch_warnings(record=True) as w:
StandardScaler().fit(X)
assert_equal(len(w), 1)
with warnings.catch_warnings(record=True) as w:
MinMaxScaler().fit(X)
assert_equal(len(w), 1)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sp.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in xrange(3):
            assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in xrange(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sp.csr_matrix))
X_norm = toarray(X_norm)
for i in xrange(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, 0]])
for init in (np.array, sp.csr_matrix, sp.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(type(X), type(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
def test_label_binarizer():
lb = LabelBinarizer()
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# two-class case
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 2, 2, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_multilabel():
lb = LabelBinarizer()
# test input as lists of tuples
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
got = lb.fit_transform(inp)
assert_array_equal(indicator_mat, got)
assert_equal(lb.inverse_transform(got), inp)
# test input as label indicator matrix
lb.fit(indicator_mat)
assert_array_equal(indicator_mat,
lb.inverse_transform(indicator_mat))
# regression test for the two-class multilabel case
lb = LabelBinarizer()
inp = [[1, 0], [0], [1], [0, 1]]
expected = np.array([[1, 1],
[1, 0],
[0, 1],
[1, 1]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_equal([set(x) for x in lb.inverse_transform(got)],
[set(x) for x in inp])
def test_label_binarizer_errors():
"""Check that invalid arguments yield ValueError"""
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
def test_one_hot_encoder():
"""Test OneHotEncoder's fit and transform."""
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
    # test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_label_encoder():
"""Test LabelEncoder's transform and inverse_transform methods"""
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
"""Test fit_transform"""
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_string_labels():
"""Test LabelEncoder's transform and inverse_transform methods with
non-numeric labels"""
le = LabelEncoder()
le.fit(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(le.classes_, ["amsterdam", "paris", "tokyo"])
assert_array_equal(le.transform(["tokyo", "tokyo", "paris"]),
[2, 2, 1])
assert_array_equal(le.inverse_transform([2, 2, 1]),
["tokyo", "tokyo", "paris"])
assert_raises(ValueError, le.transform, ["london"])
def test_label_encoder_errors():
"""Check that invalid arguments yield ValueError"""
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
def test_label_binarizer_iris():
lb = LabelBinarizer()
Y = lb.fit_transform(iris.target)
clfs = [SGDClassifier().fit(iris.data, Y[:, k])
for k in range(len(lb.classes_))]
Y_pred = np.array([clf.decision_function(iris.data) for clf in clfs]).T
y_pred = lb.inverse_transform(Y_pred)
accuracy = np.mean(iris.target == y_pred)
y_pred2 = SGDClassifier().fit(iris.data, iris.target).predict(iris.data)
accuracy2 = np.mean(iris.target == y_pred2)
assert_almost_equal(accuracy, accuracy2)
def test_label_binarizer_multilabel_unlabeled():
"""Check that LabelBinarizer can handle an unlabeled sample"""
lb = LabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(lb.fit_transform(y), Y)
def test_center_kernel():
"""Test that KernelCenterer is equivalent to StandardScaler
in feature space"""
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sp.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sp.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sp.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sp.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sp.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sp.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_balance_weights():
weights = balance_weights([0, 0, 1, 1])
assert_array_equal(weights, [1., 1., 1., 1.])
weights = balance_weights([0, 1, 1, 1, 1])
assert_array_equal(weights, [1., 0.25, 0.25, 0.25, 0.25])
weights = balance_weights([0, 0])
assert_array_equal(weights, [1., 1.])
| bsd-3-clause |
woodscn/scipy | scipy/signal/signaltools.py | 2 | 115985 | # Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
import threading
import sys
import timeit
from . import sigtools, dlti
from ._upfirdn import upfirdn, _UpFIRDn, _output_len
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import fftpack, linalg
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
product, r_, ravel, real_if_close, reshape,
roots, sort, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
import math
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
from .filter_design import cheby1, _validate_sos
from .fir_filter_design import firwin
if sys.version_info >= (3, 5):
from math import gcd
else:
from fractions import gcd
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'resample_poly', 'detrend',
'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
        if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm'"
" (or 'symmetric').")
val = boundary << 2
return val
def _inputs_swap_needed(mode, shape1, shape2):
"""
If in 'valid' mode, returns whether or not the input arrays need to be
swapped depending on whether `shape1` is at least as large as `shape2` in
every dimension.
This is important for some of the correlation and convolution
implementations in this module, where the larger array input needs to come
before the smaller array input when operating in this mode.
Note that if the mode provided is not 'valid', False is immediately
returned.
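    A small illustrative sketch (shapes assumed, not from the original docs):
    >>> _inputs_swap_needed('valid', (10, 10), (3, 3))
    False
    >>> _inputs_swap_needed('valid', (3, 3), (10, 10))
    True
    >>> _inputs_swap_needed('same', (3, 3), (10, 10))
    False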
"""
if mode == 'valid':
ok1, ok2 = True, True
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
ok1 = False
if not d2 >= d1:
ok2 = False
if not (ok1 or ok2):
raise ValueError("For 'valid' mode, one must be at least "
"as large as the other in every dimension")
return not ok1
return False
def correlate(in1, in2, mode='full', method='auto'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the correlation.
``direct``
The correlation is determined directly from sums, the definition of
correlation.
``fft``
The Fast Fourier Transform is used to perform the correlation more
quickly (only available for numerical arrays.)
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See `convolve` Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
See Also
--------
choose_conv_method : contains more documentation on `method`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as::
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
``method='fft'`` only works for numerical arrays as it relies on
`fftconvolve`. In certain cases (i.e., arrays of objects or when
rounding integers can lose precision), ``method='direct'`` is always used.
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0:
return in1 * in2
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
# this either calls fftconvolve or this function with method=='direct'
if method in ('fft', 'auto'):
return convolve(in1, _reverse_and_conj(in2), mode, method)
# fastpath to faster numpy.correlate for 1d inputs when possible
if _np_conv_ok(in1, in2, mode):
return np.correlate(in1, in2, mode)
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward if mode == 'full'. Also, it fails
# with 'valid' mode if in2 is larger than in1, so swap those, too.
# Don't swap inputs for 'same' mode, since shape of in1 matters.
swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
_inputs_swap_needed(mode, in1.shape, in2.shape))
if swapped_inputs:
in1, in2 = in2, in1
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
if swapped_inputs:
# Reverse and conjugate to undo the effect of swapping inputs
z = _reverse_and_conj(z)
return z
def _centered(arr, newshape):
# Return the center newshape portion of the array.
newshape = asarray(newshape)
currshape = array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
As of v0.19, `convolve` automatically chooses this method or the direct
method based on an estimation of which is faster.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse.
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, complex) or
np.issubdtype(in2.dtype, complex))
shape = s1 + s2 - 1
# Check that input sizes are compatible with 'valid' mode
if _inputs_swap_needed(mode, s1, s2):
# Convolution is commutative; order doesn't have any effect on output
in1, s1, in2, s2 = in2, s2, in1, s1
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [fftpack.helper.next_fast_len(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
sp1 = np.fft.rfftn(in1, fshape)
sp2 = np.fft.rfftn(in2, fshape)
ret = (np.fft.irfftn(sp1 * sp2, fshape)[fslice].copy())
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
sp1 = fftpack.fftn(in1, fshape)
sp2 = fftpack.fftn(in2, fshape)
ret = fftpack.ifftn(sp1 * sp2)[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def _numeric_arrays(arrays, kinds='buifc'):
"""
See if a list of arrays are all numeric.
Parameters
----------
    arrays : ndarray or list of ndarrays
        Arrays whose dtypes are checked.
    kinds : string-like
        The dtype kinds to accept. If the ``dtype.kind`` of any of the
        arrays is not in this string, the function returns False;
        otherwise it returns True.
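    An illustrative sketch (inputs assumed):
    >>> _numeric_arrays([np.arange(3), np.zeros(2)])
    True
    >>> _numeric_arrays(np.array(['a', 'b']))
    False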
"""
if type(arrays) == ndarray:
return arrays.dtype.kind in kinds
for array_ in arrays:
if array_.dtype.kind not in kinds:
return False
return True
def _prod(iterable):
"""
Product of a list of numbers.
Faster than np.prod for short lists like array shapes.
"""
product = 1
for x in iterable:
product *= x
return product
def _fftconv_faster(x, h, mode):
"""
See if using `fftconvolve` or `_correlateND` is faster. The boolean value
returned depends on the sizes and shapes of the input values.
The big O ratios were found to hold across different machines, which makes
sense as it's the ratio that matters (the effective speed of the computer
is found in both big O constants). Regardless, this had been tuned on an
early 2015 MacBook Pro with 8GB RAM and an Intel i5 processor.
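    A rough illustration of the heuristic (sizes assumed, not from the
    original docs):
    >>> _fftconv_faster(np.ones(10), np.ones(3), 'full')
    False
    >>> _fftconv_faster(np.ones(100000), np.ones(10000), 'full')
    True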
"""
if mode == 'full':
out_shape = [n + k - 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 10963.92823819 if x.ndim == 1 else 8899.1104874
elif mode == 'same':
out_shape = x.shape
if x.ndim == 1:
if h.size <= x.size:
big_O_constant = 7183.41306773
else:
big_O_constant = 856.78174111
else:
big_O_constant = 34519.21021589
elif mode == 'valid':
out_shape = [n - k + 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 41954.28006344 if x.ndim == 1 else 66453.24316434
else:
raise ValueError('mode is invalid')
# see whether the Fourier transform convolution method or the direct
# convolution method is faster (discussed in scikit-image PR #1792)
direct_time = (x.size * h.size * _prod(out_shape))
fft_time = sum(n * math.log(n) for n in (x.shape + h.shape +
tuple(out_shape)))
return big_O_constant * fft_time < direct_time
def _reverse_and_conj(x):
"""
Reverse array `x` in all dimensions and perform the complex conjugate
"""
reverse = [slice(None, None, -1)] * x.ndim
return x[reverse].conj()
def _np_conv_ok(volume, kernel, mode):
"""
See if numpy supports convolution of `volume` and `kernel` (i.e. both are
1D ndarrays and of the appropriate shape). Numpy's 'same' mode uses the
size of the larger input, while Scipy's uses the size of the first input.
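    A small illustrative sketch (inputs assumed):
    >>> _np_conv_ok(np.ones(5), np.ones(3), 'same')
    True
    >>> _np_conv_ok(np.ones(3), np.ones(5), 'same')
    False
    >>> _np_conv_ok(np.ones(3), np.ones(5), 'full')
    True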
"""
np_conv_ok = volume.ndim == kernel.ndim == 1
return np_conv_ok and (volume.size >= kernel.size or mode != 'same')
def _fftconvolve_valid(volume, kernel):
# fftconvolve doesn't support complex256
for not_fft_conv_supp in ["complex256", "complex192"]:
if hasattr(np, not_fft_conv_supp):
if volume.dtype == not_fft_conv_supp or kernel.dtype == not_fft_conv_supp:
return False
# for integer input,
    # catch when more precision required than float provides (representing an
# integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return False
    # fftconvolve only handles numeric (bool/int/uint/float/complex) arrays
    if not _numeric_arrays([volume, kernel]):
return False
return True
def _timeit_fast(stmt="pass", setup="pass", repeat=3):
"""
Returns the time the statement/function took, in seconds.
Faster, less precise version of IPython's timeit. `stmt` can be a statement
written as a string or a callable.
Will do only 1 loop (like IPython's timeit) with no repetitions
(unlike IPython) for very slow functions. For fast functions, only does
enough loops to take 5 ms, which seems to produce similar results (on
Windows at least), and avoids doing an extraneous cycle that isn't
measured.
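    A minimal sketch (the measured value depends on the machine):
    >>> t = _timeit_fast(lambda: sum(range(100)))
    >>> bool(t > 0)
    True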
"""
timer = timeit.Timer(stmt, setup)
# determine number of calls per rep so total time for 1 rep >= 5 ms
x = 0
for p in range(0, 10):
number = 10**p
x = timer.timeit(number) # seconds
if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one
break
if x > 1: # second
# If it's macroscopic, don't bother with repetitions
best = x
else:
number *= 10
r = timer.repeat(repeat, number)
best = min(r)
sec = best / number
return sec
def choose_conv_method(in1, in2, mode='full', measure=False):
"""
Find the fastest convolution/correlation method.
This primarily exists to be called during the ``method='auto'`` option in
`convolve` and `correlate`, but can also be used when performing many
convolutions of the same input shapes and dtypes, determining
which method to use for all of them, either to avoid the overhead of the
'auto' option or to use accurate real-world measurements.
Parameters
----------
in1 : array_like
The first argument passed into the convolution function.
in2 : array_like
The second argument passed into the convolution function.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
measure : bool, optional
If True, run and time the convolution of `in1` and `in2` with both
methods and return the fastest. If False (default), predict the fastest
method using precomputed values.
Returns
-------
method : str
A string indicating which convolution method is fastest, either
'direct' or 'fft'
times : dict, optional
A dictionary containing the times (in seconds) needed for each method.
This value is only returned if ``measure=True``.
See Also
--------
convolve
correlate
Notes
-----
For large n, ``measure=False`` is accurate and can quickly determine the
fastest method to perform the convolution. However, this is not as
accurate for small n (when any dimension in the input or output is small).
In practice, we found that this function estimates the faster method up to
a multiplicative factor of 5 (i.e., the estimated method is *at most* 5
times slower than the fastest method). The estimation values were tuned on
an early 2015 MacBook Pro with 8GB RAM but we found that the prediction
held *fairly* accurately across different machines.
If ``measure=True``, time the convolutions. Because this function uses
`fftconvolve`, an error will be thrown if it does not support the inputs.
There are cases when `fftconvolve` supports the inputs but this function
returns `direct` (e.g., to protect against floating point integer
precision).
.. versionadded:: 0.19
Examples
--------
Estimate the fastest method for a given input:
>>> from scipy import signal
>>> a = np.random.randn(1000)
>>> b = np.random.randn(1000000)
>>> method = signal.choose_conv_method(a, b, mode='same')
>>> method
'fft'
This can then be applied to other arrays of the same dtype and shape:
>>> c = np.random.randn(1000)
>>> d = np.random.randn(1000000)
>>> # `method` works with correlate and convolve
>>> corr1 = signal.correlate(a, b, mode='same', method=method)
>>> corr2 = signal.correlate(c, d, mode='same', method=method)
>>> conv1 = signal.convolve(a, b, mode='same', method=method)
>>> conv2 = signal.convolve(c, d, mode='same', method=method)
"""
volume = asarray(in1)
kernel = asarray(in2)
if measure:
times = {}
for method in ['fft', 'direct']:
times[method] = _timeit_fast(lambda: convolve(volume, kernel,
mode=mode, method=method))
chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
return chosen_method, times
# fftconvolve doesn't support complex256
fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192"
if hasattr(np, fftconv_unsup):
if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup:
return 'direct'
# for integer input,
# catch when more precision required than float provides (representing an
# integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return 'direct'
if _numeric_arrays([volume, kernel]):
if _fftconv_faster(volume, kernel, mode):
return 'fft'
return 'direct'
def convolve(in1, in2, mode='full', method='auto'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the convolution.
``direct``
The convolution is determined directly from sums, the definition of
convolution.
``fft``
The Fourier Transform is used to perform the convolution by calling
`fftconvolve`.
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
choose_conv_method : chooses the fastest appropriate convolution method
fftconvolve
Notes
-----
By default, `convolve` and `correlate` use ``method='auto'``, which calls
`choose_conv_method` to choose the fastest method using pre-computed
values (`choose_conv_method` can also measure real-world timing with a
keyword argument). Because `fftconvolve` relies on floating point numbers,
there are certain constraints that may force `method=direct` (more detail
in `choose_conv_method` docstring).
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
if _inputs_swap_needed(mode, volume.shape, kernel.shape):
# Convolution is commutative; order doesn't have any effect on output
volume, kernel = kernel, volume
if method == 'auto':
method = choose_conv_method(volume, kernel, mode=mode)
if method == 'fft':
out = fftconvolve(volume, kernel, mode=mode)
if volume.dtype.kind in 'ui':
out = np.around(out)
return out.astype(volume.dtype)
# fastpath to faster numpy.convolve for 1d inputs when possible
if _np_conv_ok(volume, kernel, mode):
return np.convolve(volume, kernel, mode)
return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
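    Examples
    --------
    A minimal sketch on assumed toy data; isolated spikes are removed while
    the constant background is preserved (edges are zero-padded):
    >>> from scipy import signal
    >>> x = np.array([1., 9., 1., 1., 8., 1., 1.])
    >>> y = signal.medfilt(x, kernel_size=3)
    >>> bool(np.all(y == 1.0))
    True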
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * volume.ndim
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(volume.ndim):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
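    Examples
    --------
    A minimal sketch on assumed random data (the filter preserves the
    input shape):
    >>> from scipy import signal
    >>> rng = np.random.RandomState(0)
    >>> im = 5.0 + 0.1 * rng.randn(32, 32)
    >>> filtered = signal.wiener(im, mysize=5)
    >>> filtered.shape
    (32, 32)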
"""
im = asarray(im)
if mysize is None:
mysize = [3] * im.ndim
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') /
product(mysize, axis=0) - lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> ascent = misc.ascent()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
>>> ax_orig.imshow(ascent, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('convolve2d inputs must both be 2D arrays')
if _inputs_swap_needed(mode, in1.shape, in2.shape):
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('correlate2d inputs must both be 2D arrays')
swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
if swapped_inputs:
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
if swapped_inputs:
out = out[::-1, ::-1]
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
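    Examples
    --------
    A minimal sketch on an assumed toy image:
    >>> from scipy import signal
    >>> x = np.arange(25, dtype=np.float64).reshape(5, 5)
    >>> y = signal.medfilt2d(x, kernel_size=3)
    >>> y.shape
    (5, 5)
    >>> float(y[2, 2])  # median of the central 3x3 neighbourhood
    12.0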
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a), len(b)) - 1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
lfiltic : Construct initial conditions for `lfilter`.
lfilter_zi : Compute initial state (steady state of step response) for
`lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
savgol_filter : A Savitzky-Golay filter.
sosfilt: Filter data using cascaded second-order sections.
sosfiltfilt: A forward-backward filter using second-order sections.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
- a[1]*y[n-1] - ... - a[N]*y[n-N]
where `M` is the degree of the numerator, `N` is the degree of the
denominator, and `n` is the sample number. It is implemented using
the following difference equations (assuming M = N)::
a[0]*y[n] = b[0] * x[n] + d[0][n-1]
d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
...
d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
d[N-1][n] = b[N] * x[n] - a[N] * y[n]
where `d` are the state variables.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -M
b[0] + b[1]z + ... + b[M] z
Y(z) = -------------------------------- X(z)
-1 -N
a[0] + a[1]z + ... + a[N] z
Examples
--------
Generate a noisy signal to be filtered:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 201)
>>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
... 0.1*np.sin(2*np.pi*1.25*t + 1) +
... 0.18*np.cos(2*np.pi*3.85*t))
>>> xn = x + np.random.randn(len(t)) * 0.08
Create an order 3 lowpass butterworth filter:
>>> b, a = signal.butter(3, 0.05)
Apply the filter to xn. Use lfilter_zi to choose the initial condition of
the filter:
>>> zi = signal.lfilter_zi(b, a)
>>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
Apply the filter again, to have a result filtered at an order the same as
filtfilt:
>>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
Use filtfilt to apply the filter:
>>> y = signal.filtfilt(b, a, xn)
Plot the original signal and the various filtered versions:
>>> plt.figure
>>> plt.plot(t, xn, 'b', alpha=0.75)
>>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
>>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
... 'filtfilt'), loc='best')
>>> plt.grid(True)
>>> plt.show()
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of
# singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[ind] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[ind]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[ind]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
where ``K = max(M, N)``.
See Also
--------
lfilter, lfilter_zi
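    Examples
    --------
    A consistency sketch with an assumed Butterworth design (from
    `scipy.signal.butter`) and toy data: a state built from past outputs
    and inputs lets `lfilter` continue a filtering run seamlessly.
    >>> from scipy import signal
    >>> b, a = signal.butter(2, 0.25)
    >>> x = np.arange(10.0)
    >>> y = signal.lfilter(b, a, x)
    >>> zi = signal.lfiltic(b, a, y[3::-1], x[3::-1])
    >>> y_rest, _ = signal.lfilter(b, a, x[4:], zi=zi)
    >>> bool(np.allclose(y_rest, y[4:]))
    True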
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal``.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See Also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
See Also
--------
scipy.fftpack.hilbert : Return Hilbert transform of a periodic sequence x.
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
    --------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
We create a chirp of which the frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
The amplitude envelope is given by magnitude of the analytic signal. The
instantaneous frequency can be obtained by differentiating the
instantaneous phase in respect to time. The instantaneous phase corresponds
to the phase angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = (np.diff(instantaneous_phase) /
... (2.0*np.pi) * fs)
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
Processing, Third Edition, 2009. Chapter 12.
ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fftpack.fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if x.ndim > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = fftpack.ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
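    Examples
    --------
    A small sketch on an arbitrary random 2-D array (the shape is chosen
    only for illustration):
    >>> import numpy as np
    >>> from scipy.signal import hilbert2
    >>> x = np.random.randn(16, 16)
    >>> xa = hilbert2(x)        # complex array, same shape as x
    >>> xa.shape
    (16, 16)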
"""
x = atleast_2d(x)
if x.ndim > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fftpack.fft2(x, N, axes=(0, 1))
h1 = zeros(N[0], 'd')
h2 = zeros(N[1], 'd')
    # Modify h1 and h2 in place; no eval/exec gymnastics are needed.
    for p, h in enumerate((h1, h2)):
        N1 = N[p]
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
h = h1[:, newaxis] * h2[newaxis, :]
k = x.ndim
while k > 2:
h = h[:, newaxis]
k -= 1
x = fftpack.ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
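    Examples
    --------
    An illustrative call on arbitrary values:
    >>> from scipy.signal import cmplx_sort
    >>> p_sorted, indx = cmplx_sort([3, 1, 2])
    >>> p_sorted
    array([1, 2, 3])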
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `invresz`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residue, invresz, unique_roots
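    Examples
    --------
    A brief sketch rebuilding ``H(s) = 1/(s+1) + 2/(s+2)`` from its
    expansion (toy values, no direct term):
    >>> from scipy.signal import invres
    >>> r, p, k = [1.0, 2.0], [-1.0, -2.0], []
    >>> b, a = invres(r, p, k)   # b -> [3., 4.], a -> [1., 3., 2.]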
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `residuez`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, residuez, numpy.poly, unique_roots
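    Examples
    --------
    A brief sketch for ``H(s) = (3s + 4) / (s**2 + 3s + 2)`` (coefficients
    picked only for illustration):
    >>> from scipy.signal import residue
    >>> r, p, k = residue([3, 4], [1, 3, 2])   # r -> [2., 1.], p -> [-2., -1.]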
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n]) /
factorial(sig - m))
indx += sig
return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `residue`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invresz, residue, unique_roots
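    Examples
    --------
    A minimal sketch for the single-pole digital filter
    ``H(z) = 1 / (1 - 0.5 z**-1)`` (an assumed toy example):
    >>> from scipy.signal import residuez
    >>> r, p, k = residuez([1.0], [1.0, -0.5])   # r -> [1.], p -> [0.5]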
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `invres`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residuez, unique_roots, invres
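    Examples
    --------
    A short sketch inverting the expansion ``1/(1 - 0.5 z**-1)`` (toy
    values, no direct term):
    >>> from scipy.signal import invresz
    >>> b, a = invresz([1.0], [0.5], [])   # b -> [1.], a -> [1., -0.5]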
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fftpack.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = asarray(x)
X = fftpack.fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftpack.fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = fftpack.ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
sl = [slice(None)] * x.ndim
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
y = fftpack.ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0)):
"""
Resample `x` along the given axis using polyphase filtering.
The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. Values beyond the boundary of the signal are assumed to be zero
during the filtering step.
Parameters
----------
x : array_like
The data to be resampled.
up : int
The upsampling factor.
down : int
The downsampling factor.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : string, tuple, or array_like, optional
Desired window to use to design the low-pass filter, or the FIR filter
coefficients to employ. See below for details.
Returns
-------
resampled_x : array
The resampled array.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample : Resample up or down using the FFT method.
Notes
-----
This polyphase method will likely be faster than the Fourier method
in `scipy.signal.resample` when the number of samples is large and
prime, or when the number of samples is large and `up` and `down`
    share a large greatest common divisor. The length of the FIR
filter used will depend on ``max(up, down) // gcd(up, down)``, and
the number of operations during polyphase filtering will depend on
the filter length and `down` (see `scipy.signal.upfirdn` for details).
The argument `window` specifies the FIR low-pass filter design.
If `window` is an array_like it is assumed to be the FIR filter
coefficients. Note that the FIR filter is applied after the upsampling
step, so it should be designed to operate on a signal at a sampling
frequency higher than the original by a factor of `up//gcd(up, down)`.
This function's output will be centered with respect to this array, so it
is best to pass a symmetric filter with an odd number of samples if, as
is usually the case, a zero-phase filter is desired.
For any other type of `window`, the functions `scipy.signal.get_window`
and `scipy.signal.firwin` are called to generate the appropriate filter
coefficients.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * up / float(down)``.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle for the FFT method, and gets closer to zero
for the polyphase method:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f_fft = signal.resample(y, 100)
>>> f_poly = signal.resample_poly(y, 100, 20)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
>>> plt.plot(x, y, 'ko-')
>>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
>>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
>>> plt.show()
"""
x = asarray(x)
up = int(up)
down = int(down)
if up < 1 or down < 1:
raise ValueError('up and down must be >= 1')
# Determine our up and down factors
    # Use a rational approximation to save computation time on really long
# signals
g_ = gcd(up, down)
up //= g_
down //= g_
if up == down == 1:
return x.copy()
n_out = x.shape[axis] * up
n_out = n_out // down + bool(n_out % down)
if isinstance(window, (list, np.ndarray)):
window = asarray(window)
if window.ndim > 1:
raise ValueError('window must be 1-D')
half_len = (window.size - 1) // 2
h = window
else:
# Design a linear-phase low-pass FIR filter
max_rate = max(up, down)
f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
half_len = 10 * max_rate # reasonable cutoff for our sinc-like function
h = firwin(2 * half_len + 1, f_c, window=window)
h *= up
# Zero-pad our filter to put the output samples at the center
n_pre_pad = (down - half_len % down)
n_post_pad = 0
n_pre_remove = (half_len + n_pre_pad) // down
# We should rarely need to do this given our filter lengths...
while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],
up, down) < n_out + n_pre_remove:
n_post_pad += 1
h = np.concatenate((np.zeros(n_pre_pad), h, np.zeros(n_post_pad)))
ufd = _UpFIRDn(h, x.dtype, up, down)
n_pre_remove_end = n_pre_remove + n_out
def apply_remove(x):
"""Apply the upfirdn filter and remove excess"""
return ufd.apply_filter(x)[n_pre_remove:n_pre_remove_end]
y = np.apply_along_axis(apply_remove, axis, x)
return y
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
doi: 10.1063/1.3670512
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. doi: 10.1007/s00422-013-0561-7.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
doi: 10.1007/s00422-013-0560-8
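    Examples
    --------
    Perfectly periodic events (an assumed toy case) are fully synchronized
    to their period:
    >>> from scipy.signal import vectorstrength
    >>> strength, phase = vectorstrength([0.0, 1.0, 2.0, 3.0], 1.0)   # strength ~ 1.0, phase ~ 0.0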
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, _prod(dshape) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
See Also
--------
lfilter, lfiltic, filtfilt
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Compute an initial state `zi` for the sosfilt function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
    .. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. Transactions on Signal Processing, 46(4):988-992, 1996.
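    Examples
    --------
    This helper is normally reached through the public wrapper; a hedged
    sketch via ``filtfilt`` with an arbitrary filter and random data:
    >>> import numpy as np
    >>> from scipy.signal import butter, filtfilt
    >>> b, a = butter(3, 0.2)
    >>> x = np.random.randn(100)
    >>> y = filtfilt(b, a, x, method='gust')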
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
    if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
    if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
A forward-backward filter.
This function applies a linear filter twice, once forward and once
backwards. The combined filter has linear phase.
The function provides options for handling the edges of the signal.
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
Notes
-----
    The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
    .. [1] F. Gustafsson, "Determining the initial states in forward-backward
filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# method == "pad"
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=max(len(a), len(b)))
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def _validate_pad(padtype, padlen, x, axis, ntaps):
"""Helper to validate padding for filtfilt"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
return edge, ext
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = signal.unit_impulse(700)
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
"""
x = np.asarray(x)
sos, n_sections = _validate_sos(sos)
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r, got %r.' %
(axis, x.shape, n_sections, x_zi_shape, zi.shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
"""
A forward-backward filter using cascaded second-order sections.
See `filtfilt` for more complete information about this method.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is::
3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
(sos[:, 5] == 0).sum()))
The extra subtraction at the end attempts to compensate for poles
and zeros at the origin (e.g. for odd-order filters) to yield
equivalent estimates of `padlen` to those of `filtfilt` for
second-order section filters built with `scipy.signal` functions.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
filtfilt, sosfilt, sosfilt_zi, sosfreqz
Notes
-----
.. versionadded:: 0.18.0
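    Examples
    --------
    A hedged sketch: zero-phase low-pass filtering of a noisy sine (the
    filter order and cutoff are arbitrary choices):
    >>> import numpy as np
    >>> from scipy import signal
    >>> t = np.linspace(0, 1.0, 2000)
    >>> x = np.sin(2 * np.pi * 5 * t) + 0.1 * np.random.randn(t.size)
    >>> sos = signal.butter(4, 0.125, output='sos')
    >>> y = signal.sosfiltfilt(sos, x)   # same shape as x, no phase shift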
"""
sos, n_sections = _validate_sos(sos)
# `method` is "pad"...
ntaps = 2 * n_sections + 1
ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=ntaps)
# These steps follow the same form as filtfilt with modifications
zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
zi_shape = [1] * x.ndim
zi_shape[axis] = 2
zi.shape = [n_sections] + zi_shape
x_0 = axis_slice(ext, stop=1, axis=axis)
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
y_0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
y = axis_reverse(y, axis=axis)
if edge > 0:
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=None):
"""
Downsample the signal after applying an anti-aliasing filter.
By default, an order 8 Chebyshev type I filter is used. A 30 point FIR
filter with Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor. For downsampling factors higher than 13, it is
recommended to call `decimate` multiple times.
n : int, optional
The order of the filter (1 less than the length for 'fir'). Defaults to
8 for 'iir' and 30 for 'fir'.
ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
If 'iir' or 'fir', specifies the type of lowpass filter. If an instance
of an `dlti` object, uses that object to filter before downsampling.
axis : int, optional
The axis along which to decimate.
zero_phase : bool, optional
Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
when using an IIR filter, and shifting the outputs back by the filter's
group delay when using an FIR filter. A value of ``True`` is
recommended, since a phase shift is generally not desired. Using
``None`` defaults to ``False`` for backwards compatibility. This
default will change to ``True`` in a future release, so it is best to
set this argument explicitly.
.. versionadded:: 0.18.0
Returns
-------
y : ndarray
The down-sampled signal.
See Also
--------
resample : Resample up or down using the FFT method.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The ``zero_phase`` keyword was added in 0.18.0.
The possibility to use instances of ``dlti`` as ``ftype`` was added in
0.18.0.
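    Examples
    --------
    An illustrative downsampling of a sampled sine by a factor of 4
    (the signal parameters are arbitrary):
    >>> import numpy as np
    >>> from scipy import signal
    >>> t = np.linspace(0, 1, 1000, endpoint=False)
    >>> x = np.sin(2 * np.pi * 5 * t)
    >>> y = signal.decimate(x, 4, zero_phase=True)
    >>> y.shape
    (250,)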
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is not None and not isinstance(n, int):
raise TypeError("n must be an integer")
if ftype == 'fir':
if n is None:
n = 30
system = dlti(firwin(n+1, 1. / q, window='hamming'), 1.)
elif ftype == 'iir':
if n is None:
n = 8
system = dlti(*cheby1(n, 0.05, 0.8 / q))
elif isinstance(ftype, dlti):
system = ftype._as_tf() # Avoids copying if already in TF form
n = np.max((system.num.size, system.den.size)) - 1
else:
raise ValueError('invalid ftype')
if zero_phase is None:
warnings.warn(" Note: Decimate's zero_phase keyword argument will "
"default to True in a future release. Until then, "
"decimate defaults to one-way filtering for backwards "
"compatibility. Ideally, always set this argument "
"explicitly.", FutureWarning)
zero_phase = False
sl = [slice(None)] * x.ndim
if len(system.den) == 1: # FIR case
if zero_phase:
y = resample_poly(x, 1, q, axis=axis, window=system.num)
else:
# upfirdn is generally faster than lfilter by a factor equal to the
# downsampling factor, since it only calculates the needed outputs
n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
y = upfirdn(system.num, x, up=1, down=q, axis=axis)
sl[axis] = slice(None, n_out, None)
else: # IIR case
if zero_phase:
y = filtfilt(system.num, system.den, x, axis=axis)
else:
y = lfilter(system.num, system.den, x, axis=axis)
sl[axis] = slice(None, None, q)
return y[sl]
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/series/test_repr.py | 8 | 5953 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, date_range)
from pandas.core.index import MultiIndex
from pandas.compat import StringIO, lrange, range, u
from pandas import compat
import pandas.util.testing as tm
from .common import TestData
class TestSeriesRepr(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(lrange(0, len(index)), index=index, name='sth')
expected = ["first second", "foo one 0",
" two 1", " three 2",
"bar one 3", " two 4",
"baz two 5", " three 6",
"qux one 7", " two 8",
" three 9", "Name: sth, dtype: int64"]
expected = "\n".join(expected)
self.assertEqual(repr(s), expected)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
# test big series (diff code path)
s = Series(lrange(0, 1000))
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
s = Series(index=date_range('20010101', '20020101'), name='test')
self.assertIn("Name: test", repr(s))
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# with Nones
ots = self.ts.astype('O')
ots[::2] = None
repr(ots)
# various names
for name in ['', 1, 1.2, 'foo', u('\u03B1\u03B2\u03B3'),
'loooooooooooooooooooooooooooooooooooooooooooooooooooong',
('foo', 'bar', 'baz'), (1, 2), ('foo', 1, 2.3),
(u('\u03B1'), u('\u03B2'), u('\u03B3')),
(u('\u03B1'), 'bar')]:
self.series.name = name
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
# 0 as name
ser = Series(np.random.randn(100), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
# tidy repr
ser = Series(np.random.randn(1001), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
ser = Series(["a\n\r\tb"], name="a\n\r\td", index=["a\n\r\tf"])
self.assertFalse("\t" in repr(ser))
self.assertFalse("\r" in repr(ser))
self.assertFalse("a\n" in repr(ser))
# with empty series (#4651)
s = Series([], dtype=np.int64, name='foo')
self.assertEqual(repr(s), 'Series([], Name: foo, dtype: int64)')
s = Series([], dtype=np.int64, name=None)
self.assertEqual(repr(s), 'Series([], dtype: int64)')
def test_tidy_repr(self):
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a) # should not raise exception
def test_repr_bool_fails(self):
s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
# it works (with no Cython exception barf)!
repr(s)
finally:
sys.stderr = tmp
self.assertEqual(buf.getvalue(), '')
def test_repr_name_iterable_indexable(self):
s = Series([1, 2, 3], name=np.int64(3))
# it works!
repr(s)
s.name = (u("\u05d0"), ) * 2
repr(s)
def test_repr_should_return_str(self):
# http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
# http://docs.python.org/reference/datamodel.html#object.__repr__
# ...The return value must be a string object.
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
df = Series(data, index=index1)
self.assertTrue(type(df.__repr__() == str)) # both py2 / 3
def test_repr_max_rows(self):
# GH 6863
with pd.option_context('max_rows', None):
str(Series(range(1001))) # should not raise exception
def test_unicode_string_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
bytes(df)
else:
str(df)
def test_timeseries_repr_object_dtype(self):
index = Index([datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)], dtype=object)
ts = Series(np.random.randn(len(index)), index)
repr(ts)
ts = tm.makeTimeSeries(1000)
self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))
ts2 = ts.ix[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
| gpl-3.0 |
brguez/TEIBA | src/python/mergedMEI.stats.py | 1 | 23359 | #!/usr/bin/env python
#coding: utf-8
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "****", string, "****"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string,
#### CLASSES ####
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
import scipy.stats as stats
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
## Get user's input ##
parser = argparse.ArgumentParser(description= """""")
parser.add_argument('VCF', help='...')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
VCF = args.VCF
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "VCF: ", VCF
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
###########
## Start ##
###########
############################################
## 1. Create VCF object and read input VCF #
############################################
header("Read input VCF")
VCFObj = formats.VCF()
VCFObj.read_VCF(VCF)
########################################
## 2. Plot information about MEI calls #
########################################
## 2.1 Number of MEI per chromosome
####################################
header("Number of MEI per chromosome")
# Initialize lists
# 24 element lists. The 24 chromosomes (1, 2, 3, 4...Y)
L1List = [0] * 24
AluList = [0] * 24
SVAList = [0] * 24
ERVKList = [0] * 24
## Count number of MEI per chromosome
for MEIObj in VCFObj.lineList:
supportingPElist = MEIObj.genotype.split(':')
totalSupportingPE = int(supportingPElist[0]) + int(supportingPElist[1])
if (MEIObj.chrom == "X"):
index = 23 - 1
elif (MEIObj.chrom == "Y"):
index = 24 - 1
else:
index = int(MEIObj.chrom) - 1
if (MEIObj.infoDict['CLASS'] == 'L1'):
L1List[index] = L1List[index] + 1
elif (MEIObj.infoDict['CLASS'] == 'Alu'):
AluList[index] = AluList[index] + 1
elif (MEIObj.infoDict['CLASS'] == 'SVA'):
SVAList[index] = SVAList[index] + 1
else:
ERVKList[index] = ERVKList[index] + 1
## Make figure
# Make bar plot
xpos = np.arange(1, 25) # the x locations for the groups
width = 0.5 # the width of the bars: can also be len(x) sequence
fig = plt.figure(figsize=(8,6))
fig.suptitle('# MEI per chromosome')
plt.ylabel('# MEI', fontsize=12)
ax = fig.add_subplot(111)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax.set_axisbelow(True)
# Plot data
p1 = ax.bar(xpos, AluList, color='#A67D3D', alpha=0.75, edgecolor='#000000', width=width, align='center')
p2 = ax.bar(xpos, L1List, color='#008000', alpha=0.75, edgecolor='#000000', width=width, align='center',
bottom=AluList)
p3 = ax.bar(xpos, SVAList, color='#87CEFA', alpha=0.75, edgecolor='#000000', width=width, align='center',
bottom=[i+j for i,j in zip(AluList, L1List)])
p4 = ax.bar(xpos, ERVKList, color='#ff0000', alpha=0.75, edgecolor='#000000', width=width, align='center',
bottom=[i+j+z for i,j,z in zip(AluList, L1List, SVAList)])
## Customize ticks
chrList = range(1, 23)
chrList = chrList + ['X', 'Y']
plt.yticks(np.arange(0, 2001, 100))
plt.xticks(xpos, chrList)
# Rotate X ticks:
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
## Make legend
circle1 = mpatches.Circle((0, 0), 5, color='#A67D3D', alpha=0.75)
circle2 = mpatches.Circle((0, 0), 5, color='#008000', alpha=0.75)
circle3 = mpatches.Circle((0, 0), 5, color='#87CEFA', alpha=0.75)
circle4 = mpatches.Circle((0, 0), 5, color='#ff0000', alpha=0.75)
plt.figlegend((circle1, circle2, circle3, circle4), ('ALU', 'L1', 'SVA', 'ERVK'), loc = 'lower center', ncol=4, labelspacing=0., fontsize=8, fancybox=True )
## Save figure
fileName = outDir + "/PCAWG_MEIperChr_barPlot.pdf"
plt.savefig(fileName)
## 2.2 MEI total number of supporting discordant paired-ends histogram
#######################################################################
header("Total number of discordant paired-ends histogram")
## Gather data
L1PElist = []
AluPElist = []
SVAPElist = []
ERVKPElist = []
for MEIObj in VCFObj.lineList:
supportingPElist = MEIObj.genotype.split(':')
totalSupportingPE = int(supportingPElist[0]) + int(supportingPElist[1])
if (MEIObj.infoDict['CLASS'] == 'L1'):
L1PElist.append(totalSupportingPE)
elif (MEIObj.infoDict['CLASS'] == 'Alu'):
AluPElist.append(totalSupportingPE)
elif (MEIObj.infoDict['CLASS'] == 'SVA'):
SVAPElist.append(totalSupportingPE)
else:
ERVKPElist.append(totalSupportingPE)
#### Make plot
fig = plt.figure(figsize=(20,14))
fig.suptitle('Total discordant paired-end support', fontsize=20)
### A) L1
## Make plot
ax1 = fig.add_subplot(2, 2, 1)
ax1.set_title("LINE-1", fontsize=16)
plt.hist(L1PElist, bins=100, color='#008000', alpha=0.75)
plt.xlabel("# discordant paired-end", fontsize=14)
plt.ylabel("# MEI")
plt.xlim(0, 250)
plt.ylim(0, 325)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 251, 10))
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
plt.yticks(np.arange(0, 326, 20))
### B) ALU
## Make plot
ax2 = fig.add_subplot(2, 2, 2)
ax2.set_title("ALU", fontsize=16)
plt.hist(AluPElist, bins=125, color='#008000', alpha=0.75)
plt.xlabel("# discordant paired-end", fontsize=14)
plt.ylabel("# MEI")
plt.xlim(0, 250)
plt.ylim(0, 2500)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax2.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax2.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 251, 10))
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
plt.yticks(np.arange(0, 2501, 200))
### C) SVA
## Make plot
ax3 = fig.add_subplot(2, 2, 3)
ax3.set_title("SVA", fontsize=16)
plt.hist(SVAPElist, bins=100, color='#008000', alpha=0.75)
plt.xlabel("# discordant paired-end", fontsize=14)
plt.ylabel("# MEI")
plt.xlim(0, 250)
plt.ylim(0, 75)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax3.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax3.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 251, 10))
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
plt.yticks(np.arange(0, 75, 5))
### D) ERVK
## Make plot
ax4 = fig.add_subplot(2, 2, 4)
ax4.set_title("ERVK", fontsize=16)
plt.hist(ERVKPElist, bins=75, color='#008000', alpha=0.75)
plt.xlabel("# discordant paired-end", fontsize=14)
plt.ylabel("# MEI")
plt.xlim(0, 250)
plt.ylim(0, 6)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax4.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax4.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 251, 14))
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
plt.yticks(np.arange(0, 6, 1))
##### Save figure
fileName = outDir + "/PCAWG_totalSupportingPE_hist.pdf"
plt.savefig(fileName)
## 2.3 Number of + cluster vs - cluster discordant paired-ends scatterplot
###########################################################################
header("Number of + and - cluster supporting paired-ends scatterplots")
## Gather data
L1PEtuplelist = []
AluPEtuplelist = []
SVAPEtuplelist = []
ERVKPEtuplelist = []
for MEIObj in VCFObj.lineList:
supportingPEtuple = tuple(map(int, MEIObj.genotype.split(':')))
if (MEIObj.infoDict['CLASS'] == 'L1'):
L1PEtuplelist.append(supportingPEtuple)
elif (MEIObj.infoDict['CLASS'] == 'Alu'):
AluPEtuplelist.append(supportingPEtuple)
elif (MEIObj.infoDict['CLASS'] == 'SVA'):
SVAPEtuplelist.append(supportingPEtuple)
else:
ERVKPEtuplelist.append(supportingPEtuple)
### Make plot
fig = plt.figure(figsize=(10,14))
fig.suptitle('Number of discordant paired-ends correlation', fontsize=18)
## L1
tmpList = map(list, zip(*L1PEtuplelist))
L1plusPE = tmpList[0]
L1minusPE = tmpList[1]
# Compute correlation
corr = stats.pearsonr(L1plusPE, L1minusPE)
coefficient = format(corr[0], '.3f')
pvalue = corr[1]
text = 'pearson_corr: ' + str(coefficient)
# Make scatterplot
ax1 = fig.add_subplot(2, 2, 1)
ax1.set_title("LINE-1", fontsize=14)
plt.scatter(L1plusPE, L1minusPE, color='#008000', alpha=.4)
plt.xlim((0, (max(L1plusPE) + 10)))
plt.ylim((0, (max(L1minusPE) + 10)))
plt.xlabel('+ cluster', fontsize=12)
plt.ylabel('- cluster', fontsize=12)
ax1.text(0.5, 0.1, text, transform = ax1.transAxes)
## Alu
tmpList = map(list, zip(*AluPEtuplelist))
AluPlusPE = tmpList[0]
AluMinusPE = tmpList[1]
# Compute correlation
corr = stats.pearsonr(AluPlusPE, AluMinusPE)
coefficient = format(corr[0], '.3f')
pvalue = corr[1]
text = 'pearson_corr: ' + str(coefficient)
# Make scatterplot
ax2 = fig.add_subplot(2, 2, 2)
ax2.set_title("ALU", fontsize=14)
plt.scatter(AluPlusPE, AluMinusPE, color='#008000', alpha=.4)
plt.xlim((0, (max(AluPlusPE) + 10)))
plt.ylim((0, (max(AluMinusPE) + 10)))
plt.xlabel('+ cluster', fontsize=12)
plt.ylabel('- cluster', fontsize=12)
ax2.text(0.5, 0.1, text, transform = ax2.transAxes)
## SVA
tmpList = map(list, zip(*SVAPEtuplelist))
SVAPlusPE = tmpList[0]
SVAMinusPE = tmpList[1]
# Compute correlation
corr = stats.pearsonr(SVAPlusPE, SVAMinusPE)
coefficient = format(corr[0], '.3f')
pvalue = corr[1]
text = 'pearson_corr: ' + str(coefficient)
# Make scatterplot
ax3 = fig.add_subplot(2, 2, 3)
ax3.set_title("SVA", fontsize=14)
plt.scatter(SVAPlusPE, SVAMinusPE, color='#008000', alpha=.4)
plt.xlim((0, (max(SVAPlusPE) + 10)))
plt.ylim((0, (max(SVAMinusPE) + 10)))
plt.xlabel('+ cluster', fontsize=12)
plt.ylabel('- cluster', fontsize=12)
ax3.text(0.5, 0.1, text, transform = ax3.transAxes)
## ERVK
tmpList = map(list, zip(*ERVKPEtuplelist))
ERVKPlusPE = tmpList[0]
ERVKMinusPE = tmpList[1]
# Compute correlation
corr = stats.pearsonr(ERVKPlusPE, ERVKMinusPE)
coefficient = format(corr[0], '.3f')
pvalue = corr[1]
text = 'pearson_corr: ' + str(coefficient)
# Make scatterplot
ax4 = fig.add_subplot(2, 2, 4)
ax4.set_title("ERVK", fontsize=14)
plt.scatter(ERVKPlusPE, ERVKMinusPE, color='#008000', alpha=.4)
plt.xlim((0, (max(ERVKPlusPE) + 10)))
plt.ylim((0, (max(ERVKMinusPE) + 10)))
plt.xlabel('+ cluster', fontsize=12)
plt.ylabel('- cluster', fontsize=12)
ax4.text(0.5, 0.1, text, transform = ax4.transAxes)
## Save figure
fileName = outDir + "/PCAWG_discordantPE_clusters_correlation.pdf"
plt.savefig(fileName)
## 2.4 MEI TSD length histogram
################################
header("MEI TSD length histogram")
## Gather data
L1TSDlist = []
AluTSDlist = []
SVATSDlist = []
for MEIObj in VCFObj.lineList:
if (MEIObj.infoDict['CLASS'] == 'L1'):
L1TSDlist.append(int(MEIObj.infoDict['TSLEN']))
elif (MEIObj.infoDict['CLASS'] == 'Alu'):
AluTSDlist.append(int(MEIObj.infoDict['TSLEN']))
elif (MEIObj.infoDict['CLASS'] == 'SVA'):
SVATSDlist.append(int(MEIObj.infoDict['TSLEN']))
#### Make plot
fig = plt.figure(figsize=(14,12))
fig.suptitle('Target site duplication (TSD)', fontsize=20)
### A) L1
## Make plot
ax1 = fig.add_subplot(2, 2, 1)
ax1.set_title("LINE-1", fontsize=16)
plt.hist(L1TSDlist, bins=75, color='#008000', alpha=0.75)
plt.xlabel("TSD length", fontsize=14)
plt.ylabel("# MEI")
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 100, 5))
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
### B) ALU
## Make plot
ax2 = fig.add_subplot(2, 2, 2)
ax2.set_title("ALU", fontsize=16)
plt.hist(AluTSDlist, bins=75, color='#008000', alpha=0.75)
plt.xlabel("TSD length", fontsize=14)
plt.ylabel("# MEI")
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax2.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax2.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 100, 5))
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
### C) SVA
## Make plot
ax3 = fig.add_subplot(2, 2, 3)
ax3.set_title("SVA", fontsize=16)
plt.hist(SVATSDlist, bins=75, color='#008000', alpha=0.75)
plt.xlabel("TSD length", fontsize=14)
plt.ylabel("# MEI")
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax3.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax3.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 100, 5))
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
##### Save figure
fileName = outDir + "/PCAWG_TSDlen_hist.pdf"
plt.savefig(fileName)
## 2.5 MEI strand bar plot
###########################
header("MEI strand bar plot")
## Gather data
L1StrandList = []
AluStrandList = []
SVAStrandList = []
ERVKStrandList = []
for MEIObj in VCFObj.lineList:
if (MEIObj.infoDict['CLASS'] == 'L1'):
L1StrandList.append(MEIObj.infoDict['STRAND'])
elif (MEIObj.infoDict['CLASS'] == 'Alu'):
AluStrandList.append(MEIObj.infoDict['STRAND'])
elif (MEIObj.infoDict['CLASS'] == 'SVA'):
SVAStrandList.append(MEIObj.infoDict['STRAND'])
else:
ERVKStrandList.append(MEIObj.infoDict['STRAND'])
# Compute the number of times the MEI is inserted in + and - for each MEI type
nbPlusMinusL1 = [L1StrandList.count(i) for i in set(L1StrandList)]
nbPlusMinusAlu = [AluStrandList.count(i) for i in set(AluStrandList)]
nbPlusMinusSVA = [SVAStrandList.count(i) for i in set(SVAStrandList)]
nbPlusMinusERVK = [ERVKStrandList.count(i) for i in set(ERVKStrandList)]
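# Editorial note: iterating over set() yields an arbitrary, hash-dependent order, so the
# two counts in each nbPlusMinus* list are not guaranteed to correspond to ('+', '-') in
# the same order for every MEI class; iterating over a fixed list such as ['+', '-'] would
# make the pairing (and therefore the stacked bars and legend) deterministic.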
# Convert into percentages.
percPlusMinusL1 = [float(i)/sum(nbPlusMinusL1)*100 for i in nbPlusMinusL1]
percPlusMinusAlu = [float(i)/sum(nbPlusMinusAlu)*100 for i in nbPlusMinusAlu]
percPlusMinusSVA = [float(i)/sum(nbPlusMinusSVA)*100 for i in nbPlusMinusSVA]
percPlusMinusERVK = [float(i)/sum(nbPlusMinusERVK)*100 for i in nbPlusMinusERVK]
# Put percentages into two lists
tmpList = map(list, zip(percPlusMinusL1, percPlusMinusAlu, percPlusMinusSVA, percPlusMinusERVK))
plusList = tmpList[0]
minusList = tmpList[1]
## Make figure
# Make bar plot
xpos = np.arange(4) # the x locations for the groups
width = 0.5 # the width of the bars: can also be len(x) sequence
fig = plt.figure(figsize=(5,6))
fig.suptitle('MEI DNA strand')
plt.ylabel('%', fontsize=12)
ax = fig.add_subplot(111)
p1 = ax.bar(xpos, plusList, color='#A67D3D', alpha=0.75, edgecolor='#000000', width=width, align='center')
p2 = ax.bar(xpos, minusList, color='#008000', alpha=0.75, edgecolor='#000000', width=width, align='center',
bottom=plusList)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax.set_axisbelow(True)
## Customize ticks
plt.yticks(np.arange(0, 101, 10))
plt.xticks(xpos, ('L1', 'ALU', 'SVA', 'ERVK'))
## Make legend
circle1 = mpatches.Circle((0, 0), 5, color='#A67D3D', alpha=0.75)
circle2 = mpatches.Circle((0, 0), 5, color='#008000', alpha=0.75)
plt.figlegend((circle1, circle2), ('+ strand', '- strand'), loc = 'lower center', ncol=2, labelspacing=0., fontsize=8, fancybox=True )
## Save figure
fileName = outDir + "/PCAWG_strand_barPlot.pdf"
plt.savefig(fileName)
## 2.6 MEI structure bar plot
##############################
header("MEI structure bar plot")
## Gather data
L1StructureList = []
AluStructureList = []
SVAStructureList = []
for MEIObj in VCFObj.lineList:
if (MEIObj.infoDict['CLASS'] == 'L1'):
L1StructureList.append(MEIObj.infoDict['STRUCT'])
elif (MEIObj.infoDict['CLASS'] == 'Alu'):
AluStructureList.append(MEIObj.infoDict['STRUCT'])
elif (MEIObj.infoDict['CLASS'] == 'SVA'):
SVAStructureList.append(MEIObj.infoDict['STRUCT'])
# Compute the number of MEIs in each structural category (5' inverted, 5' truncated, full-length) for each MEI class
nbInvFullDelL1 = [L1StructureList.count(i) for i in set(L1StructureList)]
nbInvFullDelAlu = [AluStructureList.count(i) for i in set(AluStructureList)]
nbInvFullDelSVA = [SVAStructureList.count(i) for i in set(SVAStructureList)]
# Convert into percentages.
percInvFullDelL1 = [float(i)/sum(nbInvFullDelL1)*100 for i in nbInvFullDelL1]
percInvFullDelAlu = [float(i)/sum(nbInvFullDelAlu)*100 for i in nbInvFullDelAlu]
percInvFullDelSVA = [float(i)/sum(nbInvFullDelSVA)*100 for i in nbInvFullDelSVA]
# Put percentages into three lists
tmpList = map(list, zip(percInvFullDelL1, percInvFullDelAlu, percInvFullDelSVA))
invList = tmpList[0]
delList = tmpList[2]
fullList = tmpList[1]
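# Editorial note: the index mapping above (tmpList[0] -> inverted, tmpList[2] -> truncated,
# tmpList[1] -> full-length) assumes a specific iteration order of set() over the STRUCT
# values in every class; with a different order the stacked segments would be paired with
# the wrong legend labels. An explicit, fixed list of STRUCT categories would remove that
# assumption.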
## Make figure
# Make bar plot
xpos = np.arange(3) # the x locations for the groups
width = 0.5 # the width of the bars: can also be len(x) sequence
fig = plt.figure(figsize=(5,6))
fig.suptitle('MEI structure')
plt.ylabel('%', fontsize=12)
ax = fig.add_subplot(111)
p1 = ax.bar(xpos, invList, color='#A67D3D', alpha=0.75, edgecolor='#000000', width=width, align='center')
p2 = ax.bar(xpos, delList, color='#008000', alpha=0.75, edgecolor='#000000', width=width, align='center',
bottom=invList)
p3 = ax.bar(xpos, fullList, color='#FFA500', alpha=0.75, edgecolor='#000000', width=width, align='center',
bottom=[i+j for i,j in zip(invList, delList)])
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax.set_axisbelow(True)
## Customize ticks
plt.yticks(np.arange(0, 101, 10))
plt.xticks(xpos, ('L1', 'ALU', 'SVA'))
## Make legend
circle1 = mpatches.Circle((0, 0), 5, color='#A67D3D', alpha=0.75)
circle2 = mpatches.Circle((0, 0), 5, color='#008000', alpha=0.75)
circle3 = mpatches.Circle((0, 0), 5, color='#FFA500', alpha=0.75)
plt.figlegend((circle1, circle2, circle3), ('5\' Inverted', '5\' Truncated', 'Full-length'), loc = 'lower center', ncol=3, labelspacing=0., fontsize=8, fancybox=True )
## Save figure
fileName = outDir + "/PCAWG_structure_barPlot.pdf"
plt.savefig(fileName)
## 2.7 MEI in CPG
###################
header("MEI CPG region bar plot")
## Gather data
TSGRegionList = []
oncogeneRegionList = []
bothRegionList = []
test = []
genes = []
for MEIObj in VCFObj.lineList:
# Note: number of CPG genes inconsistent with input VCF. Investigate...
if 'CPG' in MEIObj.infoDict:
test.append(MEIObj)
# A) Tumor suppressor gene (TSG)
if (MEIObj.infoDict['ROLE'] == 'TSG'):
TSGRegionList.append(MEIObj.infoDict['REGION'])
# B) Oncogene
elif (MEIObj.infoDict['ROLE'] == 'oncogene'):
oncogeneRegionList.append(MEIObj.infoDict['REGION'])
# C) Both
else:
bothRegionList.append(MEIObj.infoDict['REGION'])
# Compute the number of MEIs inserted in the different genic regions (only introns for oncogene and both; for TSG, two in UTR)
nbIntronUtrTSG = [TSGRegionList.count(i) for i in set(TSGRegionList)]
nbIntronUtrOncogene = [oncogeneRegionList.count(i) for i in set(oncogeneRegionList)]
nbIntronUtrBoth = [bothRegionList.count(i) for i in set(bothRegionList)]
## Add 0 UTR to the oncogene and both list
nbIntronUtrOncogene.append(0)
nbIntronUtrBoth.append(0)
# Put counts into two lists
tmpList = map(list, zip(nbIntronUtrTSG, nbIntronUtrOncogene, nbIntronUtrBoth))
intronList = tmpList[0]
utrList = tmpList[1]
## Make figure
# Make bar plot
xpos = np.arange(3) # the x locations for the groups
width = 0.5 # the width of the bars: can also be len(x) sequence
fig = plt.figure(figsize=(5,6))
fig.suptitle('MEI in CPG')
plt.ylabel('# MEI', fontsize=12)
ax = fig.add_subplot(111)
p1 = ax.bar(xpos, intronList, color='#A67D3D', alpha=0.75, edgecolor='#000000', width=width, align='center')
p2 = ax.bar(xpos, utrList, color='#008000', alpha=0.75, edgecolor='#000000', width=width, align='center',
bottom=intronList)
plt.ylim(0, 50)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax.set_axisbelow(True)
## Customize ticks
plt.yticks(np.arange(0, 51, 5))
plt.xticks(xpos, ('TSG', 'ONCOGENE', 'BOTH'), fontsize=12)
## Make legend
circle1 = mpatches.Circle((0, 0), 5, color='#A67D3D', alpha=0.75)
circle2 = mpatches.Circle((0, 0), 5, color='#008000', alpha=0.75)
plt.figlegend((circle1, circle2), ('INTRON', '3\' UTR'), loc = 'lower center', ncol=2, labelspacing=0., fontsize=8, fancybox=True )
## Save figure
fileName = outDir + "/PCAWG_CPG_barPlot.pdf"
plt.savefig(fileName)
## 2.8 MEI insertion region pie chart
#######################################
header("MEI functional spectrum pie chart")
## Gather data
regionsList = []
for MEIObj in VCFObj.lineList:
if (MEIObj.infoDict['REGION']=="splicing") or (MEIObj.infoDict['REGION']=="upstream,downstream") or (MEIObj.infoDict['REGION']=="upstream") or (MEIObj.infoDict['REGION']=="downstream"):
region = "Other"
elif (MEIObj.infoDict['REGION']=="UTR5") or (MEIObj.infoDict['REGION']=="UTR3"):
region = "UTR"
elif (MEIObj.infoDict['REGION']=="ncRNA_exonic") or (MEIObj.infoDict['REGION']=="ncRNA_intronic"):
region = "ncRNA"
else:
region = MEIObj.infoDict['REGION']
regionsList.append(region)
regionTuples = [(x, int(regionsList.count(x))) for x in set(regionsList)]
regionList = [list(t) for t in zip(*regionTuples)]
labels = regionList[0]
sizes = regionList[1]
## Make plot
fig = plt.figure(figsize=(6,6))
fig.suptitle('MEI functional spectrum', fontsize=16)
colors = ['#008000', '#A67D3D', '#87CEFA', '#ff0000', '#FFD700', '#FFA500']
patches, texts, perc = plt.pie(sizes, colors=colors, startangle=90, autopct='%1.1f%%', pctdistance=1.2, labeldistance=1)
plt.legend(patches, labels, loc="best", fontsize=11)
##### Save figure
fileName = outDir + "/PCAWG_funcSpectrum_piechart.pdf"
plt.savefig(fileName)
## End ##
print
print "***** Finished! *****"
print
| gpl-3.0 |
georgetown-analytics/nba-tracking | ModelFitting/single_Kmeans.py | 1 | 3449 | """
K-means clustering of cleaned SportVU tracking data for shot-taking events; the resulting
cluster labels are compared against the shooter identity (PLAYER1_ID) using standard
clustering metrics.
Cross Validation Link:
http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_digits.html#sphx-glr-auto-examples-model-selection-grid-search-digits-py
"""
##############################
# Module imports and dependencies
##############################
import numpy as np
import pandas as pd
import pickle
import os
import sklearn
from sklearn.metrics import accuracy_score
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.cluster import KMeans
##############################
# Import Dataframe and Explore
##############################
# We have an existing pickle.pickle, which is a dataframe of our cleaned SportsVU elements
#dir = os.path.dirname(__file__)  # dirname
os.chdir('/Users/johnniefields/nba-tracking2/nba-tracking/')
filename = os.path.join(os.getcwd(), 'data','pickle.pickle')
filename = 'pickle.pickle'
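# Note (editorial): this reassignment overrides the os.path.join() path built on the
# previous line, so the pickle is loaded relative to the working directory set by os.chdir().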
df= pd.read_pickle(filename)
df.columns
##############################
# Prepare Data for Clustering
##############################
#player id is the person who took the shot, as file has already been subset to only shot taking events.
testlist = ['PLAYER1_ID', 'PERSON3TYPE']
broketest = df[testlist]
outcomes = df['PLAYER1_ID']
outcomes = outcomes.astype('category')
#need to identify tuples for our columns.
def gen_predictors(df):
colnames_pred = []
for i in df.columns:
if isinstance(i, tuple):
colnames_pred.append(i)
colnames_pred = colnames_pred[2:]
len(colnames_pred)
####Imputing values for players in bench
predictors = df[colnames_pred]
type(predictors)
predictors.fillna(-100, inplace= True)
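# Note (editorial): calling fillna(..., inplace=True) on a column subset of df can trigger
# pandas' SettingWithCopyWarning; taking predictors = df[colnames_pred].copy() first is a
# common way to avoid it. -100 acts as a sentinel coordinate for players on the bench.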
return predictors
predictors = gen_predictors(df)
df.max()
df.min()
type(predictors)
#==============================================================================
# Clustering using Kmeans
#==============================================================================
#defining the object that will carry out the kmeans clustering part.
KMeans_object = KMeans(init = 'k-means++', n_clusters = 17, n_init= 10)
#doing the kmeans clustering
KMeans_object.fit(predictors)
#printing the inertia
print('The Inertia is:', KMeans_object.inertia_)
#Calculating the Homogeneity score
print('Homogeneity Score is:', metrics.homogeneity_score(outcomes, KMeans_object.labels_))
#Calculating the Completeness Score
print('Completeness Score is:', metrics.completeness_score(outcomes, KMeans_object.labels_))
#calculating the V-measure score
print('V-Measure Score is:', metrics.v_measure_score(outcomes, KMeans_object.labels_))
#calculating the adjusted rand score
print('Adjusted Rand Score is:', metrics.adjusted_rand_score(outcomes, KMeans_object.labels_))
#calculating the adjusted mutual information score
print('Adjusted Mutual Info Score is:', metrics.adjusted_mutual_info_score(outcomes, KMeans_object.labels_))
#Calculating the Silhouette Score. We are not sampling the dataset to calculate it.
print('Silhouette Score is:', metrics.silhouette_score(predictors, KMeans_object.labels_, metric='euclidean'))
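# Editorial note: silhouette_score on the full dataset is O(n^2) in time and memory; for
# large inputs, scikit-learn's sample_size argument (e.g. sample_size=10000, random_state=0)
# computes the score on a random subsample instead.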
| mit |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/factorization/python/ops/gmm.py | 30 | 7114 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering using tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops as logging
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.training import session_run_hook
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes clusters or waits for cluster initialization."""
def __init__(self, init_op, is_initialized_op, is_chief):
self._init_op = init_op
self._is_chief = is_chief
self._is_initialized_op = is_initialized_op
def after_create_session(self, session, _):
assert self._init_op.graph == ops.get_default_graph()
assert self._is_initialized_op.graph == self._init_op.graph
while True:
try:
if session.run(self._is_initialized_op):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
class GMM(estimator.Estimator):
"""An estimator for GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
config=None):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
config: See Estimator
"""
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
super(GMM, self).__init__(
model_fn=self._model_builder(), model_dir=model_dir, config=config)
def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
"""See BaseEstimator.predict."""
results = self.predict(input_fn=input_fn,
batch_size=batch_size,
outputs=outputs)
for result in results:
yield result[GMM.ASSIGNMENTS]
def score(self, input_fn=None, batch_size=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
batch_size: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
steps=steps)
return np.sum(results[GMM.SCORES])
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())],
1)
return features
def _model_builder(self):
"""Creates a model function."""
def _model_fn(features, labels, mode, config):
"""Model function."""
assert labels is None, labels
(all_scores,
model_predictions,
losses, training_op,
init_op,
is_initialized) = gmm_ops.gmm(self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters, self._random_seed,
self._covariance_type,
self._params)
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
training_hooks = [_InitializeClustersHook(
init_op, is_initialized, config.is_chief)]
predictions = {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
eval_metric_ops = {
GMM.SCORES: _streaming_sum(loss),
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss, train_op=training_op,
training_hooks=training_hooks)
return _model_fn
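# Editorial usage sketch (not part of the original module; assumes the tf.contrib.learn
# Estimator input_fn conventions used above, with my_input_fn as a user-supplied function
# returning a (features, None) pair of tensors):
#
#   gmm = GMM(num_clusters=3, covariance_type='full')
#   gmm.fit(input_fn=my_input_fn, steps=100)
#   assignments = list(gmm.predict_assignments(input_fn=my_input_fn))
#   centers = gmm.clusters()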
| mit |
PythonCharmers/orange3 | Orange/widgets/evaluate/owrocanalysis.py | 1 | 27749 | """
ROC Analysis Widget
-------------------
"""
import operator
from functools import reduce, wraps
from collections import namedtuple, deque
import numpy
import sklearn.metrics as skl_metrics
from PyQt4 import QtGui
from PyQt4.QtGui import QColor, QPen, QBrush
from PyQt4.QtCore import Qt
import pyqtgraph as pg
import Orange
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import colorpalette, colorbrewer
from Orange.widgets.io import FileFormat
#: Points on a ROC curve
ROCPoints = namedtuple(
"ROCPoints",
["fpr", # (N,) array of false positive rate coordinates (ascending)
"tpr", # (N,) array of true positive rate coordinates
"thresholds" # (N,) array of thresholds (in descending order)
]
)
ROCPoints.is_valid = property(lambda self: self.fpr.size > 0)
#: ROC Curve and its convex hull
ROCCurve = namedtuple(
"ROCCurve",
["points", # ROCPoints
"hull" # ROCPoints of the convex hull
]
)
ROCCurve.is_valid = property(lambda self: self.points.is_valid)
#: A ROC Curve averaged vertically
ROCAveragedVert = namedtuple(
"ROCAveragedVert",
["points", # ROCPoints sampled by fpr
"hull", # ROCPoints of the convex hull
"tpr_std", # array standard deviation of tpr at each fpr point
]
)
ROCAveragedVert.is_valid = property(lambda self: self.points.is_valid)
#: A ROC Curve averaged by thresholds
ROCAveragedThresh = namedtuple(
"ROCAveragedThresh",
["points", # ROCPoints sampled by threshold
"hull", # ROCPoints of the convex hull
"tpr_std", # array standard deviations of tpr at each threshold
"fpr_std" # array standard deviations of fpr at each threshold
]
)
ROCAveragedThresh.is_valid = property(lambda self: self.points.is_valid)
#: Combined data for a ROC curve of a single algorithm
ROCData = namedtuple(
"ROCData",
["merged", # ROCCurve merged over all folds
"folds", # ROCCurve list, one for each fold
"avg_vertical", # ROCAveragedVert
"avg_threshold", # ROCAveragedThresh
]
)
def ROCData_from_results(results, clf_index, target):
"""
Compute ROC Curve(s) from evaluation results.
:param Orange.evaluation.Results results:
Evaluation results.
:param int clf_index:
Learner index in the `results`.
:param int target:
Target class index (i.e. positive class).
:rval ROCData:
An instance holding the computed curves.
"""
merged = roc_curve_for_fold(results, slice(0, -1), clf_index, target)
merged_curve = ROCCurve(ROCPoints(*merged),
ROCPoints(*roc_curve_convex_hull(merged)))
folds = results.folds if results.folds is not None else [slice(0, -1)]
fold_curves = []
for fold in folds:
# TODO: Check for no FP or no TP
points = roc_curve_for_fold(results, fold, clf_index, target)
hull = roc_curve_convex_hull(points)
c = ROCCurve(ROCPoints(*points), ROCPoints(*hull))
fold_curves.append(c)
curves = [fold.points for fold in fold_curves
if fold.is_valid]
fpr, tpr, std = roc_curve_vertical_average(curves)
thresh = numpy.zeros_like(fpr) * numpy.nan
hull = roc_curve_convex_hull((fpr, tpr, thresh))
v_avg = ROCAveragedVert(
ROCPoints(fpr, tpr, thresh),
ROCPoints(*hull),
std
)
all_thresh = numpy.hstack([t for _, _, t in curves])
all_thresh = numpy.clip(all_thresh, 0.0 - 1e-10, 1.0 + 1e-10)
all_thresh = numpy.unique(all_thresh)[::-1]
thresh = all_thresh[::max(all_thresh.size // 10, 1)]
(fpr, fpr_std), (tpr, tpr_std) = \
roc_curve_threshold_average(curves, thresh)
hull = roc_curve_convex_hull((fpr, tpr, thresh))
t_avg = ROCAveragedThresh(
ROCPoints(fpr, tpr, thresh),
ROCPoints(*hull),
tpr_std,
fpr_std
)
return ROCData(merged_curve, fold_curves, v_avg, t_avg)
ROCData.from_results = staticmethod(ROCData_from_results)
#: A curve item to be displayed in a plot
PlotCurve = namedtuple(
"PlotCurve",
["curve", # ROCCurve source curve
"curve_item", # pg.PlotDataItem main curve
"hull_item" # pg.PlotDataItem curve's convex hull
]
)
def plot_curve(curve, pen=None, shadow_pen=None, symbol="+",
symbol_size=3, name=None):
"""
Construct a `PlotCurve` for the given `ROCCurve`.
:param ROCCurve curve:
Source curve.
The other parameters are passed to pg.PlotDataItem
:rtype: PlotCurve
"""
def extend_to_origin(points):
"Extend ROCPoints to include coordinate origin if not already present"
if points.tpr.size and (points.tpr[0] > 0 or points.fpr[0] > 0):
points = ROCPoints(
numpy.r_[0, points.fpr], numpy.r_[0, points.tpr],
numpy.r_[points.thresholds[0] + 1, points.thresholds]
)
return points
points = extend_to_origin(curve.points)
item = pg.PlotCurveItem(
points.fpr, points.tpr, pen=pen, shadowPen=shadow_pen,
name=name, antialias=True
)
sp = pg.ScatterPlotItem(
curve.points.fpr, curve.points.tpr, symbol=symbol,
size=symbol_size, pen=shadow_pen,
name=name
)
sp.setParentItem(item)
hull = extend_to_origin(curve.hull)
hull_item = pg.PlotDataItem(
hull.fpr, hull.tpr, pen=pen, antialias=True
)
return PlotCurve(curve, item, hull_item)
PlotCurve.from_roc_curve = staticmethod(plot_curve)
#: A curve displayed in a plot with error bars
PlotAvgCurve = namedtuple(
"PlotAvgCurve",
["curve", # ROCCurve
"curve_item", # pg.PlotDataItem
"hull_item", # pg.PlotDataItem
"confint_item", # pg.ErrorBarItem
]
)
def plot_avg_curve(curve, pen=None, shadow_pen=None, symbol="+",
symbol_size=4, name=None):
"""
Construct a `PlotAvgCurve` for the given `curve`.
:param curve: Source curve.
:type curve: ROCAveragedVert or ROCAveragedThresh
The other parameters are passed to pg.PlotDataItem
:rtype: PlotAvgCurve
"""
pc = plot_curve(curve, pen=pen, shadow_pen=shadow_pen, symbol=symbol,
symbol_size=symbol_size, name=name)
points = curve.points
if isinstance(curve, ROCAveragedVert):
tpr_std = curve.tpr_std
error_item = pg.ErrorBarItem(
x=points.fpr[1:-1], y=points.tpr[1:-1],
height=2 * tpr_std[1:-1],
pen=pen, beam=0.025,
antialias=True,
)
elif isinstance(curve, ROCAveragedThresh):
tpr_std, fpr_std = curve.tpr_std, curve.fpr_std
error_item = pg.ErrorBarItem(
x=points.fpr[1:-1], y=points.tpr[1:-1],
height=2 * tpr_std[1:-1], width=2 * fpr_std[1:-1],
pen=pen, beam=0.025,
antialias=True,
)
return PlotAvgCurve(curve, pc.curve_item, pc.hull_item, error_item)
PlotAvgCurve.from_roc_curve = staticmethod(plot_avg_curve)
Some = namedtuple("Some", ["val"])
def once(f):
"""
Return a function that will be called only once, with its result cached.
"""
cached = None
@wraps(f)
def wraped():
nonlocal cached
if cached is None:
cached = Some(f())
return cached.val
return wraped
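# Editorial note: `once` memoizes a zero-argument function, e.g.
#
#   expensive = once(lambda: compute_something())   # compute_something is illustrative
#   expensive(); expensive()   # the wrapped callable runs only on the first call
#
# It is used below so that the PlotCurve/PlotAvgCurve items are built lazily, only when a
# particular averaging mode is actually displayed.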
plot_curves = namedtuple(
"plot_curves",
["merge", # :: () -> PlotCurve
"folds", # :: () -> [PlotCurve]
"avg_vertical", # :: () -> PlotAvgCurve
"avg_threshold", # :: () -> PlotAvgCurve
]
)
class InfiniteLine(pg.InfiniteLine):
"""pyqtgraph.InfiniteLine extended to support antialiasing.
"""
def __init__(self, pos=None, angle=90, pen=None, movable=False,
bounds=None, antialias=False):
super().__init__(pos, angle, pen, movable, bounds)
self.antialias = antialias
def paint(self, painter, *args):
if self.antialias:
painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
super().paint(painter, *args)
class OWROCAnalysis(widget.OWWidget):
name = "ROC Analysis"
description = "Display Receiver Operating Characteristics curve " \
"based on evaluation of classifiers."
icon = "icons/ROCAnalysis.svg"
priority = 1010
inputs = [("Evaluation Results", Orange.evaluation.Results, "set_results")]
target_index = settings.Setting(0)
selected_classifiers = []
display_perf_line = settings.Setting(True)
display_def_threshold = settings.Setting(True)
fp_cost = settings.Setting(500)
fn_cost = settings.Setting(500)
target_prior = settings.Setting(50.0)
#: ROC Averaging Types
Merge, Vertical, Threshold, NoAveraging = 0, 1, 2, 3
roc_averaging = settings.Setting(Merge)
display_convex_hull = settings.Setting(False)
display_convex_curve = settings.Setting(False)
want_graph = True
def __init__(self):
super().__init__()
self.results = None
self.classifier_names = []
self.perf_line = None
self.colors = []
self._curve_data = {}
self._plot_curves = {}
self._rocch = None
self._perf_line = None
box = gui.widgetBox(self.controlArea, "Plot")
tbox = gui.widgetBox(box, "Target Class")
tbox.setFlat(True)
self.target_cb = gui.comboBox(
tbox, self, "target_index", callback=self._on_target_changed,
contentsLength=8)
cbox = gui.widgetBox(box, "Classifiers")
cbox.setFlat(True)
self.classifiers_list_box = gui.listBox(
cbox, self, "selected_classifiers", "classifier_names",
selectionMode=QtGui.QListView.MultiSelection,
callback=self._on_classifiers_changed)
abox = gui.widgetBox(box, "Combine ROC Curves From Folds")
abox.setFlat(True)
gui.comboBox(abox, self, "roc_averaging",
items=["Merge predictions from folds", "Mean TP rate",
"Mean TP and FP at threshold", "Show individual curves"],
callback=self._replot)
hbox = gui.widgetBox(box, "ROC Convex Hull")
hbox.setFlat(True)
gui.checkBox(hbox, self, "display_convex_curve",
"Show convex ROC curves", callback=self._replot)
gui.checkBox(hbox, self, "display_convex_hull",
"Show ROC convex hull", callback=self._replot)
box = gui.widgetBox(self.controlArea, "Analysis")
gui.checkBox(box, self, "display_def_threshold",
"Default threshold (0.5) point",
callback=self._on_display_def_threshold_changed)
gui.checkBox(box, self, "display_perf_line", "Show performance line",
callback=self._on_display_perf_line_changed)
grid = QtGui.QGridLayout()
ibox = gui.indentedBox(box, orientation=grid)
sp = gui.spin(box, self, "fp_cost", 1, 1000, 10,
callback=self._on_display_perf_line_changed)
grid.addWidget(QtGui.QLabel("FP Cost"), 0, 0)
grid.addWidget(sp, 0, 1)
sp = gui.spin(box, self, "fn_cost", 1, 1000, 10,
callback=self._on_display_perf_line_changed)
grid.addWidget(QtGui.QLabel("FN Cost"))
grid.addWidget(sp, 1, 1)
sp = gui.spin(box, self, "target_prior", 1, 99,
callback=self._on_display_perf_line_changed)
sp.setSuffix("%")
sp.addAction(QtGui.QAction("Auto", sp))
grid.addWidget(QtGui.QLabel("Prior target class probability"))
grid.addWidget(sp, 2, 1)
self.plotview = pg.GraphicsView(background="w")
self.plotview.setFrameStyle(QtGui.QFrame.StyledPanel)
self.plot = pg.PlotItem()
self.plot.getViewBox().setMenuEnabled(False)
self.plot.getViewBox().setMouseEnabled(False, False)
pen = QPen(self.palette().color(QtGui.QPalette.Text))
tickfont = QtGui.QFont(self.font())
tickfont.setPixelSize(max(int(tickfont.pixelSize() * 2 // 3), 11))
axis = self.plot.getAxis("bottom")
axis.setTickFont(tickfont)
axis.setPen(pen)
axis.setLabel("FP Rate (1-Specificity)")
axis = self.plot.getAxis("left")
axis.setTickFont(tickfont)
axis.setPen(pen)
axis.setLabel("TP Rate (Sensitivity)")
self.plot.showGrid(True, True, alpha=0.1)
self.plot.setRange(xRange=(0.0, 1.0), yRange=(0.0, 1.0))
self.plotview.setCentralItem(self.plot)
self.mainArea.layout().addWidget(self.plotview)
self.graphButton.clicked.connect(self.save_graph)
def set_results(self, results):
"""Set the input evaluation results."""
self.clear()
self.error(0)
if results is not None:
if results.data is None:
self.error(0, "Give me data!!")
results = None
elif not results.data.domain.has_discrete_class:
self.error(0, "Need discrete class variable")
results = None
self.results = results
if results is not None:
self._initialize(results)
self._setup_plot()
def clear(self):
"""Clear the widget state."""
self.results = None
self.plot.clear()
self.classifier_names = []
self.selected_classifiers = []
self.target_cb.clear()
self.target_index = 0
self.colors = []
self._curve_data = {}
self._plot_curves = {}
self._rocch = None
self._perf_line = None
def _initialize(self, results):
names = getattr(results, "learner_names", None)
if names is None:
names = ["#{}".format(i + 1)
for i in range(len(results.predicted))]
self.colors = colorpalette.ColorPaletteGenerator(
len(names), colorbrewer.colorSchemes["qualitative"]["Dark2"])
self.classifier_names = names
self.selected_classifiers = list(range(len(names)))
for i in range(len(names)):
listitem = self.classifiers_list_box.item(i)
listitem.setIcon(colorpalette.ColorPixmap(self.colors[i]))
class_var = results.data.domain.class_var
self.target_cb.addItems(class_var.values)
def curve_data(self, target, clf_idx):
"""Return `ROCData' for the given target and classifier."""
if (target, clf_idx) not in self._curve_data:
data = ROCData.from_results(self.results, clf_idx, target)
self._curve_data[target, clf_idx] = data
return self._curve_data[target, clf_idx]
def plot_curves(self, target, clf_idx):
"""Return a set of functions `plot_curves` generating plot curves."""
def generate_pens(basecolor):
pen = QPen(basecolor, 1)
pen.setCosmetic(True)
shadow_pen = QPen(pen.color().lighter(160), 2.5)
shadow_pen.setCosmetic(True)
return pen, shadow_pen
data = self.curve_data(target, clf_idx)
if (target, clf_idx) not in self._plot_curves:
pen, shadow_pen = generate_pens(self.colors[clf_idx])
name = self.classifier_names[clf_idx]
@once
def merged():
return plot_curve(
data.merged, pen=pen, shadow_pen=shadow_pen, name=name)
@once
def folds():
return [plot_curve(fold, pen=pen, shadow_pen=shadow_pen)
for fold in data.folds]
@once
def avg_vert():
return plot_avg_curve(data.avg_vertical, pen=pen,
shadow_pen=shadow_pen, name=name)
@once
def avg_thres():
return plot_avg_curve(data.avg_threshold, pen=pen,
shadow_pen=shadow_pen, name=name)
self._plot_curves[target, clf_idx] = plot_curves(
merge=merged, folds=folds,
avg_vertical=avg_vert, avg_threshold=avg_thres
)
return self._plot_curves[target, clf_idx]
def _setup_plot(self):
target = self.target_index
selected = self.selected_classifiers
curves = [self.plot_curves(target, i) for i in selected]
selected = [self.curve_data(target, i) for i in selected]
if self.roc_averaging == OWROCAnalysis.Merge:
for curve in curves:
graphics = curve.merge()
curve = graphics.curve
self.plot.addItem(graphics.curve_item)
if self.display_convex_curve:
self.plot.addItem(graphics.hull_item)
if self.display_def_threshold:
points = curve.points
ind = numpy.argmin(numpy.abs(points.thresholds - 0.5))
item = pg.TextItem(
text="{:.3f}".format(points.thresholds[ind]),
)
item.setPos(points.fpr[ind], points.tpr[ind])
self.plot.addItem(item)
hull_curves = [curve.merged.hull for curve in selected]
if hull_curves:
self._rocch = convex_hull(hull_curves)
iso_pen = QPen(QColor(Qt.black), 1)
iso_pen.setCosmetic(True)
self._perf_line = InfiniteLine(pen=iso_pen, antialias=True)
self.plot.addItem(self._perf_line)
elif self.roc_averaging == OWROCAnalysis.Vertical:
for curve in curves:
graphics = curve.avg_vertical()
self.plot.addItem(graphics.curve_item)
self.plot.addItem(graphics.confint_item)
hull_curves = [curve.avg_vertical.hull for curve in selected]
elif self.roc_averaging == OWROCAnalysis.Threshold:
for curve in curves:
graphics = curve.avg_threshold()
self.plot.addItem(graphics.curve_item)
self.plot.addItem(graphics.confint_item)
hull_curves = [curve.avg_threshold.hull for curve in selected]
elif self.roc_averaging == OWROCAnalysis.NoAveraging:
for curve in curves:
graphics = curve.folds()
for fold in graphics:
self.plot.addItem(fold.curve_item)
if self.display_convex_curve:
self.plot.addItem(fold.hull_item)
hull_curves = [fold.hull for curve in selected for fold in curve.folds]
if self.display_convex_hull and hull_curves:
hull = convex_hull(hull_curves)
hull_pen = QPen(QColor(200, 200, 200, 100), 2)
hull_pen.setCosmetic(True)
item = self.plot.plot(
hull.fpr, hull.tpr,
pen=hull_pen,
brush=QBrush(QColor(200, 200, 200, 50)),
fillLevel=0)
item.setZValue(-10000)
pen = QPen(QColor(100, 100, 100, 100), 1, Qt.DashLine)
pen.setCosmetic(True)
self.plot.plot([0, 1], [0, 1], pen=pen, antialias=True)
if self.roc_averaging == OWROCAnalysis.Merge:
self._update_perf_line()
def _on_target_changed(self):
self.plot.clear()
self._setup_plot()
def _on_classifiers_changed(self):
self.plot.clear()
if self.results is not None:
self._setup_plot()
def _on_display_perf_line_changed(self):
if self.roc_averaging == OWROCAnalysis.Merge:
self._update_perf_line()
if self.perf_line is not None:
self.perf_line.setVisible(self.display_perf_line)
def _on_display_def_threshold_changed(self):
self._replot()
def _replot(self):
self.plot.clear()
if self.results is not None:
self._setup_plot()
def _update_perf_line(self):
if self._perf_line is None:
return
self._perf_line.setVisible(self.display_perf_line)
if self.display_perf_line:
m = roc_iso_performance_slope(
self.fp_cost, self.fn_cost, self.target_prior / 100.0)
hull = self._rocch
ind = roc_iso_performance_line(m, hull)
angle = numpy.arctan2(m, 1) # in radians
self._perf_line.setAngle(angle * 180 / numpy.pi)
self._perf_line.setPos((hull.fpr[ind[0]], hull.tpr[ind[0]]))
def onDeleteWidget(self):
self.clear()
def save_graph(self):
from Orange.widgets.data.owsave import OWSave
save_img = OWSave(data=self.plot,
file_formats=FileFormat.img_writers)
save_img.exec_()
def interp(x, xp, fp, left=None, right=None):
"""
Like numpy.interp, except for the handling of runs of repeated values in `xp`.
"""
x = numpy.asanyarray(x)
xp = numpy.asanyarray(xp)
fp = numpy.asanyarray(fp)
if xp.shape != fp.shape:
raise ValueError("xp and fp must have the same shape")
ind = numpy.searchsorted(xp, x, side="right")
fx = numpy.zeros(len(x))
under = ind == 0
over = ind == len(xp)
between = ~under & ~over
fx[under] = left if left is not None else fp[0]
fx[over] = right if right is not None else fp[-1]
if right is not None:
# Fix points exactly on the right boundary.
fx[x == xp[-1]] = fp[-1]
ind = ind[between]
df = (fp[ind] - fp[ind - 1]) / (xp[ind] - xp[ind - 1])
fx[between] = df * (x[between] - xp[ind]) + fp[ind]
return fx
def roc_curve_for_fold(res, fold, clf_idx, target):
fold_actual = res.actual[fold]
P = numpy.sum(fold_actual == target)
N = fold_actual.size - P
if P == 0 or N == 0:
# Undefined TP and FP rate
return numpy.array([]), numpy.array([]), numpy.array([])
fold_probs = res.probabilities[clf_idx][fold][:, target]
return skl_metrics.roc_curve(
fold_actual, fold_probs, pos_label=target
)
def roc_curve_vertical_average(curves, samples=10):
fpr_sample = numpy.linspace(0.0, 1.0, samples)
tpr_samples = []
for fpr, tpr, _ in curves:
tpr_samples.append(interp(fpr_sample, fpr, tpr, left=0, right=1))
tpr_samples = numpy.array(tpr_samples)
return fpr_sample, tpr_samples.mean(axis=0), tpr_samples.std(axis=0)
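# Editorial note: vertical averaging samples `samples` evenly spaced FPR values, linearly
# interpolates each fold's TPR at those FPR values, and returns the pointwise mean and
# standard deviation across folds (the std drives the error bars in plot_avg_curve).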
def roc_curve_threshold_average(curves, thresh_samples):
fpr_samples, tpr_samples = [], []
for fpr, tpr, thresh in curves:
ind = numpy.searchsorted(thresh[::-1], thresh_samples, side="left")
ind = ind[::-1]
ind = numpy.clip(ind, 0, len(thresh) - 1)
fpr_samples.append(fpr[ind])
tpr_samples.append(tpr[ind])
fpr_samples = numpy.array(fpr_samples)
tpr_samples = numpy.array(tpr_samples)
return ((fpr_samples.mean(axis=0), fpr_samples.std(axis=0)),
(tpr_samples.mean(axis=0), fpr_samples.std(axis=0)))
def roc_curve_threshold_average_interp(curves, thresh_samples):
fpr_samples, tpr_samples = [], []
for fpr, tpr, thresh in curves:
thresh = thresh[::-1]
fpr = interp(thresh_samples, thresh, fpr[::-1], left=1.0, right=0.0)
tpr = interp(thresh_samples, thresh, tpr[::-1], left=1.0, right=0.0)
fpr_samples.append(fpr)
tpr_samples.append(tpr)
fpr_samples = numpy.array(fpr_samples)
tpr_samples = numpy.array(tpr_samples)
return ((fpr_samples.mean(axis=0), fpr_samples.std(axis=0)),
(tpr_samples.mean(axis=0), fpr_samples.std(axis=0)))
roc_point = namedtuple("roc_point", ["fpr", "tpr", "threshold"])
def roc_curve_convex_hull(curve):
def slope(p1, p2):
x1, y1, _ = p1
x2, y2, _ = p2
if x1 != x2:
return (y2 - y1) / (x2 - x1)
else:
return numpy.inf
fpr, _, _ = curve
if len(fpr) <= 2:
return curve
points = map(roc_point._make, zip(*curve))
hull = deque([next(points)])
for point in points:
while True:
if len(hull) < 2:
hull.append(point)
break
else:
last = hull[-1]
if point.fpr != last.fpr and \
slope(hull[-2], last) > slope(last, point):
hull.append(point)
break
else:
hull.pop()
fpr = numpy.array([p.fpr for p in hull])
tpr = numpy.array([p.tpr for p in hull])
thres = numpy.array([p.threshold for p in hull])
return (fpr, tpr, thres)
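# Editorial example (hypothetical values): for a curve with
#   fpr = [0.0, 0.25, 0.5, 1.0]
#   tpr = [0.0, 0.10, 0.9, 1.0]
# the point (0.25, 0.10) lies below the segment joining (0, 0) and (0.5, 0.9), so the slope
# test pops it and the returned hull is fpr = [0.0, 0.5, 1.0], tpr = [0.0, 0.9, 1.0].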
def convex_hull(curves):
def slope(p1, p2):
x1, y1, *_ = p1
x2, y2, *_ = p2
if x1 != x2:
return (y2 - y1) / (x2 - x1)
else:
return numpy.inf
curves = [list(map(roc_point._make, zip(*curve))) for curve in curves]
merged_points = reduce(operator.iadd, curves, [])
merged_points = sorted(merged_points)
if len(merged_points) == 0:
return ROCPoints(numpy.array([]), numpy.array([]), numpy.array([]))
if len(merged_points) <= 2:
return ROCPoints._make(map(numpy.array, zip(*merged_points)))
points = iter(merged_points)
hull = deque([next(points)])
for point in points:
while True:
if len(hull) < 2:
hull.append(point)
break
else:
last = hull[-1]
if point[0] != last[0] and \
slope(hull[-2], last) > slope(last, point):
hull.append(point)
break
else:
hull.pop()
return ROCPoints._make(map(numpy.array, zip(*hull)))
def roc_iso_performance_line(slope, hull, tol=1e-5):
"""
Return the indices where a line with `slope` touches the ROC convex hull.
"""
fpr, tpr, *_ = hull
# Compute the distance of each point to a reference iso line
# going through point (0, 1). The point(s) with the minimum
# distance are our result
# y = m * x + 1
# m * x - 1y + 1 = 0
a, b, c = slope, -1, 1
dist = distance_to_line(a, b, c, fpr, tpr)
mindist = numpy.min(dist)
return numpy.flatnonzero((dist - mindist) <= tol)
def distance_to_line(a, b, c, x0, y0):
"""
Return the distance to a line ax + by + c = 0
"""
assert a != 0 or b != 0
return numpy.abs(a * x0 + b * y0 + c) / numpy.sqrt(a ** 2 + b ** 2)
def roc_iso_performance_slope(fp_cost, fn_cost, p):
assert 0 <= p <= 1
if fn_cost * p == 0:
return numpy.inf
else:
return (fp_cost * (1. - p)) / (fn_cost * p)
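# Editorial example: with the widget defaults fp_cost = fn_cost = 500 and a target prior
# of 0.5, the slope is (500 * 0.5) / (500 * 0.5) = 1, i.e. 45-degree iso-performance lines;
# raising the FP cost or lowering the prior steepens the line, favoring operating points
# with a lower FP rate.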
def main():
import gc
import sip
from PyQt4.QtGui import QApplication
from Orange.classification import (LogisticRegressionLearner, SVMLearner,
NuSVMLearner)
app = QApplication([])
w = OWROCAnalysis()
w.show()
w.raise_()
# data = Orange.data.Table("iris")
data = Orange.data.Table("ionosphere")
results = Orange.evaluation.CrossValidation(
data,
[LogisticRegressionLearner(),
LogisticRegressionLearner(penalty="l1"),
SVMLearner(probability=True),
NuSVMLearner(probability=True)],
k=5,
store_data=True,
)
results.learner_names = ["Logistic", "Logistic (L1 reg.)", "SVM", "NuSVM"]
w.set_results(results)
rval = app.exec_()
w.deleteLater()
sip.delete(w)
del w
app.processEvents()
sip.delete(app)
del app
gc.collect()
return rval
if __name__ == "__main__":
import sys
sys.exit(main())
| gpl-3.0 |
mblondel/scikit-learn | sklearn/metrics/ranking.py | 8 | 21775 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.stats import rankdata
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
                                         sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds := len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
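# Illustrative example (sketch, not part of the upstream source): using the
# same toy data as the roc_curve example below, the scores sorted in
# decreasing order are [0.8, 0.4, 0.35, 0.1] with labels [1, 0, 1, 0], so the
# cumulative counts at each distinct threshold are
#
#     >>> y_true = np.array([0, 0, 1, 1])
#     >>> y_score = np.array([0.1, 0.4, 0.35, 0.8])
#     >>> fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
#     # fps        -> [0, 1, 1, 2]
#     # tps        -> [1, 1, 2, 2]
#     # thresholds -> [0.8, 0.4, 0.35, 0.1]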
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
    y axis.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
    This metric is used in multilabel ranking problems, where the goal
    is to give a better rank to the labels associated with each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
    Ties in ``y_scores`` are broken by giving the maximal rank that would have
been assigned to all tied values.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
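# Illustrative example (sketch, not part of the upstream source): for
#
#     y_true  = [[1, 0, 0], [0, 0, 1]]
#     y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]]
#
# the first sample needs the top 2 scores to cover its true label (0.75 is the
# second highest score) and the second sample needs all 3 (0.1 is the lowest),
# so coverage_error(y_true, y_score) returns (2 + 3) / 2 = 2.5.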
| bsd-3-clause |
wkfwkf/statsmodels | statsmodels/sandbox/rls.py | 33 | 5179 | """Restricted least squares
from pandas
License: Simplified BSD
"""
from __future__ import print_function
import numpy as np
from statsmodels.regression.linear_model import WLS, GLS, RegressionResults
class RLS(GLS):
"""
Restricted general least squares model that handles linear constraints
Parameters
----------
endog: array-like
n length array containing the dependent variable
exog: array-like
n-by-p array of independent variables
constr: array-like
k-by-p array of linear constraints
param (0.): array-like or scalar
p-by-1 array (or scalar) of constraint parameters
sigma (None): scalar or array-like
The weighting matrix of the covariance. No scaling by default (OLS).
If sigma is a scalar, then it is converted into an n-by-n diagonal
matrix with sigma as each diagonal element.
If sigma is an n-length array, then it is assumed to be a diagonal
matrix with the given sigma on the diagonal (WLS).
Notes
-----
endog = exog * beta + epsilon
weights' * constr * beta = param
See Greene and Seaks, "The Restricted Least Squares Estimator:
A Pedagogical Note", The Review of Economics and Statistics, 1991.
"""
def __init__(self, endog, exog, constr, param=0., sigma=None):
N, Q = exog.shape
constr = np.asarray(constr)
if constr.ndim == 1:
K, P = 1, constr.shape[0]
else:
K, P = constr.shape
if Q != P:
raise Exception('Constraints and design do not align')
self.ncoeffs = Q
self.nconstraint = K
self.constraint = constr
if np.isscalar(param) and K > 1:
param = np.ones((K,)) * param
self.param = param
if sigma is None:
sigma = 1.
if np.isscalar(sigma):
sigma = np.ones(N) * sigma
sigma = np.squeeze(sigma)
if sigma.ndim == 1:
self.sigma = np.diag(sigma)
self.cholsigmainv = np.diag(np.sqrt(sigma))
else:
self.sigma = sigma
self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(self.sigma)).T
super(GLS, self).__init__(endog, exog)
_rwexog = None
@property
def rwexog(self):
"""Whitened exogenous variables augmented with restrictions"""
if self._rwexog is None:
P = self.ncoeffs
K = self.nconstraint
design = np.zeros((P + K, P + K))
design[:P, :P] = np.dot(self.wexog.T, self.wexog) #top left
constr = np.reshape(self.constraint, (K, P))
design[:P, P:] = constr.T #top right partition
design[P:, :P] = constr #bottom left partition
design[P:, P:] = np.zeros((K, K)) #bottom right partition
self._rwexog = design
return self._rwexog
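    # Illustrative note (sketch, following the Greene and Seaks reference in
    # the class docstring): the matrix assembled above is the bordered system
    #
    #     [ X'X  R' ] [ beta   ]   [ X'y   ]
    #     [ R    0  ] [ lambda ] = [ param ]
    #
    # where R is `self.constraint`; `coeffs` below solves this system and
    # keeps the first `ncoeffs` entries (beta), discarding the Lagrange
    # multipliers.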
_inv_rwexog = None
@property
def inv_rwexog(self):
"""Inverse of self.rwexog"""
if self._inv_rwexog is None:
self._inv_rwexog = np.linalg.inv(self.rwexog)
return self._inv_rwexog
_rwendog = None
@property
def rwendog(self):
"""Whitened endogenous variable augmented with restriction parameters"""
if self._rwendog is None:
P = self.ncoeffs
K = self.nconstraint
response = np.zeros((P + K,))
response[:P] = np.dot(self.wexog.T, self.wendog)
response[P:] = self.param
self._rwendog = response
return self._rwendog
_ncp = None
@property
def rnorm_cov_params(self):
"""Parameter covariance under restrictions"""
if self._ncp is None:
P = self.ncoeffs
self._ncp = self.inv_rwexog[:P, :P]
return self._ncp
_wncp = None
@property
def wrnorm_cov_params(self):
"""
Heteroskedasticity-consistent parameter covariance
Used to calculate White standard errors.
"""
if self._wncp is None:
df = self.df_resid
pred = np.dot(self.wexog, self.coeffs)
eps = np.diag((self.wendog - pred) ** 2)
sigmaSq = np.sum(eps)
pinvX = np.dot(self.rnorm_cov_params, self.wexog.T)
self._wncp = np.dot(np.dot(pinvX, eps), pinvX.T) * df / sigmaSq
return self._wncp
_coeffs = None
@property
def coeffs(self):
"""Estimated parameters"""
if self._coeffs is None:
betaLambda = np.dot(self.inv_rwexog, self.rwendog)
self._coeffs = betaLambda[:self.ncoeffs]
return self._coeffs
def fit(self):
rncp = self.wrnorm_cov_params
lfit = RegressionResults(self, self.coeffs, normalized_cov_params=rncp)
return lfit
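# Illustrative usage sketch (not part of the original module; data are made
# up): restrict the two slope coefficients of a three-column design
# [const, x1, x2] to sum to one via constr=[0, 1, 1] and param=1.
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> X = np.column_stack((np.ones(100), rng.randn(100, 2)))
#     >>> y = np.dot(X, [1.0, 0.3, 0.7]) + 0.1 * rng.randn(100)
#     >>> res = RLS(y, X, constr=[0, 1, 1], param=1.).fit()
#     >>> res.params[1] + res.params[2]  # -> 1.0 (up to numerical error)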
if __name__=="__main__":
import statsmodels.api as sm
dta = np.genfromtxt('./rlsdata.txt', names=True)
design = np.column_stack((dta['Y'],dta['Y']**2,dta[['NE','NC','W','S']].view(float).reshape(dta.shape[0],-1)))
design = sm.add_constant(design, prepend=True)
rls_mod = RLS(dta['G'],design, constr=[0,0,0,1,1,1,1])
rls_fit = rls_mod.fit()
print(rls_fit.params)
| bsd-3-clause |
MJuddBooth/pandas | pandas/io/stata.py | 1 | 108746 | """
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
from collections import OrderedDict
import datetime
import os
import struct
import sys
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas._libs.tslibs import NaT, Timestamp
from pandas._libs.writers import max_len_string_array
from pandas.compat import (
BytesIO, ResourceWarning, lmap, lrange, lzip, range, string_types,
text_type, zip)
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_object, is_categorical_dtype, is_datetime64_dtype)
from pandas import DatetimeIndex, compat, isna, to_datetime, to_timedelta
from pandas.core.arrays import Categorical
from pandas.core.base import StringMixin
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.io.common import (
BaseIterator, _stringify_path, get_filepath_or_buffer)
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values.
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables."""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to latin-1."""
_statafile_processing_params2 = """\
index_col : string, optional, default: None
Column to set as index.
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns.
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines."""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object."""
_read_stata_doc = """
Read Stata file into DataFrame.
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions.
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
DataFrame.to_stata: Export Stata data files.
Examples
--------
Read a Stata dta file:
>>> df = pd.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pd.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
... do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """\
Read observations from Stata file, converting them into a dataframe
.. deprecated::
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or object
implementing a binary read() functions.
.. versionadded:: 0.23.0 support for pathlib, py.path.
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
@deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
@deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index_col=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index_col=index_col, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize)
if iterator or chunksize:
data = reader
else:
try:
data = reader.read()
finally:
reader.close()
return data
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
    Returns
-------
converted : Series
The converted dates
Examples
--------
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
        half-years since 1960h1
    yearly date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas.
Otherwise it falls back to a slower but more robust method
using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format='%Y%m')
else:
index = getattr(year, 'index', None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)],
index=index)
def convert_year_days_safe(year, days):
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return (to_datetime(year, format='%Y') +
to_timedelta(days, unit='d'))
else:
index = getattr(year, 'index', None)
value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit):
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, 'index', None)
if unit == 'd':
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == 'ms':
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [base + relativedelta(microseconds=(int(d) * 1000))
for d in deltas]
return Series(values, index=index)
else:
raise ValueError('format not understood')
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt.startswith(("%tc", "tc")): # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, 'ms')
elif fmt.startswith(("%tC", "tC")):
warnings.warn("Encountered %tC format. Leaving in Stata "
"Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = NaT
return conv_dates
# Delta days relative to base
elif fmt.startswith(("%td", "td", "%d", "d")):
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, 'd')
# does not count leap days - 7 days is a week.
# 52nd week may have more than 7 days
elif fmt.startswith(("%tw", "tw")):
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt.startswith(("%tm", "tm")): # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base
year = stata_epoch.year + dates // 4
month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%th", "th")): # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%ty", "ty")): # Years -- not delta
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
else:
raise ValueError("Date fmt {fmt} not understood".format(fmt=fmt))
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
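# Illustrative example (sketch, not part of the upstream source): the %tw
# branch above treats a year as exactly 52 weeks, so week 51 of 1960 begins
# 51 * 7 = 357 days into 1960 (1960-12-23), while week 52 rolls over to
# 1961-01-01, matching the docstring example.
#
#     >>> _stata_elapsed_date_to_datetime_vec(Series([51, 52]), "%tw")
#     # -> [1960-12-23, 1961-01-01]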
def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d['delta'] = delta.values.astype(
np.int64) // 1000 # microseconds
if days or year:
dates = DatetimeIndex(dates)
d['year'], d['month'] = dates.year, dates.month
if days:
days = (dates.astype(np.int64) -
to_datetime(d['year'], format='%Y').astype(np.int64))
d['days'] = days // NS_PER_DAY
elif infer_dtype(dates, skipna=False) == 'datetime':
if delta:
delta = dates.values - stata_epoch
f = lambda x: \
US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d['delta'] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d['year'] = year_month.values // 100
d['month'] = (year_month.values - d['year'] * 100)
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d['days'] = v(dates)
else:
raise ValueError('Columns containing dates must contain either '
'datetime64, datetime.datetime or null values.')
return DataFrame(d, index=index)
bad_loc = isna(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
warnings.warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (2 * (d.year - stata_epoch.year) +
(d.month > 6).astype(np.int))
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError(
"Format {fmt} is not a known Stata date format".format(fmt=fmt))
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
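# Illustrative example (sketch, not part of the upstream source): the %td
# branch above counts days since the Stata epoch, so 1960-01-02 encodes as 1.
#
#     >>> dates = Series([datetime.datetime(1960, 1, 2)])
#     >>> _datetime_to_stata_elapsed_vec(dates, "%td")
#     # -> 1.0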
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction. Use the
'version=117' parameter to write the newer (Stata 13 and later) format.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and cast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
bool columns are cast to int8. uint columns are converted to int of the
same size if there is no loss in precision, otherwise are upcast to a
    larger type. uint64 is currently not supported since it is converted to
object in a DataFrame.
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if (data[col].max() <= 2147483620 and
data[col].min() >= -2147483647):
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
raise ValueError('Column {col} has a maximum value of '
'infinity which is outside the range '
'supported by Stata.'.format(col=col))
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
raise ValueError('Column {col} has a maximum value '
'({val}) outside the range supported by '
'Stata ({float64_max})'
.format(col=col, val=value,
float64_max=float64_max))
if ws:
warnings.warn(ws, PossiblePrecisionLoss)
return data
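# Illustrative example (sketch, not part of the upstream source): typical
# conversions performed by _cast_to_stata_types -- bool columns become int8
# and int64 values that fit in Stata's int32 range are downcast.
#
#     >>> df = DataFrame({'flag': [True, False], 'big': [1, 2 ** 30]})
#     >>> _cast_to_stata_types(df).dtypes
#     # flag -> int8, big -> int32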
class StataValueLabel(object):
"""
Parse a categorical column and prepare formatted output
Parameters
-----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Methods
-------
generate_value_label
"""
def __init__(self, catarray):
self.labname = catarray.name
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = np.int32(0)
self.off = []
self.val = []
self.txt = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, string_types):
category = str(category)
warnings.warn(value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError('Stata value labels for a single variable must '
'have a combined length less than 32,000 '
'characters.')
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def _encode(self, s):
"""
Python 3 compatibility shim
"""
if compat.PY3:
return s.encode(self._encoding)
else:
return s
def generate_value_label(self, byteorder, encoding):
"""
Parameters
----------
byteorder : str
Byte order of the output
encoding : str
File encoding
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
self._encoding = encoding
bio = BytesIO()
null_string = '\x00'
null_byte = b'\x00'
# len
bio.write(struct.pack(byteorder + 'i', self.len))
# labname
labname = self._encode(_pad_bytes(self.labname[:32], 33))
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack('c', null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + 'i', self.n))
# textlen - int32
bio.write(struct.pack(byteorder + 'i', self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + 'i', offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + 'i', value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(self._encode(text + null_string))
bio.seek(0)
return bio.read()
class StataMissingValue(StringMixin):
"""
An observation's missing value.
Parameters
    ----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
    Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[compat.long(b)] = '.'
for i in range(1, 27):
MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
float32_base = b'\x00\x00\x00\x7f'
increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
for i in range(27):
value = struct.unpack('<f', float32_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('<i', struct.pack('<f', value))[
0] + increment
float32_base = struct.pack('<i', int_value)
float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
for i in range(27):
value = struct.unpack('<d', float64_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
float64_base = struct.pack('q', int_value)
BASE_MISSING_VALUES = {'int8': 101,
'int16': 32741,
'int32': 2147483621,
'float32': struct.unpack('<f', float32_base)[0],
'float64': struct.unpack('<d', float64_base)[0]}
def __init__(self, value):
self._value = value
# Conversion to long to avoid hash issues on 32 bit platforms #8968
value = compat.long(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
string = property(lambda self: self._str,
doc="The Stata representation of the missing value: "
"'.', '.a'..'.z'")
value = property(lambda self: self._value,
doc='The binary representation of the missing value.')
def __unicode__(self):
return self.string
def __repr__(self):
# not perfect :-/
return "{cls}({obj})".format(cls=self.__class__, obj=self)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.string == other.string and self.value == other.value)
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES['int8']
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES['int16']
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES['int32']
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES['float32']
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES['float64']
else:
raise ValueError('Unsupported dtype')
return value
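# Illustrative example (sketch, not part of the upstream source): the lookup
# tables built in the class body above cover each of Stata's 27 missing codes
# per numeric type, e.g. for int8 the generic missing value '.' is 101 and
# '.a' is 102.
#
#     >>> StataMissingValue(101).string
#     # -> '.'
#     >>> StataMissingValue(102).string
#     # -> '.a'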
class StataParser(object):
def __init__(self):
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = \
dict(
lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
[
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64)
]
)
self.DTYPE_MAP_XML = \
dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8)
]
)
self.TYPE_MAP = lrange(251) + list('bhlfd')
self.TYPE_MAP_XML = \
dict(
[
# Not really a Q, unclear how to handle byteswap
(32768, 'Q'),
(65526, 'd'),
(65527, 'f'),
(65528, 'l'),
(65529, 'h'),
(65530, 'b')
]
)
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b'\xff\xff\xff\xfe'
float32_max = b'\xff\xff\xff\x7e'
float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
self.VALID_RANGE = {
'b': (-127, 100),
'h': (-32767, 32740),
'l': (-2147483647, 2147483620),
'f': (np.float32(struct.unpack('<f', float32_min)[0]),
np.float32(struct.unpack('<f', float32_max)[0])),
'd': (np.float64(struct.unpack('<d', float64_min)[0]),
np.float64(struct.unpack('<d', float64_max)[0]))
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254 # float
# don't know old code for double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
'b': 101,
'h': 32741,
'l': 2147483621,
'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
'd': np.float64(
struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
}
self.NUMPY_TYPE_MAP = {
'b': 'i1',
'h': 'i2',
'l': 'i4',
'f': 'f4',
'd': 'f8',
'Q': 'u8'
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
'byte', 'case', 'catch', 'class', 'colvector',
'complex', 'const', 'continue', 'default',
'delegate', 'delete', 'do', 'double', 'else',
'eltypedef', 'end', 'enum', 'explicit',
'export', 'external', 'float', 'for', 'friend',
'function', 'global', 'goto', 'if', 'inline',
'int', 'local', 'long', 'NULL', 'pragma',
'protected', 'quad', 'rowvector', 'short',
'typedef', 'typename', 'virtual')
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
@deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
@deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index_col=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding=None, chunksize=None):
super(StataReader, self).__init__()
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index_col = index_col
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = None
self._chunksize = chunksize
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _, should_close = get_filepath_or_buffer(
path_or_buf)
if isinstance(path_or_buf, (str, text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
self.path_or_buf = BytesIO(contents)
self._read_header()
self._setup_dtype()
def __enter__(self):
""" enter context manager """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" exit context manager """
self.close()
def close(self):
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
def _set_encoding(self):
"""
Set string encoding which depends on file version
"""
if self.format_version < 118:
self._encoding = 'latin-1'
else:
self._encoding = 'utf-8'
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
self._read_new_header(first_char)
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist
if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118]:
raise ValueError(_version_error)
self._set_encoding()
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b'MSF' and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self.data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
self._seek_varnames = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_sortlist = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_formats = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
self._seek_value_label_names = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
self.seek_strls = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
self.seek_value_labels = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-118.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
for i in range(self.nvar)]
def f(typ):
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata types [{0}]".
format(typ))
typlist = [f(x) for x in raw_typlist]
def f(typ):
if typ <= 2045:
return str(typ)
try:
return self.DTYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata dtype [{0}]"
.format(typ))
dtyplist = [f(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self):
if self.format_version == 117:
b = 33
elif self.format_version == 118:
b = 129
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
if self.format_version == 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the label list
def _get_lbllist(self):
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
def _get_variable_labels(self):
if self.format_version == 118:
vlblist = [self._decode(self.path_or_buf.read(321))
for i in range(self.nvar)]
elif self.format_version > 105:
vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
vlblist = [self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)]
return vlblist
def _get_nobs(self):
if self.format_version == 118:
return struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
def _get_data_label(self):
if self.format_version == 118:
strlen = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._null_terminate(self.path_or_buf.read(81))
else:
return self._null_terminate(self.path_or_buf.read(32))
def _get_time_stamp(self):
if self.format_version == 118:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._null_terminate(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self):
if self.format_version == 117:
            self.path_or_buf.read(8)  # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version == 118:
return struct.unpack(self.byteorder + 'q',
self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char):
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
self._set_encoding()
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self.data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1))
for i in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # py2 string, py3 bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except ValueError:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except ValueError:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(str(x) for x in typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.varlist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
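        # Editor's sketch of the record layout being skipped (illustration
        # only, not part of the original source): each expansion field is
        #     [1-byte type][2- or 4-byte length][`length` bytes of payload]
        # and the loop below stops at the first record whose type byte is 0.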
if self.format_version > 104:
while True:
data_type = struct.unpack(self.byteorder + 'b',
self.path_or_buf.read(1))[0]
if self.format_version > 108:
data_len = struct.unpack(self.byteorder + 'i',
self.path_or_buf.read(4))[0]
else:
data_len = struct.unpack(self.byteorder + 'h',
self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _setup_dtype(self):
"""Map between numpy and state dtypes"""
if self._dtype is not None:
return self._dtype
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder +
self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
return self._dtype
def _calcsize(self, fmt):
return (type(fmt) is int and fmt or
struct.calcsize(self.byteorder + fmt))
def _decode(self, s):
s = s.partition(b"\0")[0]
return s.decode('utf-8')
def _null_terminate(self, s):
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
return s.decode(self._encoding)
def _read_value_labels(self):
if self._value_labels_read:
# Don't read twice
return
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
self._value_labels_read = True
self.value_label_dict = dict()
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = dict()
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b'</val': # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._null_terminate(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
off = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
val = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
if self.format_version <= 117:
self.value_label_dict[labname][val[i]] = (
self._null_terminate(txt[off[i]:end]))
else:
self.value_label_dict[labname][val[i]] = (
self._decode(txt[off[i]:end]))
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO = {'0': ''}
while True:
if self.path_or_buf.read(3) != b'GSO':
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
if self.byteorder == '<':
buf = buf[0:2] + buf[4:10]
else:
buf = buf[0:2] + buf[6:]
v_o = struct.unpack('Q', buf)[0]
typ = struct.unpack('B', self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
va = va[0:-1].decode(self._encoding)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO[str(v_o)] = va
# legacy
@Appender(_data_method_doc)
def data(self, **kwargs):
warnings.warn("'data' is deprecated, use 'read' instead")
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
return self.read(None, **kwargs)
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
@deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def read(self, nrows=None, convert_dates=None,
convert_categoricals=None, index_col=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if index_col is None:
index_col = self._index_col
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (not self._value_labels_read):
self._can_read_value_labels = True
self._read_strls()
# Read data
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
count=read_lines)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist)
else:
data = DataFrame.from_records(data)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index_col is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(
self._null_terminate, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
ix = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], ix, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_dict(OrderedDict(data_formatted))
del data_formatted
self._do_convert_missing(data, convert_missing)
if convert_dates:
cols = np.where(lmap(lambda x: any(x.startswith(fmt)
for fmt in _date_formats),
self.fmtlist))[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col],
self.fmtlist[i])
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(data,
self.value_label_dict,
self.lbllist,
order_categoricals)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_dict(OrderedDict(retyped_data))
if index_col is not None:
data = data.set_index(data.pop(index_col))
return data
def _do_convert_missing(self, data, convert_missing):
# Check for missing values, and replace if found
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.argwhere(missing._ndarray_values)
umissing, umissing_loc = np.unique(series[missing],
return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
data[colname] = replacement
def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != 'Q':
continue
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError('columns contains duplicate entries')
unmatched = column_set.difference(data.columns)
if unmatched:
raise ValueError('The following columns were not found in the '
'Stata data set: ' +
', '.join(list(unmatched)))
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist,
order_categoricals):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(compat.iterkeys(value_label_dict))
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
try:
cat_data.categories = categories
except ValueError:
vc = Series(categories).value_counts()
repeats = list(vc.index[vc > 1])
repeats = '\n' + '-' * 80 + '\n'.join(repeats)
raise ValueError('Value labels for column {col} are not '
'unique. The repeated labels are:\n'
'{repeats}'
.format(col=col, repeats=repeats))
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_dict(OrderedDict(cat_converted_data))
return data
def data_label(self):
"""
Return data label of Stata file.
"""
return self.data_label
def variable_labels(self):
"""
Return variable labels as a dict, associating each variable name
with corresponding label.
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self):
"""
Return a dict, associating each variable name a dict, associating
each value its corresponding label.
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
def _open_file_binary_write(fname):
"""
Open a binary file or no-op if file-like.
Parameters
----------
fname : string path, path object or buffer
Returns
-------
file : file-like object
File object supporting write
own : bool
True if the file was created, otherwise False
"""
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
return fname, False
return open(fname, "wb"), True
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError(
"Endianness {endian} not understood".format(endian=endianness))
def _pad_bytes(name, length):
"""
    Take a char string and pad it with null bytes until it is `length` characters long.
"""
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt):
"""
Convert from one of the stata date formats to a type in TYPE_MAP.
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError(
"Format {fmt} not implemented".format(fmt=fmt))
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a "
"column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
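# Editor's note (illustrative sketch with hypothetical names): given
# varlist = ['price', 'sale_date'] and convert_dates = {'sale_date': 'td'},
# _maybe_convert_to_int_keys returns {1: '%td'} -- column names become
# integer positions and formats gain a leading '%'.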
def _dtype_to_stata_type(dtype, column):
"""
Convert dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - for int8 byte
252 - for int16 int
253 - for int32 long
254 - for float32 float
255 - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column.values))
return max(itemsize, 1)
elif dtype == np.float64:
return 255
elif dtype == np.float32:
return 254
elif dtype == np.int32:
return 253
elif dtype == np.int16:
return 252
elif dtype == np.int8:
return 251
else: # pragma : no cover
raise NotImplementedError(
"Data type {dtype} not supported.".format(dtype=dtype))
def _dtype_to_default_stata_fmt(dtype, column, dta_version=114,
force_strl=False):
"""
Map numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
strl -> "%9s"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dta_version < 117:
max_str_len = 244
else:
max_str_len = 2045
if force_strl:
return '%9s'
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column, skipna=True)
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Column `{col}` cannot be exported.\n\nOnly '
'string-like object arrays containing all '
'strings or a mix of strings and None can be '
'exported. Object arrays containing only null '
                             'values are prohibited. Other object types '
'cannot be exported and must first be converted '
'to one of the supported '
'types.'.format(col=column.name))
itemsize = max_len_string_array(ensure_object(column.values))
if itemsize > max_str_len:
if dta_version >= 117:
return '%9s'
else:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError(
"Data type {dtype} not supported.".format(dtype=dtype))
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() functions. If using a buffer
then the buffer will not be automatically closed after the file
is written.
.. versionadded:: 0.23.0 support for pathlib, py.path.
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Only latin-1 and ascii are supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
_max_string_length = 244
@deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
super(StataWriter, self).__init__()
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._encoding = 'latin-1'
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
self._own_file = True
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = _stringify_path(fname)
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
self._converted_names = {}
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
if compat.PY3:
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
else:
self._file.write(to_write)
def _prepare_categoricals(self, data):
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export '
'int64-based categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_dict(OrderedDict(data_formatted))
def _replace_nans(self, data):
        """Checks floating point data columns for nans, and replaces these with
        the generic Stata missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES['f']
else:
replacement = self.MISSING_VALUES['d']
data[c] = data[c].fillna(replacement)
return data
def _update_strl_names(self):
"""No-op, forward compatibility"""
pass
def _check_column_names(self, data):
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names = {}
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, string_types):
name = text_type(name)
for c in name:
if ((c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and
(c < '0' or c > '9') and c != '_'):
name = name.replace(c, '_')
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = '_' + name
# Variable name may not start with a number
if name[0] >= '0' and name[0] <= '9':
name = '_' + name
name = name[:min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = '_' + str(duplicate_var_id) + name
name = name[:min(len(name), 32)]
duplicate_var_id += 1
converted_names[orig_name] = name
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
conversion_warning = []
for orig_name, name in converted_names.items():
# need to possibly encode the orig name if its unicode
try:
orig_name = orig_name.encode('utf-8')
except (UnicodeDecodeError, AttributeError):
pass
msg = '{0} -> {1}'.format(orig_name, name)
conversion_warning.append(msg)
ws = invalid_name_doc.format('\n '.join(conversion_warning))
warnings.warn(ws, InvalidColumnName)
self._converted_names = converted_names
self._update_strl_names()
return data
def _set_formats_and_types(self, data, dtypes):
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
def _prepare_pandas(self, data):
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = 'tc'
self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates,
self.varlist)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(
self._convert_dates[key]
)
dtypes[key] = np.dtype(new_type)
self._set_formats_and_types(data, dtypes)
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._file, self._own_file = _open_file_binary_write(self._fname)
try:
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
self._write_map()
self._write_variable_types()
self._write_varnames()
self._write_sortlist()
self._write_formats()
self._write_value_label_names()
self._write_variable_labels()
self._write_expansion_fields()
self._write_characteristics()
self._prepare_data()
self._write_data()
self._write_strls()
self._write_value_labels()
self._write_file_close_tag()
self._write_map()
except Exception as exc:
self._close()
try:
if self._own_file:
os.unlink(self._fname)
except Exception:
warnings.warn('This save was not successful but {0} could not '
'be deleted. This file is not '
'valid.'.format(self._fname), ResourceWarning)
raise exc
else:
self._close()
def _close(self):
"""
Close the file if it was created by the writer.
If a buffer or file-like object was passed in, for example a GzipFile,
then leave this file open for the caller to close. In either case,
attempt to flush the file contents to ensure they are written to disk
(if supported)
"""
# Some file-like objects might not support flush
try:
self._file.flush()
except AttributeError:
pass
if self._own_file:
self._file.close()
def _write_map(self):
"""No-op, future compatibility"""
pass
def _write_file_close_tag(self):
"""No-op, future compatibility"""
pass
def _write_characteristics(self):
"""No-op, future compatibility"""
pass
def _write_strls(self):
"""No-op, future compatibility"""
pass
def _write_expansion_fields(self):
"""Write 5 zeros for expansion fields"""
self._write(_pad_bytes("", 5))
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder,
self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(
self._null_terminate(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# GH #13856
# Avoid locale-specific month conversion
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (time_stamp.strftime("%d ") +
month_lookup[time_stamp.month] +
time_stamp.strftime(" %Y %H:%M"))
self._file.write(self._null_terminate(ts))
def _write_variable_types(self):
for typ in self.typlist:
self._file.write(struct.pack('B', typ))
def _write_varnames(self):
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
def _write_sortlist(self):
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (self.nvar + 1))
self._write(srtlist)
def _write_formats(self):
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
def _write_value_label_names(self):
# lbllist, 33*nvar, char array
for i in range(self.nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes('', 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError('Variable labels must be 80 characters '
'or fewer')
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError('Variable labels must contain only '
'characters that can be encoded in '
'Latin-1')
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _convert_strls(self, data):
"""No-op, future compatibility"""
return data
def _prepare_data(self):
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(data[col],
self.fmtlist[i])
# 2. Convert strls
data = self._convert_strls(data)
# 3. Convert bad string data to '' and pad to correct length
dtypes = {}
native_byteorder = self._byteorder == _set_endianness(sys.byteorder)
for i, col in enumerate(data):
typ = typlist[i]
if typ <= self._max_string_length:
data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
stype = 'S{type}'.format(type=typ)
dtypes[col] = stype
data[col] = data[col].str.encode(self._encoding).astype(stype)
else:
dtype = data[col].dtype
if not native_byteorder:
dtype = dtype.newbyteorder(self._byteorder)
dtypes[col] = dtype
self.data = data.to_records(index=False, column_dtypes=dtypes)
def _write_data(self):
data = self.data
self._file.write(data.tobytes())
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
if compat.PY3 and not as_string:
s += null_byte
return s.encode(self._encoding)
else:
s += null_byte
return s
def _dtype_to_stata_type_117(dtype, column, force_strl):
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 2045 are strings of this length
Pandas Stata
32768 - for object strL
    65526 - for float64   double
    65527 - for float32   float
    65528 - for int32     long
    65529 - for int16     int
    65530 - for int8      byte
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if force_strl:
return 32768
if dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column.values))
itemsize = max(itemsize, 1)
if itemsize <= 2045:
return itemsize
return 32768
elif dtype == np.float64:
return 65526
elif dtype == np.float32:
return 65527
elif dtype == np.int32:
return 65528
elif dtype == np.int16:
return 65529
elif dtype == np.int8:
return 65530
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
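# Editor's note (illustrative sketch, hypothetical columns): under format 117
# a float64 column maps to 65526, an int8 column to 65530 and a column of
# 10-character strings to 10 (str#10); a column forced to StrL maps to 32768.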
def _bytes(s, encoding):
if compat.PY3:
return bytes(s, encoding)
else:
return bytes(s.encode(encoding))
def _pad_bytes_new(name, length):
"""
    Take a bytes instance and pad it with null bytes until it is `length` bytes long.
"""
if isinstance(name, string_types):
name = _bytes(name, 'utf-8')
return name + b'\x00' * (length - len(name))
class StataStrLWriter(object):
"""
Converter for Stata StrLs
Stata StrLs map 8 byte values to strings which are stored using a
dictionary-like format where strings are keyed to two values.
Parameters
----------
df : DataFrame
DataFrame to convert
columns : list
List of columns names to convert to StrL
version : int, optional
dta version. Currently supports 117, 118 and 119
byteorder : str, optional
Can be ">", "<", "little", or "big". default is `sys.byteorder`
Notes
-----
Supports creation of the StrL block of a dta file for dta versions
117, 118 and 119. These differ in how the GSO is stored. 118 and
119 store the GSO lookup value as a uint32 and a uint64, while 117
uses two uint32s. 118 and 119 also encode all strings as unicode
    which is required by the format. 117 uses 'latin-1', a fixed-width
encoding that extends the 7-bit ascii table with an additional 128
characters.
"""
def __init__(self, df, columns, version=117, byteorder=None):
if version not in (117, 118, 119):
raise ValueError('Only dta versions 117, 118 and 119 supported')
self._dta_ver = version
self.df = df
self.columns = columns
self._gso_table = OrderedDict((('', (0, 0)),))
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
gso_v_type = 'I' # uint32
gso_o_type = 'Q' # uint64
self._encoding = 'utf-8'
if version == 117:
o_size = 4
gso_o_type = 'I' # 117 used uint32
self._encoding = 'latin-1'
elif version == 118:
o_size = 6
else: # version == 119
o_size = 5
self._o_offet = 2 ** (8 * (8 - o_size))
self._gso_o_type = gso_o_type
self._gso_v_type = gso_v_type
def _convert_key(self, key):
v, o = key
return v + self._o_offet * o
def generate_table(self):
"""
        Generates the GSO lookup table for the DataFrame
Returns
-------
gso_table : OrderedDict
Ordered dictionary using the string found as keys
and their lookup position (v,o) as values
gso_df : DataFrame
DataFrame where strl columns have been converted to
(v,o) values
Notes
-----
Modifies the DataFrame in-place.
The DataFrame returned encodes the (v,o) values as uint64s. The
        encoding depends on the dta version, and can be expressed as
            enc = v + o * 2 ** ((8 - o_size) * 8)
so that v is stored in the lower bits and o is in the upper
bits. o_size is
* 117: 4
* 118: 6
* 119: 5
"""
gso_table = self._gso_table
gso_df = self.df
columns = list(gso_df.columns)
selected = gso_df[self.columns]
col_index = [(col, columns.index(col)) for col in self.columns]
keys = np.empty(selected.shape, dtype=np.uint64)
for o, (idx, row) in enumerate(selected.iterrows()):
for j, (col, v) in enumerate(col_index):
val = row[col]
# Allow columns with mixed str and None (GH 23633)
val = '' if val is None else val
key = gso_table.get(val, None)
if key is None:
# Stata prefers human numbers
key = (v + 1, o + 1)
gso_table[val] = key
keys[o, j] = self._convert_key(key)
for i, col in enumerate(self.columns):
gso_df[col] = keys[:, i]
return gso_table, gso_df
def _encode(self, s):
"""
Python 3 compatibility shim
"""
if compat.PY3:
return s.encode(self._encoding)
else:
if isinstance(s, text_type):
return s.encode(self._encoding)
return s
def generate_blob(self, gso_table):
"""
Generates the binary blob of GSOs that is written to the dta file.
Parameters
----------
gso_table : OrderedDict
Ordered dictionary (str, vo)
Returns
-------
gso : bytes
Binary content of dta file to be placed between strl tags
Notes
-----
Output format depends on dta version. 117 uses two uint32s to
express v and o while 118+ uses a uint32 for v and a uint64 for o.
"""
# Format information
# Length includes null term
# 117
# GSOvvvvooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u4 u1 u4 string + null term
#
# 118, 119
# GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u8 u1 u4 string + null term
bio = BytesIO()
gso = _bytes('GSO', 'ascii')
gso_type = struct.pack(self._byteorder + 'B', 130)
null = struct.pack(self._byteorder + 'B', 0)
v_type = self._byteorder + self._gso_v_type
o_type = self._byteorder + self._gso_o_type
len_type = self._byteorder + 'I'
for strl, vo in gso_table.items():
if vo == (0, 0):
continue
v, o = vo
# GSO
bio.write(gso)
# vvvv
bio.write(struct.pack(v_type, v))
# oooo / oooooooo
bio.write(struct.pack(o_type, o))
# t
bio.write(gso_type)
# llll
utf8_string = _bytes(strl, 'utf-8')
bio.write(struct.pack(len_type, len(utf8_string) + 1))
# xxx...xxx
bio.write(utf8_string)
bio.write(null)
bio.seek(0)
return bio.read()
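# Editor's note (illustrative sketch, hypothetical frame): StataWriter117
# drives this class in two steps (see _convert_strls below):
#     ssw = StataStrLWriter(df, columns=['notes'])
#     table, df = ssw.generate_table()   # strings replaced by (v, o) keys
#     blob = ssw.generate_blob(table)    # bytes written between <strls> tags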
class StataWriter117(StataWriter):
"""
A class for writing Stata binary dta files in Stata 13 format (117)
.. versionadded:: 0.23.0
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() functions. If using a buffer
then the buffer will not be automatically closed after the file
is written.
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Only latin-1 and ascii are supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
convert_strl : list
        List of column names to convert to Stata StrL format. Columns with
        more than 2045 characters are automatically written as StrL.
Smaller columns can be converted by including the column name. Using
StrLs can reduce output file size when strings are longer than 8
characters, and either frequently repeated or sparse.
Returns
-------
writer : StataWriter117 instance
The StataWriter117 instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> from pandas.io.stata import StataWriter117
>>> data = pd.DataFrame([[1.0, 1, 'a']], columns=['a', 'b', 'c'])
>>> writer = StataWriter117('./data_file.dta', data)
>>> writer.write_file()
Or with long strings stored in strl format
>>> data = pd.DataFrame([['A relatively long string'], [''], ['']],
... columns=['strls'])
>>> writer = StataWriter117('./data_file_with_long_strings.dta', data,
... convert_strl=['strls'])
>>> writer.write_file()
"""
_max_string_length = 2045
@deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None, convert_strl=None):
# Shallow copy since convert_strl might be modified later
self._convert_strl = [] if convert_strl is None else convert_strl[:]
super(StataWriter117, self).__init__(fname, data, convert_dates,
write_index, byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
variable_labels=variable_labels)
self._map = None
self._strl_blob = None
@staticmethod
def _tag(val, tag):
"""Surround val with <tag></tag>"""
if isinstance(val, str) and compat.PY3:
val = _bytes(val, 'utf-8')
return (_bytes('<' + tag + '>', 'utf-8') + val +
_bytes('</' + tag + '>', 'utf-8'))
def _update_map(self, tag):
"""Update map location for tag with file position"""
self._map[tag] = self._file.tell()
def _write_header(self, data_label=None, time_stamp=None):
"""Write the file header"""
byteorder = self._byteorder
self._file.write(_bytes('<stata_dta>', 'utf-8'))
bio = BytesIO()
# ds_format - 117
bio.write(self._tag(_bytes('117', 'utf-8'), 'release'))
# byteorder
bio.write(self._tag(byteorder == ">" and "MSF" or "LSF", 'byteorder'))
# number of vars, 2 bytes
assert self.nvar < 2 ** 16
bio.write(self._tag(struct.pack(byteorder + "H", self.nvar), 'K'))
# number of obs, 4 bytes
bio.write(self._tag(struct.pack(byteorder + "I", self.nobs), 'N'))
# data label 81 bytes, char, null terminated
label = data_label[:80] if data_label is not None else ''
label_len = struct.pack(byteorder + "B", len(label))
label = label_len + _bytes(label, 'utf-8')
bio.write(self._tag(label, 'label'))
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# Avoid locale-specific month conversion
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (time_stamp.strftime("%d ") +
month_lookup[time_stamp.month] +
time_stamp.strftime(" %Y %H:%M"))
# '\x11' added due to inspection of Stata file
ts = b'\x11' + _bytes(ts, 'utf8')
bio.write(self._tag(ts, 'timestamp'))
bio.seek(0)
self._file.write(self._tag(bio.read(), 'header'))
def _write_map(self):
"""Called twice during file write. The first populates the values in
the map with 0s. The second call writes the final map locations when
all blocks have been written."""
if self._map is None:
self._map = OrderedDict((('stata_data', 0),
('map', self._file.tell()),
('variable_types', 0),
('varnames', 0),
('sortlist', 0),
('formats', 0),
('value_label_names', 0),
('variable_labels', 0),
('characteristics', 0),
('data', 0),
('strls', 0),
('value_labels', 0),
('stata_data_close', 0),
('end-of-file', 0)))
# Move to start of map
self._file.seek(self._map['map'])
bio = BytesIO()
for val in self._map.values():
bio.write(struct.pack(self._byteorder + 'Q', val))
bio.seek(0)
self._file.write(self._tag(bio.read(), 'map'))
def _write_variable_types(self):
self._update_map('variable_types')
bio = BytesIO()
for typ in self.typlist:
bio.write(struct.pack(self._byteorder + 'H', typ))
bio.seek(0)
self._file.write(self._tag(bio.read(), 'variable_types'))
def _write_varnames(self):
self._update_map('varnames')
bio = BytesIO()
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes_new(name[:32], 33)
bio.write(name)
bio.seek(0)
self._file.write(self._tag(bio.read(), 'varnames'))
def _write_sortlist(self):
self._update_map('sortlist')
        self._file.write(self._tag(b'\x00\x00' * (self.nvar + 1), 'sortlist'))
def _write_formats(self):
self._update_map('formats')
bio = BytesIO()
for fmt in self.fmtlist:
bio.write(_pad_bytes_new(fmt, 49))
bio.seek(0)
self._file.write(self._tag(bio.read(), 'formats'))
def _write_value_label_names(self):
self._update_map('value_label_names')
bio = BytesIO()
for i in range(self.nvar):
# Use variable name when categorical
name = '' # default name
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes_new(name[:32], 33)
bio.write(name)
bio.seek(0)
self._file.write(self._tag(bio.read(), 'value_label_names'))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
self._update_map('variable_labels')
bio = BytesIO()
blank = _pad_bytes_new('', 81)
if self._variable_labels is None:
for _ in range(self.nvar):
bio.write(blank)
bio.seek(0)
self._file.write(self._tag(bio.read(), 'variable_labels'))
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError('Variable labels must be 80 characters '
'or fewer')
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError('Variable labels must contain only '
'characters that can be encoded in '
'Latin-1')
bio.write(_pad_bytes_new(label, 81))
else:
bio.write(blank)
bio.seek(0)
self._file.write(self._tag(bio.read(), 'variable_labels'))
def _write_characteristics(self):
self._update_map('characteristics')
self._file.write(self._tag(b'', 'characteristics'))
def _write_data(self):
self._update_map('data')
data = self.data
self._file.write(b'<data>')
self._file.write(data.tobytes())
self._file.write(b'</data>')
def _write_strls(self):
self._update_map('strls')
strls = b''
if self._strl_blob is not None:
strls = self._strl_blob
self._file.write(self._tag(strls, 'strls'))
def _write_expansion_fields(self):
"""No-op in dta 117+"""
pass
def _write_value_labels(self):
self._update_map('value_labels')
bio = BytesIO()
for vl in self._value_labels:
lab = vl.generate_value_label(self._byteorder, self._encoding)
lab = self._tag(lab, 'lbl')
bio.write(lab)
bio.seek(0)
self._file.write(self._tag(bio.read(), 'value_labels'))
def _write_file_close_tag(self):
self._update_map('stata_data_close')
self._file.write(_bytes('</stata_dta>', 'utf-8'))
self._update_map('end-of-file')
def _update_strl_names(self):
"""Update column names for conversion to strl if they might have been
changed to comply with Stata naming rules"""
# Update convert_strl if names changed
for orig, new in self._converted_names.items():
if orig in self._convert_strl:
idx = self._convert_strl.index(orig)
self._convert_strl[idx] = new
def _convert_strls(self, data):
"""Convert columns to StrLs if either very large or in the
convert_strl variable"""
convert_cols = [
col for i, col in enumerate(data)
if self.typlist[i] == 32768 or col in self._convert_strl]
if convert_cols:
ssw = StataStrLWriter(data, convert_cols)
tab, new_data = ssw.generate_table()
data = new_data
self._strl_blob = ssw.generate_blob(tab)
return data
def _set_formats_and_types(self, data, dtypes):
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
force_strl = col in self._convert_strl
fmt = _dtype_to_default_stata_fmt(dtype, data[col],
dta_version=117,
force_strl=force_strl)
self.fmtlist.append(fmt)
self.typlist.append(_dtype_to_stata_type_117(dtype, data[col],
force_strl))
| bsd-3-clause |
olologin/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 149 | 7173 | # Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
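# Editor's note (sketch): get_loss evaluates
#     mean(log(1 + exp(-y * (X w + b)))) + ||w||^2 / (2 * C * n_samples),
# i.e. scikit-learn's penalized logistic objective divided by C * n_samples.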
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
    # compute the same step_size as in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
beepee14/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
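    # (Editor's note: the positional arguments here are n_neighbors=10 and
    # n_components=2 in Isomap's signature.)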
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
dpo/nlpy | nlpy/optimize/solvers/nlpy_regqp.py | 3 | 6275 | #!/usr/bin/env python
from nlpy import __version__
from nlpy.model import SlackFramework
from nlpy.optimize.solvers.cqp import RegQPInteriorPointSolver
from nlpy.optimize.solvers.cqp import RegQPInteriorPointSolver3x3
from nlpy.tools.norms import norm2
from nlpy.tools.timing import cputime
from optparse import OptionParser
import numpy
import os
import sys
import logging
# Create root logger.
log = logging.getLogger('cqp')
log.setLevel(logging.INFO)
fmt = logging.Formatter('%(name)-10s %(levelname)-8s %(message)s')
hndlr = logging.StreamHandler(sys.stdout)
hndlr.setFormatter(fmt)
log.addHandler(hndlr)
# Configure the solver logger.
sublogger = logging.getLogger('cqp.solver')
sublogger.setLevel(logging.INFO)
sublogger.addHandler(hndlr)
sublogger.propagate = False
usage_msg = """%prog [options] problem1 [... problemN]
where problem1 through problemN represent convex quadratic programs."""
# Define formats for output table.
hdrfmt = '%-15s %5s %15s %7s %7s %7s %6s %6s %4s'
hdr = hdrfmt % ('Name', 'Iter', 'Objective', 'pResid', 'dResid',
'Gap', 'Setup', 'Solve', 'Stat')
fmt = '%-15s %5d %15.8e %7.1e %7.1e %7.1e %6.2f %6.2f %4s'
# Define allowed command-line options
parser = OptionParser(usage=usage_msg, version='%prog version ' + __version__)
# File name options
parser.add_option("-i", "--iter", action="store", type="int", default=None,
dest="maxiter", help="Specify maximum number of iterations")
parser.add_option("-t", "--tol", action="store", type="float", default=None,
dest="tol", help="Specify relative stopping tolerance")
parser.add_option("-p", "--regpr", action="store", type="float", default=None,
dest="regpr", help="Specify initial primal regularization parameter")
parser.add_option("-d", "--regdu", action="store", type="float", default=None,
dest="regdu", help="Specify initial dual regularization parameter")
parser.add_option("-S", "--no-scale", action="store_true",
dest="no_scale", default=False, help="Turn off problem scaling")
parser.add_option("-3", "--3x3", action="store_true",
dest="sys3x3", default=False, help="Use 3x3 block linear system")
parser.add_option("-l", "--long-step", action="store_true", default=False,
dest="longstep", help="Use long-step method")
parser.add_option("-f", "--assume-feasible", action="store_true",
default=False, dest="assume_feasible",
help="Deactivate infeasibility check")
parser.add_option("-V", "--verbose", action="store_true", default=False,
dest="verbose", help="Set verbose mode")
# Parse command-line options
(options, args) = parser.parse_args()
# Decide which class to instantiate.
if options.sys3x3:
print 'Brave man! Using 3x3 block system!'
Solver = RegQPInteriorPointSolver3x3
else:
Solver = RegQPInteriorPointSolver
opts_init = {}
if options.regpr is not None:
opts_init['regpr'] = options.regpr
if options.regdu is not None:
opts_init['regdu'] = options.regdu
opts_solve = {}
if options.maxiter is not None:
opts_solve['itermax'] = options.maxiter
if options.tol is not None:
opts_solve['tolerance'] = options.tol
# Set printing standards for arrays.
numpy.set_printoptions(precision=3, linewidth=70, threshold=10, edgeitems=2)
multiple_problems = len(args) > 1
if not options.verbose:
log.info(hdr)
log.info('-'*len(hdr))
for probname in args:
t_setup = cputime()
qp = SlackFramework(probname)
t_setup = cputime() - t_setup
# isqp() should be implemented in the near future.
#if not qp.isqp():
# log.info('Problem %s is not a quadratic program\n' % probname)
# qp.close()
# continue
# Pass problem to RegQP.
regqp = Solver(qp,
scale=not options.no_scale,
verbose=options.verbose,
**opts_init)
regqp.solve(PredictorCorrector=not options.longstep,
check_infeasible=not options.assume_feasible,
**opts_solve)
# Display summary line.
probname=os.path.basename(probname)
if probname[-3:] == '.nl': probname = probname[:-3]
if not options.verbose:
log.info(fmt % (probname, regqp.iter, regqp.obj_value,
regqp.pResid, regqp.dResid, regqp.rgap,
t_setup, regqp.solve_time, regqp.short_status))
if regqp.short_status == 'degn':
log.info(' F') # Could not regularize sufficiently.
qp.close()
log.info('-'*len(hdr))
if not multiple_problems:
x = regqp.x[:qp.original_n]
log.info('Final x: %s, |x| = %7.1e' % (repr(x),norm2(x)))
log.info('Final y: %s, |y| = %7.1e' % (repr(regqp.y),norm2(regqp.y)))
log.info('Final z: %s, |z| = %7.1e' % (repr(regqp.z),norm2(regqp.z)))
log.info(regqp.status)
log.info('#Iterations: %-d' % regqp.iter)
log.info('RelResidual: %7.1e' % regqp.kktResid)
log.info('Final cost : %21.15e' % regqp.obj_value)
log.info('Setup time : %6.2fs' % t_setup)
log.info('Solve time : %6.2fs' % regqp.solve_time)
# Plot linear system statistics.
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca()
ax.semilogy(regqp.lres_history)
ax.set_title('LS Relative Residual')
fig2 = plt.figure()
ax2 = fig2.gca()
ax2.semilogy(regqp.derr_history)
ax2.set_title('Direct Error Estimate')
fig3 = plt.figure()
ax3 = fig3.gca()
ax3.semilogy([cond[0] for cond in regqp.cond_history], label='K1')
ax3.semilogy([cond[1] for cond in regqp.cond_history], label='K2')
ax3.legend(loc='upper left')
ax3.set_title('Condition number estimates of Arioli, Demmel, Duff')
fig4 = plt.figure()
ax4 = fig4.gca()
ax4.semilogy([berr[0] for berr in regqp.berr_history], label='bkwrd err1')
ax4.semilogy([berr[1] for berr in regqp.berr_history], label='bkwrd err2')
ax4.legend(loc='upper left')
ax4.set_title('Backward Error Estimates of Arioli, Demmel, Duff')
fig5 = plt.figure()
ax5 = fig5.gca()
ax5.semilogy([nrm[0] for nrm in regqp.nrms_history], label='Matrix norm')
ax5.semilogy([nrm[1] for nrm in regqp.nrms_history], label='Solution norm')
ax5.legend(loc='upper left')
ax5.set_title('Infinity Norm Estimates')
plt.show()
| gpl-3.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/spines.py | 11 | 18621 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
from matplotlib import docstring
import matplotlib.transforms as mtransforms
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import matplotlib.cbook as cbook
import numpy as np
import warnings
class Spine(mpatches.Patch):
"""an axis spine -- the line noting the data area boundaries
Spines are the lines connecting the axis tick marks and noting the
boundaries of the data area. They can be placed at arbitrary
positions. See function:`~matplotlib.spines.Spine.set_position`
for more information.
The default position is ``('outward',0)``.
Spines are subclasses of class:`~matplotlib.patches.Patch`, and
inherit much of their behavior.
    Spines draw a line or a circle, depending on whether
function:`~matplotlib.spines.Spine.set_patch_line` or
function:`~matplotlib.spines.Spine.set_patch_circle` has been
called. Line-like is the default.
"""
def __str__(self):
return "Spine"
@docstring.dedent_interpd
def __init__(self, axes, spine_type, path, **kwargs):
"""
- *axes* : the Axes instance containing the spine
- *spine_type* : a string specifying the spine type
- *path* : the path instance used to draw the spine
Valid kwargs are:
%(Patch)s
"""
super(Spine, self).__init__(**kwargs)
self.axes = axes
self.set_figure(self.axes.figure)
self.spine_type = spine_type
self.set_facecolor('none')
self.set_edgecolor(rcParams['axes.edgecolor'])
self.set_linewidth(rcParams['axes.linewidth'])
self.set_capstyle('projecting')
self.axis = None
self.set_zorder(2.5)
self.set_transform(self.axes.transData) # default transform
self._bounds = None # default bounds
self._smart_bounds = False
# Defer initial position determination. (Not much support for
# non-rectangular axes is currently implemented, and this lets
# them pass through the spines machinery without errors.)
self._position = None
assert isinstance(path, matplotlib.path.Path)
self._path = path
# To support drawing both linear and circular spines, this
# class implements Patch behavior two ways. If
# self._patch_type == 'line', behave like a mpatches.PathPatch
# instance. If self._patch_type == 'circle', behave like a
# mpatches.Ellipse instance.
self._patch_type = 'line'
# Behavior copied from mpatches.Ellipse:
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = mtransforms.IdentityTransform()
def set_smart_bounds(self, value):
"""set the spine and associated axis to have smart bounds"""
self._smart_bounds = value
# also set the axis if possible
if self.spine_type in ('left', 'right'):
self.axes.yaxis.set_smart_bounds(value)
elif self.spine_type in ('top', 'bottom'):
self.axes.xaxis.set_smart_bounds(value)
def get_smart_bounds(self):
"""get whether the spine has smart bounds"""
return self._smart_bounds
def set_patch_circle(self, center, radius):
"""set the spine to be circular"""
self._patch_type = 'circle'
self._center = center
self._width = radius * 2
self._height = radius * 2
self._angle = 0
# circle drawn on axes transform
self.set_transform(self.axes.transAxes)
def set_patch_line(self):
"""set the spine to be linear"""
self._patch_type = 'line'
# Behavior copied from mpatches.Ellipse:
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
assert self._patch_type == 'circle'
center = (self.convert_xunits(self._center[0]),
self.convert_yunits(self._center[1]))
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
self._patch_transform = mtransforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self._angle) \
.translate(*center)
def get_patch_transform(self):
if self._patch_type == 'circle':
self._recompute_transform()
return self._patch_transform
else:
return super(Spine, self).get_patch_transform()
def get_path(self):
return self._path
def _ensure_position_is_set(self):
if self._position is None:
# default position
self._position = ('outward', 0.0) # in points
self.set_position(self._position)
def register_axis(self, axis):
"""register an axis
An axis should be registered with its corresponding spine from
the Axes instance. This allows the spine to clear any axis
properties when needed.
"""
self.axis = axis
if self.axis is not None:
self.axis.cla()
def cla(self):
"""Clear the current spine"""
self._position = None # clear position
if self.axis is not None:
self.axis.cla()
def is_frame_like(self):
"""return True if directly on axes frame
This is useful for determining if a spine is the edge of an
old style MPL plot. If so, this function will return True.
"""
self._ensure_position_is_set()
position = self._position
if cbook.is_string_like(position):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
assert len(position) == 2, "position should be 2-tuple"
position_type, amount = position
if position_type == 'outward' and amount == 0:
return True
else:
return False
def _adjust_location(self):
"""automatically set spine bounds to the view interval"""
if self.spine_type == 'circle':
return
if self._bounds is None:
if self.spine_type in ('left', 'right'):
low, high = self.axes.viewLim.intervaly
elif self.spine_type in ('top', 'bottom'):
low, high = self.axes.viewLim.intervalx
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
if self._smart_bounds:
# attempt to set bounds in sophisticated way
if low > high:
# handle inverted limits
low, high = high, low
viewlim_low = low
viewlim_high = high
del low, high
if self.spine_type in ('left', 'right'):
datalim_low, datalim_high = self.axes.dataLim.intervaly
ticks = self.axes.get_yticks()
elif self.spine_type in ('top', 'bottom'):
datalim_low, datalim_high = self.axes.dataLim.intervalx
ticks = self.axes.get_xticks()
# handle inverted limits
ticks = list(ticks)
ticks.sort()
ticks = np.array(ticks)
if datalim_low > datalim_high:
datalim_low, datalim_high = datalim_high, datalim_low
if datalim_low < viewlim_low:
# Data extends past view. Clip line to view.
low = viewlim_low
else:
# Data ends before view ends.
cond = (ticks <= datalim_low) & (ticks >= viewlim_low)
tickvals = ticks[cond]
if len(tickvals):
# A tick is less than or equal to lowest data point.
low = tickvals[-1]
else:
# No tick is available
low = datalim_low
low = max(low, viewlim_low)
if datalim_high > viewlim_high:
# Data extends past view. Clip line to view.
high = viewlim_high
else:
# Data ends before view ends.
cond = (ticks >= datalim_high) & (ticks <= viewlim_high)
tickvals = ticks[cond]
if len(tickvals):
# A tick is greater than or equal to highest data
# point.
high = tickvals[0]
else:
# No tick is available
high = datalim_high
high = min(high, viewlim_high)
else:
low, high = self._bounds
v1 = self._path.vertices
assert v1.shape == (2, 2), 'unexpected vertices shape'
if self.spine_type in ['left', 'right']:
v1[0, 1] = low
v1[1, 1] = high
elif self.spine_type in ['bottom', 'top']:
v1[0, 0] = low
v1[1, 0] = high
else:
raise ValueError('unable to set bounds for spine "%s"' %
self.spine_type)
@allow_rasterization
def draw(self, renderer):
self._adjust_location()
return super(Spine, self).draw(renderer)
def _calc_offset_transform(self):
"""calculate the offset transform performed by the spine"""
self._ensure_position_is_set()
position = self._position
if cbook.is_string_like(position):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
assert len(position) == 2, "position should be 2-tuple"
position_type, amount = position
assert position_type in ('axes', 'outward', 'data')
if position_type == 'outward':
if amount == 0:
# short circuit commonest case
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif self.spine_type in ['left', 'right', 'top', 'bottom']:
offset_vec = {'left': (-1, 0),
'right': (1, 0),
'bottom': (0, -1),
'top': (0, 1),
}[self.spine_type]
# calculate x and y offset in dots
offset_x = amount * offset_vec[0] / 72.0
offset_y = amount * offset_vec[1] / 72.0
self._spine_transform = ('post',
mtransforms.ScaledTranslation(
offset_x,
offset_y,
self.figure.dpi_scale_trans))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif position_type == 'axes':
if self.spine_type in ('left', 'right'):
self._spine_transform = ('pre',
mtransforms.Affine2D.from_values(
# keep y unchanged, fix x at
# amount
0, 0, 0, 1, amount, 0))
elif self.spine_type in ('bottom', 'top'):
self._spine_transform = ('pre',
mtransforms.Affine2D.from_values(
# keep x unchanged, fix y at
# amount
1, 0, 0, 0, 0, amount))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif position_type == 'data':
if self.spine_type in ('right', 'top'):
# The right and top spines have a default position of 1 in
# axes coordinates. When specifying the position in data
# coordinates, we need to calculate the position relative to 0.
amount -= 1
if self.spine_type in ('left', 'right'):
self._spine_transform = ('data',
mtransforms.Affine2D().translate(
amount, 0))
elif self.spine_type in ('bottom', 'top'):
self._spine_transform = ('data',
mtransforms.Affine2D().translate(
0, amount))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
def set_position(self, position):
"""set the position of the spine
Spine position is specified by a 2 tuple of (position type,
amount). The position types are:
* 'outward' : place the spine out from the data area by the
specified number of points. (Negative values specify placing the
spine inward.)
* 'axes' : place the spine at the specified Axes coordinate (from
0.0-1.0).
* 'data' : place the spine at the specified data coordinate.
        Additionally, shorthand notations define special positions:
* 'center' -> ('axes',0.5)
* 'zero' -> ('data', 0.0)
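        For example (an illustrative usage sketch, assuming ``ax`` is a
        Matplotlib Axes instance)::

            ax.spines['left'].set_position(('outward', 10))
            ax.spines['bottom'].set_position('zero')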
"""
if position in ('center', 'zero'):
# special positions
pass
else:
assert len(position) == 2, "position should be 'center' or 2-tuple"
assert position[0] in ['outward', 'axes', 'data']
self._position = position
self._calc_offset_transform()
self.set_transform(self.get_spine_transform())
if self.axis is not None:
self.axis.reset_ticks()
def get_position(self):
"""get the spine position"""
self._ensure_position_is_set()
return self._position
def get_spine_transform(self):
"""get the spine transform"""
self._ensure_position_is_set()
what, how = self._spine_transform
if what == 'data':
# special case data based spine locations
data_xform = self.axes.transScale + \
(how + self.axes.transLimits + self.axes.transAxes)
if self.spine_type in ['left', 'right']:
result = mtransforms.blended_transform_factory(
data_xform, self.axes.transData)
elif self.spine_type in ['top', 'bottom']:
result = mtransforms.blended_transform_factory(
self.axes.transData, data_xform)
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
return result
if self.spine_type in ['left', 'right']:
base_transform = self.axes.get_yaxis_transform(which='grid')
elif self.spine_type in ['top', 'bottom']:
base_transform = self.axes.get_xaxis_transform(which='grid')
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
if what == 'identity':
return base_transform
elif what == 'post':
return base_transform + how
elif what == 'pre':
return how + base_transform
else:
raise ValueError("unknown spine_transform type: %s" % what)
def set_bounds(self, low, high):
"""Set the bounds of the spine."""
if self.spine_type == 'circle':
raise ValueError(
'set_bounds() method incompatible with circular spines')
self._bounds = (low, high)
def get_bounds(self):
"""Get the bounds of the spine."""
return self._bounds
@classmethod
def linear_spine(cls, axes, spine_type, **kwargs):
"""
        (classmethod) Returns a linear :class:`Spine`.
"""
# all values of 13 get replaced upon call to set_bounds()
if spine_type == 'left':
path = mpath.Path([(0.0, 13), (0.0, 13)])
elif spine_type == 'right':
path = mpath.Path([(1.0, 13), (1.0, 13)])
elif spine_type == 'bottom':
path = mpath.Path([(13, 0.0), (13, 0.0)])
elif spine_type == 'top':
path = mpath.Path([(13, 1.0), (13, 1.0)])
else:
raise ValueError('unable to make path for spine "%s"' % spine_type)
result = cls(axes, spine_type, path, **kwargs)
return result
@classmethod
def circular_spine(cls, axes, center, radius, **kwargs):
"""
        (classmethod) Returns a circular :class:`Spine`.
"""
path = mpath.Path.unit_circle()
spine_type = 'circle'
result = cls(axes, spine_type, path, **kwargs)
result.set_patch_circle(center, radius)
return result
def set_color(self, c):
"""
Set the edgecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
# The facecolor of a spine is always 'none' by default -- let
# the user change it manually if desired.
self.set_edgecolor(c)
| mit |
JsNoNo/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
phobson/statsmodels | statsmodels/examples/ex_kde_normalreference.py | 34 | 1704 | # -*- coding: utf-8 -*-
"""
Author: Padarn Wilson
Performance of the normal reference plug-in estimator vs. Silverman's rule of
thumb. The sample is drawn from a mixture of Gaussians chosen to be reasonably
close to normal.
"""
from __future__ import print_function
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.nonparametric.api as npar
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.distributions.mixture_rvs import mixture_rvs
# example from test_kde.py mixture of two normal distributions
np.random.seed(12345)
x = mixture_rvs([.1, .9], size=200, dist=[stats.norm, stats.norm],
kwargs=(dict(loc=0, scale=.5), dict(loc=1, scale=.5)))
kde = npar.KDEUnivariate(x)
# Keep the display names in the same order as the kernel codes below
# ('tri' is the Triangular kernel, 'biw' the Biweight kernel).
kernel_names = ['Gaussian', 'Epanechnikov', 'Triangular',
                'Biweight', 'Triweight', 'Cosine'
                ]
kernel_switch = ['gau', 'epa', 'tri', 'biw',
                 'triw', 'cos'
                 ]
def true_pdf(x):
pdf = 0.1 * stats.norm.pdf(x, loc=0, scale=0.5)
pdf += 0.9 * stats.norm.pdf(x, loc=1, scale=0.5)
return pdf
fig = plt.figure()
for ii, kn in enumerate(kernel_switch):
ax = fig.add_subplot(2, 3, ii + 1) # without uniform
ax.hist(x, bins=20, normed=True, alpha=0.25)
kde.fit(kernel=kn, bw='silverman', fft=False)
ax.plot(kde.support, kde.density)
kde.fit(kernel=kn, bw='normal_reference', fft=False)
ax.plot(kde.support, kde.density)
ax.plot(kde.support, true_pdf(kde.support), color='black', linestyle='--')
ax.set_title(kernel_names[ii])
ax.legend(['silverman', 'normal reference', 'true pdf'], loc='lower right')
fig.suptitle('200 points')
plt.show()
| bsd-3-clause |
anastasiyabel/functional_chromosome_interactions | code/normalize_epigenetic_tracks.py | 1 | 2165 | import sys
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
import seaborn
import pandas as pd
import pickle
from common import parse_config
"""This script normalizes the feature matrix of counts for each chromosome by the mean and standard deviation across the whole genome."""
def get_filtered_chipseq(chr, blacklist):
df_chipseq = pd.read_csv(config["CHIPSEQ_OUT_DIR"] + 'features_matrix_chr' + str(chr) + '.csv', index_col = 0)
# get all blacklisted loccations
blacklist_chr = blacklist[chr]
# get a list of columns to keep
allcols = set(map(int,df_chipseq.columns))
cols2keep = allcols - blacklist_chr
df_chipseq_filt = df_chipseq[list(map(str,cols2keep))]
return df_chipseq_filt
def get_mean_std():
blacklist = pickle.load(open(config["HIC_FILT_DIR"] + 'blacklist.pickle', 'rb'))
# collect chipseq data across all chromosomes into one dataframe
df_all = pd.DataFrame()
chr_list = config["chrs"]
for chr in chr_list:
df_chipseq_filt = get_filtered_chipseq(chr, blacklist)
df_all = pd.concat([df_all, df_chipseq_filt],axis=1)
# transform
df_all = np.log(df_all + 1)
# find mean and standard dev
mean_features = np.mean(df_all, axis =1)
std_features = np.std(df_all, axis=1)
return mean_features, std_features
def normalize_chipseq(mean_features, std_features):
chr_list = config["chrs"]
for chr in chr_list:
# get chipseq data
df_chipseq = pd.read_csv(config["CHIPSEQ_OUT_DIR"] + 'features_matrix_chr' + str(chr) + '.csv', index_col = 0)
# transform
df_chipseq = np.log(df_chipseq + 1)
# normalize
df_norm = (df_chipseq.T - mean_features)/std_features
# transpose back
df_norm = df_norm.T
# save
df_norm.to_csv(config["CHIPSEQ_OUT_DIR"] + 'features_matrix_chr' + str(chr) + 'norm.csv')
def main():
global config
config_fn = sys.argv[1]
config = parse_config(config_fn)
mean_features, std_features = get_mean_std()
normalize_chipseq(mean_features, std_features)
if __name__ == "__main__":
main() | mit |
mmottahedi/neuralnilm_prototype | scripts/e122.py | 2 | 3859 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Try looking at layer-by-layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(1)
"""
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': LSTMLayer,
'num_units': 5,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=5000)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
| mit |
terkkila/scikit-learn | sklearn/datasets/svmlight_format.py | 114 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
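For illustration, a line in this format typically looks like::

    <label> <feature_id>:<value> <feature_id>:<value> ...

e.g. ``1 3:0.5 7:1.2`` encodes a sample with target 1 and two non-zero
features (the example line is illustrative, not taken from any dataset).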
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
    If the file contains pairwise preference constraints (known
    as "qid" in the svmlight format), these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y: ndarray of shape (n_samples,), or, in the multilabel a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
    If the file contains pairwise preference constraints (known
    as "qid" in the svmlight format), these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
    not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
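    Examples
    --------
    A minimal usage sketch (the output file name is illustrative)::

        import numpy as np
        from sklearn.datasets import dump_svmlight_file

        X = np.array([[0.0, 1.5], [2.0, 0.0]])
        y = np.array([0, 1])
        dump_svmlight_file(X, y, "data.svmlight")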
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
| bsd-3-clause |
zak-k/iris | docs/iris/example_code/General/anomaly_log_colouring.py | 6 | 4533 | """
Colouring anomaly data with logarithmic scaling
===============================================
In this example, we need to plot anomaly data where the values have a
"logarithmic" significance -- i.e. we want to give approximately equal ranges
of colour between data values of, say, 1 and 10 as between 10 and 100.
As the data range also contains zero, that obviously does not suit a simple
logarithmic interpretation. However, values of less than a certain absolute
magnitude may be considered "not significant", so we put these into a separate
"zero band" which is plotted in white.
To do this, we create a custom value mapping function (normalization) using
the matplotlib Norm class `matplotlib.colors.SymLogNorm
<http://matplotlib.org/api/colors_api.html#matplotlib.colors.SymLogNorm>`_.
We use this to make a cell-filled pseudocolour plot with a colorbar.
NOTE: By "pseudocolour", we mean that each data point is drawn as a "cell"
region on the plot, coloured according to its data value.
This is provided in Iris by the functions :meth:`iris.plot.pcolor` and
:meth:`iris.plot.pcolormesh`, which call the underlying matplotlib
functions of the same names (i.e. `matplotlib.pyplot.pcolor
<http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pcolor>`_
and `matplotlib.pyplot.pcolormesh
<http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pcolormesh>`_).
See also: http://en.wikipedia.org/wiki/False_color#Pseudocolor.
"""
import cartopy.crs as ccrs
import iris
import iris.coord_categorisation
import iris.plot as iplt
import matplotlib.pyplot as plt
import matplotlib.colors as mcols
def main():
# Enable a future option, to ensure that the netcdf load works the same way
# as in future Iris versions.
iris.FUTURE.netcdf_promote = True
# Load a sample air temperatures sequence.
file_path = iris.sample_data_path('E1_north_america.nc')
temperatures = iris.load_cube(file_path)
# Create a year-number coordinate from the time information.
iris.coord_categorisation.add_year(temperatures, 'time')
# Create a sample anomaly field for one chosen year, by extracting that
# year and subtracting the time mean.
sample_year = 1982
year_temperature = temperatures.extract(iris.Constraint(year=sample_year))
time_mean = temperatures.collapsed('time', iris.analysis.MEAN)
anomaly = year_temperature - time_mean
# Construct a plot title string explaining which years are involved.
years = temperatures.coord('year').points
plot_title = 'Temperature anomaly'
plot_title += '\n{} differences from {}-{} average.'.format(
sample_year, years[0], years[-1])
# Define scaling levels for the logarithmic colouring.
minimum_log_level = 0.1
maximum_scale_level = 3.0
# Use a standard colour map which varies blue-white-red.
# For suitable options, see the 'Diverging colormaps' section in:
# http://matplotlib.org/examples/color/colormaps_reference.html
anom_cmap = 'bwr'
# Create a 'logarithmic' data normalization.
anom_norm = mcols.SymLogNorm(linthresh=minimum_log_level,
linscale=0,
vmin=-maximum_scale_level,
vmax=maximum_scale_level)
# Setting "linthresh=minimum_log_level" makes its non-logarithmic
# data range equal to our 'zero band'.
# Setting "linscale=0" maps the whole zero band to the middle colour value
# (i.e. 0.5), which is the neutral point of a "diverging" style colormap.
# Create an Axes, specifying the map projection.
plt.axes(projection=ccrs.LambertConformal())
# Make a pseudocolour plot using this colour scheme.
mesh = iplt.pcolormesh(anomaly, cmap=anom_cmap, norm=anom_norm)
# Add a colourbar, with extensions to show handling of out-of-range values.
bar = plt.colorbar(mesh, orientation='horizontal', extend='both')
# Set some suitable fixed "logarithmic" colourbar tick positions.
tick_levels = [-3, -1, -0.3, 0.0, 0.3, 1, 3]
bar.set_ticks(tick_levels)
    # Modify the tick labels so that the centre one shows "+/-<minimum-level>".
tick_levels[3] = r'$\pm${:g}'.format(minimum_log_level)
bar.set_ticklabels(tick_levels)
# Label the colourbar to show the units.
bar.set_label('[{}, log scale]'.format(anomaly.units))
# Add coastlines and a title.
plt.gca().coastlines()
plt.title(plot_title)
# Display the result.
iplt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
russellnibbelink/cycsv | FCOexcel.py | 1 | 2386 | import pandas
import random
from openpyxl import load_workbook
import time
start_time = time.time()
###----------CONSTANTS------------###
template = 'Output Template 2.8.4V.xlsx'
output = 'Output Template 2.8.4V Filled.xlsx'
XLSXengine = 'openpyxl'
output_sheets = ["Data-Front End", "Data-Reactor", "Data-Recycle", "Data-Inventories", "Data-Economics", "Data-Wildcard"]
metrics = {
"U Resources Mined": [5,2, "Data-Front End"],
}
###----------FUNCTIONS------------###
#initialize the excel write, returns the write object
def init(tpl = template, out = output, eng = XLSXengine, sheet = output_sheets):
book = load_workbook(tpl)
writer = pandas.ExcelWriter(out, engine = eng)
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
return writer
#exports the excel sheet
def export(writer_object):
writer_object.save()
#writes dataframe to excel spreadsheet
def toExcel(dFrame, writer_object, worksheet, col_to_write, row, col):
dFrame.to_excel(writer_object, worksheet, columns = col_to_write, header = False, index = False, index_label = None, merge_cells = False, engine = XLSXengine, startrow = row, startcol = col)
# writes a metric's dataframe to its configured sheet and position;
# requires the writer object returned by init()
def writeMetric(metric, writer_object, dFrame):
    row = metrics[metric][0]
    col = metrics[metric][1]
    sheet = metrics[metric][2]
    toExcel(dFrame, writer_object, sheet, [0, 1, 2, 3], row, col)
#creates a random matrix for testing purposes
def matrix(h, w):
hor = []
ver = []
for j in range(0, h):
for i in range(0, w):
ver.append(random.randrange(0,h))
hor.append(ver)
ver = []
return hor
m = matrix(200, 4)
#print(m)
df = pandas.DataFrame(m)
#print(df)
#book = load_workbook('Output Template 2.8.4V.xlsx')
#writer = pandas.ExcelWriter('Output Template 2.8.4V Filled.xlsx', engine ='openpyxl')
#writer.book = book
#writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
#print(writer.sheets)
#df.to_excel(writer, "Data-Front End", columns=[0,1,2,3], header = False, index = False, index_label=None, merge_cells=False, engine = 'openpyxl', startrow = 5, startcol = 2)
#this will write starting at the top left corner.
#writer.save()
elapsed_time = time.time() - start_time
print(elapsed_time)
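# A minimal usage sketch tying the helpers above together (illustrative only;
# it uses the metric defined in `metrics` and the random test dataframe `df`,
# and is left commented out like the driver code above):
#
#     writer = init()
#     writeMetric("U Resources Mined", writer, df)
#     export(writer)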
## TODO: write a Python function to convert Excel column letters such as 'A', 'AA', 'AB' to their respective index numbers (a sketch follows below)
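# A minimal sketch of the converter described in the TODO above (the function
# name and the zero-based convention are assumptions): 'A' -> 0, 'Z' -> 25, 'AA' -> 26.
def column_letters_to_index(letters):
    index = 0
    for ch in letters.upper():
        index = index * 26 + (ord(ch) - ord('A') + 1)
    return index - 1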
## TODO: write a for loop to write multiple dataframes to Excel | bsd-3-clause |
niltonlk/nest-simulator | pynest/examples/spatial/grid_iaf_oc.py | 8 | 1781 | # -*- coding: utf-8 -*-
#
# grid_iaf_oc.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create three populations of iaf_psc_alpha neurons on a 4x3 grid, each with different center
-------------------------------------------------------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
import numpy as np
for ctr in [(0.0, 0.0), (-2.0, 2.0), (0.5, 1.0)]:
plt.figure()
nest.ResetKernel()
l1 = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[4, 3], extent=[2., 1.5],
center=ctr))
nest.PlotLayer(l1, nodesize=50, fig=plt.gcf())
# beautify
plt.axis([-3, 3, -3, 3])
plt.axes().set_aspect('equal', 'box')
plt.axes().set_xticks(np.arange(-3.0, 3.1, 1.0))
plt.axes().set_yticks(np.arange(-3.0, 3.1, 1.0))
plt.grid(True)
    plt.xlabel(f'4 Columns, Extent: 2.0, Center: {ctr[0]:.1f}')
    plt.ylabel(f'3 Rows, Extent: 1.5, Center: {ctr[1]:.1f}')
plt.show()
# plt.savefig('grid_iaf_oc_{}_{}.png'.format(ctr[0], ctr[1]))
| gpl-2.0 |
Eric89GXL/scikit-learn | examples/linear_model/plot_sgd_weighted_classes.py | 9 | 1431 | """
================================================
SGD: Separating hyperplane with weighted classes
================================================
Fit linear SVMs with and without class weighting.
This allows handling problems with unbalanced classes.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.linear_model import SGDClassifier
# we create 40 separable points
np.random.seed(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * np.random.randn(n_samples_1, 2),
0.5 * np.random.randn(n_samples_2, 2) + [2, 2]]
y = np.array([0] * (n_samples_1) + [1] * (n_samples_2), dtype=np.float64)
idx = np.arange(y.shape[0])
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# fit the model and get the separating hyperplane
clf = SGDClassifier(n_iter=100, alpha=0.01)
clf.fit(X, y)
w = clf.coef_.ravel()
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_ / w[1]
# get the separating hyperplane using weighted classes
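# class_weight={1: 10} makes errors on the rare class (label 1) ten times more
# costly during fitting, which pushes the decision boundary toward the majority
# class so that fewer minority samples are misclassified.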
wclf = SGDClassifier(n_iter=100, alpha=0.01, class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_.ravel()
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_ / ww[1]
# plot separating hyperplanes and samples
h0 = pl.plot(xx, yy, 'k-', label='no weights')
h1 = pl.plot(xx, wyy, 'k--', label='with weights')
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=pl.cm.Paired)
pl.legend()
pl.axis('tight')
pl.show()
| bsd-3-clause |
ibab/tensorflow | tensorflow/examples/skflow/out_of_core_data_classification.py | 9 | 2194 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics, cross_validation
import pandas as pd
import dask.dataframe as dd
from tensorflow.contrib import learn
# Sometimes, when your dataset is too large to hold in memory,
# you may want to load it into an out-of-core dataframe as provided by the dask
# library, draw sample batches from it, and then load those batches into memory
# for training.
# Load dataset.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
# Note that we use iris here just for demo purposes
# You can load your own large dataset into a out-of-core dataframe
# using dask's methods, e.g. read_csv() in dask
# details please see: http://dask.pydata.org/en/latest/dataframe.html
# We first load them into pandas DataFrames and then convert them into dask DataFrames
X_train, y_train, X_test, y_test = [pd.DataFrame(data) for data in [X_train, y_train, X_test, y_test]]
X_train, y_train, X_test, y_test = [dd.from_pandas(data, npartitions=2) for data in [X_train, y_train, X_test, y_test]]
# Initialize a TensorFlow linear classifier
classifier = learn.TensorFlowLinearClassifier(n_classes=3)
# Fit the model using training set
classifier.fit(X_train, y_train)
# Make predictions on each partitions of testing data
predictions = X_test.map_partitions(classifier.predict).compute()
# Calculate accuracy
score = metrics.accuracy_score(y_test.compute(), predictions)
| apache-2.0 |
DougBurke/astropy | astropy/convolution/kernels.py | 2 | 32546 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import math
import numpy as np
from .core import Kernel1D, Kernel2D, Kernel
from .utils import KernelSizeError
from ..modeling import models
from ..modeling.core import Fittable1DModel, Fittable2DModel
from ..utils.decorators import deprecated_renamed_argument
__all__ = ['Gaussian1DKernel', 'Gaussian2DKernel', 'CustomKernel',
'Box1DKernel', 'Box2DKernel', 'Tophat2DKernel',
'Trapezoid1DKernel', 'MexicanHat1DKernel', 'MexicanHat2DKernel',
'AiryDisk2DKernel', 'Moffat2DKernel', 'Model1DKernel',
'Model2DKernel', 'TrapezoidDisk2DKernel', 'Ring2DKernel']
def _round_up_to_odd_integer(value):
i = math.ceil(value)
if i % 2 == 0:
return i + 1
else:
return i
class Gaussian1DKernel(Kernel1D):
"""
1D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
Parameters
----------
stddev : number
Standard deviation of the Gaussian kernel.
x_size : odd int, optional
Size of the kernel array. Default = 8 * stddev
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin. Very slow.
factor : number, optional
Factor of oversampling. Default factor = 10. If the factor
is too large, evaluation can be very slow.
See Also
--------
Box1DKernel, Trapezoid1DKernel, MexicanHat1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian1DKernel
gauss_1D_kernel = Gaussian1DKernel(10)
plt.plot(gauss_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, stddev, **kwargs):
self._model = models.Gaussian1D(1. / (np.sqrt(2 * np.pi) * stddev),
0, stddev)
self._default_size = _round_up_to_odd_integer(8 * stddev)
super().__init__(**kwargs)
self._truncation = np.abs(1. - self._array.sum())
class Gaussian2DKernel(Kernel2D):
"""
2D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
Parameters
----------
x_stddev : float
Standard deviation of the Gaussian in x before rotating by theta.
y_stddev : float
Standard deviation of the Gaussian in y before rotating by theta.
theta : float
Rotation angle in radians. The rotation angle increases
counterclockwise.
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * stddev.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * stddev.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian2DKernel
gaussian_2D_kernel = Gaussian2DKernel(10)
plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = False
@deprecated_renamed_argument('stddev', 'x_stddev', '3.0')
def __init__(self, x_stddev, y_stddev=None, theta=0.0, **kwargs):
if y_stddev is None:
y_stddev = x_stddev
self._model = models.Gaussian2D(1. / (2 * np.pi * x_stddev * y_stddev),
0, 0, x_stddev=x_stddev,
y_stddev=y_stddev, theta=theta)
self._default_size = _round_up_to_odd_integer(
8 * np.max([x_stddev, y_stddev]))
super().__init__(**kwargs)
self._truncation = np.abs(1. - self._array.sum())
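# Illustrative smoothing sketch complementing the Gaussian2DKernel docstring
# above -- an added example, not part of the original module. It assumes
# `convolve` from `astropy.convolution` and a 2-D `image` array supplied by
# the caller:
#
#     >>> from astropy.convolution import convolve
#     >>> smoothed = convolve(image, Gaussian2DKernel(x_stddev=2))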
class Box1DKernel(Kernel1D):
"""
1D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
    and can produce artifacts when applied repeatedly to the same data.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
    weighting the edge pixels with 1/2. E.g. a Box kernel with an effective
    smoothing of 4 pixels would have the following array: [0.5, 1, 1, 1, 0.5].
Parameters
----------
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'linear_interp' (default)
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian1DKernel, Trapezoid1DKernel, MexicanHat1DKernel
Examples
--------
Kernel response function:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box1DKernel
box_1D_kernel = Box1DKernel(9)
plt.plot(box_1D_kernel, drawstyle='steps')
plt.xlim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box1D(1. / width, 0, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs['mode'] = 'linear_interp'
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
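# Illustrative sketch of the edge weighting described in the Box1DKernel
# docstring -- an added example, not original code. With width=4 and the
# forced 'linear_interp' mode, the discretized array is proportional to
# [0.5, 1, 1, 1, 0.5]; after normalization the values are approximately:
#
#     >>> Box1DKernel(4).array          # approximately
#     array([ 0.125,  0.25 ,  0.25 ,  0.25 ,  0.125])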
class Box2DKernel(Kernel2D):
"""
2D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
    and can produce artifacts when applied repeatedly to the same data.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
weighting the edge pixels with 1/2.
Parameters
----------
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'linear_interp' (default)
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box2DKernel
box_2D_kernel = Box2DKernel(9)
plt.imshow(box_2D_kernel, interpolation='none', origin='lower',
vmin=0.0, vmax=0.015)
plt.xlim(-1, 9)
plt.ylim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box2D(1. / width ** 2, 0, 0, width, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs['mode'] = 'linear_interp'
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class Tophat2DKernel(Kernel2D):
"""
2D Tophat filter kernel.
The Tophat filter is an isotropic smoothing filter. It can produce
artifacts when applied repeatedly on the same data.
Parameters
----------
radius : int
Radius of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, MexicanHat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Tophat2DKernel
tophat_2D_kernel = Tophat2DKernel(40)
plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius, **kwargs):
self._model = models.Disk2D(1. / (np.pi * radius ** 2), 0, 0, radius)
self._default_size = _round_up_to_odd_integer(2 * radius)
super().__init__(**kwargs)
self._truncation = 0
class Ring2DKernel(Kernel2D):
"""
2D Ring filter kernel.
The Ring filter kernel is the difference between two Tophat kernels of
different width. This kernel is useful for, e.g., background estimation.
Parameters
----------
radius_in : number
Inner radius of the ring kernel.
width : number
Width of the ring kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
    TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Ring2DKernel
ring_2D_kernel = Ring2DKernel(9, 8)
plt.imshow(ring_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius_in, width, **kwargs):
radius_out = radius_in + width
self._model = models.Ring2D(1. / (np.pi * (radius_out ** 2 - radius_in ** 2)),
0, 0, radius_in, width)
self._default_size = _round_up_to_odd_integer(2 * radius_out)
super().__init__(**kwargs)
self._truncation = 0
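# Illustrative background-estimation sketch motivated by the Ring2DKernel
# docstring -- an added example, not original code. It assumes `convolve`
# from `astropy.convolution` and a 2-D `image` array; the radii are arbitrary:
#
#     >>> from astropy.convolution import convolve
#     >>> local_background = convolve(image, Ring2DKernel(radius_in=9, width=3))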
class Trapezoid1DKernel(Kernel1D):
"""
1D trapezoid kernel.
Parameters
----------
width : number
Width of the filter kernel, defined as the width of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, MexicanHat1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Trapezoid1DKernel
trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2)
plt.plot(trapezoid_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('amplitude')
plt.xlim(-1, 28)
plt.show()
"""
_is_bool = False
def __init__(self, width, slope=1., **kwargs):
self._model = models.Trapezoid1D(1, 0, width, slope)
self._default_size = _round_up_to_odd_integer(width + 2. / slope)
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class TrapezoidDisk2DKernel(Kernel2D):
"""
2D trapezoid kernel.
Parameters
----------
radius : number
Width of the filter kernel, defined as the width of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import TrapezoidDisk2DKernel
trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2)
plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, slope=1., **kwargs):
self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope)
self._default_size = _round_up_to_odd_integer(2 * radius + 2. / slope)
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class MexicanHat1DKernel(Kernel1D):
"""
1D Mexican hat filter kernel.
The Mexican Hat, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smoothes the data and removes slowly varying
or constant structures (e.g. Background). It is useful for peak or
multi-scale detection.
This kernel is derived from a normalized Gaussian function, by
computing the second derivative. This results in an amplitude
    at the kernel's center of 1. / (sqrt(2 * pi) * width ** 3). The
normalization is the same as for `scipy.ndimage.gaussian_laplace`,
except for a minus sign.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import MexicanHat1DKernel
mexicanhat_1D_kernel = MexicanHat1DKernel(10)
plt.plot(mexicanhat_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_is_bool = True
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.sqrt(2 * np.pi) * width ** 3)
self._model = models.MexicanHat1D(amplitude, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
self._truncation = np.abs(self._array.sum() / self._array.size)
class MexicanHat2DKernel(Kernel2D):
"""
2D Mexican hat filter kernel.
The Mexican Hat, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smoothes the data and removes slowly varying
or constant structures (e.g. Background). It is useful for peak or
multi-scale detection.
This kernel is derived from a normalized Gaussian function, by
computing the second derivative. This results in an amplitude
    at the kernel's center of 1. / (pi * width ** 4). The normalization
is the same as for `scipy.ndimage.gaussian_laplace`, except
for a minus sign.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import MexicanHat2DKernel
mexicanhat_2D_kernel = MexicanHat2DKernel(10)
plt.imshow(mexicanhat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.pi * width ** 4)
self._model = models.MexicanHat2D(amplitude, 0, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
self._truncation = np.abs(self._array.sum() / self._array.size)
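# Illustrative peak-detection sketch motivated by the MexicanHat2DKernel
# docstring -- an added example, not original code. It assumes `convolve`
# from `astropy.convolution` and a 2-D `image` array; the threshold is
# arbitrary. Note that this kernel sums to roughly zero, so it cannot be
# normalized and `normalize_kernel=False` is passed:
#
#     >>> from astropy.convolution import convolve
#     >>> filtered = convolve(image, MexicanHat2DKernel(width=3),
#     ...                     normalize_kernel=False)
#     >>> candidate_peaks = filtered > 5 * filtered.std()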
class AiryDisk2DKernel(Kernel2D):
"""
2D Airy disk kernel.
This kernel models the diffraction pattern of a circular aperture. This
kernel is normalized to a peak value of 1.
Parameters
----------
radius : float
The radius of the Airy disk kernel (radius of the first zero).
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * radius.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * radius.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
    Ring2DKernel, TrapezoidDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import AiryDisk2DKernel
airydisk_2D_kernel = AiryDisk2DKernel(10)
plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, **kwargs):
self._model = models.AiryDisk2D(1, 0, 0, radius)
self._default_size = _round_up_to_odd_integer(8 * radius)
super().__init__(**kwargs)
self.normalize()
self._truncation = None
class Moffat2DKernel(Kernel2D):
"""
2D Moffat kernel.
This kernel is a typical model for a seeing limited PSF.
Parameters
----------
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
x_size : odd int, optional
        Size in x direction of the kernel array. Default = 4 * FWHM,
        where the FWHM is computed from ``gamma`` and ``alpha``.
    y_size : odd int, optional
        Size in y direction of the kernel array. Default = 4 * FWHM,
        where the FWHM is computed from ``gamma`` and ``alpha``.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Moffat2DKernel
moffat_2D_kernel = Moffat2DKernel(3, 2)
plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, gamma, alpha, **kwargs):
        # Amplitude chosen so the un-normalized model integrates to 1: the
        # integral of a Moffat profile is amplitude * pi * gamma**2 / (alpha - 1).
        self._model = models.Moffat2D((alpha - 1.0) / (np.pi * gamma * gamma),
                                      0, 0, gamma, alpha)
        fwhm = 2.0 * gamma * (2.0 ** (1.0 / alpha) - 1.0) ** 0.5
        self._default_size = _round_up_to_odd_integer(4.0 * fwhm)
super().__init__(**kwargs)
self.normalize()
self._truncation = None
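# Illustrative PSF-smoothing sketch for Moffat2DKernel -- an added example,
# not original code. It assumes `convolve` from `astropy.convolution` and a
# 2-D `image` array; the parameter values are arbitrary:
#
#     >>> from astropy.convolution import convolve
#     >>> psf_smoothed = convolve(image, Moffat2DKernel(gamma=3, alpha=2))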
class Model1DKernel(Kernel1D):
"""
Create kernel from 1D model.
The model has to be centered on x = 0.
Parameters
----------
model : `~astropy.modeling.Fittable1DModel`
Kernel response function model
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable1DModel`
See also
--------
Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian1D model:
>>> from astropy.modeling.models import Gaussian1D
>>> from astropy.convolution.kernels import Model1DKernel
>>> gauss = Gaussian1D(1, 0, 2)
And create a custom one dimensional kernel from it:
>>> gauss_kernel = Model1DKernel(gauss, x_size=9)
This kernel can now be used like a usual Astropy kernel.
"""
_separable = False
_is_bool = False
def __init__(self, model, **kwargs):
if isinstance(model, Fittable1DModel):
self._model = model
else:
raise TypeError("Must be Fittable1DModel")
super().__init__(**kwargs)
class Model2DKernel(Kernel2D):
"""
Create kernel from 2D model.
The model has to be centered on x = 0 and y = 0.
Parameters
----------
model : `~astropy.modeling.Fittable2DModel`
Kernel response function model
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable2DModel`
See also
--------
Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian2D model:
>>> from astropy.modeling.models import Gaussian2D
>>> from astropy.convolution.kernels import Model2DKernel
>>> gauss = Gaussian2D(1, 0, 0, 2, 2)
And create a custom two dimensional kernel from it:
>>> gauss_kernel = Model2DKernel(gauss, x_size=9)
This kernel can now be used like a usual astropy kernel.
"""
_is_bool = False
_separable = False
def __init__(self, model, **kwargs):
self._separable = False
if isinstance(model, Fittable2DModel):
self._model = model
else:
raise TypeError("Must be Fittable2DModel")
super().__init__(**kwargs)
class PSFKernel(Kernel2D):
"""
Initialize filter kernel from astropy PSF instance.
"""
_separable = False
def __init__(self):
raise NotImplementedError('Not yet implemented')
class CustomKernel(Kernel):
"""
Create filter kernel from list or array.
Parameters
----------
array : list or array
Filter kernel array. Size must be odd.
Raises
------
TypeError
If array is not a list or array.
KernelSizeError
If array size is even.
See also
--------
Model2DKernel, Model1DKernel
Examples
--------
Define one dimensional array:
>>> from astropy.convolution.kernels import CustomKernel
>>> import numpy as np
>>> array = np.array([1, 2, 3, 2, 1])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
1
Define two dimensional array:
>>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
2
"""
def __init__(self, array):
self.array = array
super().__init__(self._array)
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
@array.setter
def array(self, array):
"""
Filter kernel array setter
"""
if isinstance(array, np.ndarray):
self._array = array.astype(np.float64)
elif isinstance(array, list):
self._array = np.array(array, dtype=np.float64)
else:
raise TypeError("Must be list or array.")
# Check if array is odd in all axes
odd = all(axes_size % 2 != 0 for axes_size in self.shape)
if not odd:
raise KernelSizeError("Kernel size must be odd in all axes.")
# Check if array is bool
ones = self._array == 1.
zeros = self._array == 0
self._is_bool = bool(np.all(np.logical_or(ones, zeros)))
self._truncation = 0.0
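# Illustrative failure case for the odd-size check above -- an added example,
# not original code. An even-sized array is rejected by the setter:
#
#     >>> CustomKernel([1, 2, 2, 1])
#     Traceback (most recent call last):
#         ...
#     KernelSizeError: Kernel size must be odd in all axes.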
| bsd-3-clause |
ctk3b/mdtraj | mdtraj/html/imagebutton_widget.py | 3 | 1798 | import base64
import matplotlib.pyplot as pp
from six.moves import StringIO
try:
from ipywidgets.widgets import DOMWidget, CallbackDispatcher
from traitlets import Any, Unicode, CUnicode, Bool, Bytes
except ImportError:
from IPython.html.widgets import DOMWidget, CallbackDispatcher
from IPython.utils.traitlets import Any, Unicode, CUnicode, Bool, Bytes
from matplotlib.backends.backend_agg import FigureCanvasAgg
__all__ = ['MPLFigureButton', 'ImageButton']
class ImageButton(DOMWidget):
disabled = Bool(False, help="Enable or disable user changes.", sync=True)
_view_name = Unicode('ImageButtonView', sync=True)
format = Unicode('png', sync=True)
width = CUnicode(sync=True)
height = CUnicode(sync=True)
_b64value = Unicode(sync=True)
value = Bytes()
def _value_changed(self, name, old, new):
self._b64value = base64.b64encode(new)
def __init__(self, **kwargs):
super(ImageButton, self).__init__(**kwargs)
self._click_handlers = CallbackDispatcher()
self.on_msg(self._handle_button_msg)
def on_click(self, callback, remove=False):
self._click_handlers.register_callback(callback, remove=remove)
def _handle_button_msg(self, _, content):
if content.get('event', '') == 'click':
self._click_handlers(self, content)
class MPLFigureButton(ImageButton):
fig = Any()
def __init__(self, **kwargs):
super(MPLFigureButton, self).__init__(**kwargs)
if self.fig is None:
self.fig = pp.gcf()
def _fig_changed(self, name, old, new):
self._sync_b64value(new)
def _sync_b64value(self, figure):
buf = StringIO()
FigureCanvasAgg(figure).print_png(buf)
self._b64value = base64.b64encode(buf.getvalue())
| lgpl-2.1 |
CtheDataIO-sdpenaloza/Kaggle-Titanic-Machine-Learning-from-Disaster | ML-RandomForestRegression/random_forest_regression.py | 1 | 1318 | # Random Forest Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting Random Forest Regression to the dataset
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 500, random_state = 0)
regressor.fit(X, y)
# Predicting a new result
# scikit-learn expects a 2-D array of shape (n_samples, n_features)
y_pred = regressor.predict([[6.5]])
# Visualising the Random Forest Regression results (higher resolution)
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show() | gpl-3.0 |
sheliak/sclip | examples.py | 1 | 2232 | from matplotlib import *
from pylab import *
from scipy.optimize import curve_fit
import sclip
import itertools
from mpl_toolkits.mplot3d import Axes3D
################################################## 2D example ##############################################
#this is a function we want to fit
def func(x,a,b):
return a*x+b
#this function does the fitting. The arguments here are all mandatory, but you don't actually have to use the ye. Use mask as shown here!
def myfit(p,ye,mask):
x=p[0]
y=p[1]
popt,pcov = curve_fit(func,x[mask],y[mask],p0=[1,0])
return func(x,popt[0],popt[1])
#generate some data:
x=np.array([0,1,2,3,4,5,6,7,8,9])
y=np.array([0,1.1,2.3,1,3.9,4.8,15,7.2,8.1,9.1])
ye=np.array([0,0,0,1.4,0,0,0,0,0,0])
#fit with sclip:
f,steps,mask=sclip.sclip((x,y),myfit,5,ye,sl=2,su=2,min_data=2,grow=0)
fig=figure('2D example')
#plot data points
errorbar(x,y,ye,fmt='o')
#plot the final fit
plot(x,f,'r-')
#plot rejected points
plot(x[np.invert(mask)],y[np.invert(mask)],'rx',ms=15)
#plot intermediate steps:
for y in steps:
plot(x,y,'b-', alpha=0.3)
############################################# 3D example #########################################
#generate some data:
p=np.array([[1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4],[1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4],[0.7,0.8,0.6,1.1,0.8,2.9,0.8,1.1,1,1,1.2,1.4,2,2.2,2.3,2.6]])
#a function that fits a 2D polynomial
def polyfit2d(x, y, z, order=3):
ncols = (order + 1)**2
G = np.zeros((x.size, ncols))
ij = itertools.product(range(order+1), range(order+1))
for k, (i,j) in enumerate(ij):
G[:,k] = x**i * y**j
m, _, _, _ = np.linalg.lstsq(G, z)
return m
#a function that evaluates the polynomial
def polyval2d(x, y, m):
order = int(np.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
z = np.zeros_like(x)
for a, (i,j) in zip(m, ij):
z = z+ a * x**i * y**j
return z
#a wrapper that sclip can use.
def wrapper(p,ye,mask):
m=polyfit2d(p[0][mask],p[1][mask],p[2][mask],order=2)
return polyval2d(p[0],p[1],m)
#fit
f=sclip.sclip(p,wrapper,5,ye=[],sl=2)
#plot the results
fig = plt.figure('3D example')
ax = fig.add_subplot(111, projection='3d')
ax.plot(p[0],p[1],p[2],'ko',ms=10)
ax.plot_trisurf(p[0],p[1],f[0],alpha=0.4)
show() | gpl-3.0 |
garibaldu/radioblobs | code/code_2d/elais/count_overlaps_real.py | 1 | 3582 | import numpy as np
import math
import sys
import matplotlib.pyplot as plt
import pylab as pl
from matplotlib.patches import Ellipse
import pyfits
if len(sys.argv) == 4:
row = int(sys.argv[1])
col = int(sys.argv[2])
stub = sys.argv[3]
else:
sys.exit('usage: python %s row col stub' % (sys.argv[0]))
fitsfile = '%s_map.fits' % stub
hdulist = pyfits.open(fitsfile,memmap=True)
z = hdulist[0].data
hdulist.close()
while z.shape[0] == 1: z = z[0]
zsmall=z[row:row+500,col:col+500]
try:
gt = np.genfromtxt('%s_%s/%s_%s_%s_gt_FULL' % (row,col,stub,row,col))
except:
sys.exit()#'no sources here: %s %s' %(row,col) )
gt = gt.clip(min=0)
if len(gt.shape)==1: gt = gt.reshape(1,gt.shape[0])
if int(row) in [2298,2743,2773,3051,6252,6412,6418,6430]:
fnd = np.genfromtxt('%s_%s/DirModel_width_data_K10_cut' % (row,col))
else:
fnd = np.genfromtxt('%s_%s/DirModel_width_data_K10' % (row,col))
if len(fnd.shape)==1: fnd = fnd.reshape(1,fnd.shape[0])
fnd = np.delete(fnd,np.where(fnd[:,5]<=-np.log(0.01/0.99)),0)
TP = 0.
FP = 0.
FN = float(gt.shape[0])
prop = 0.1
foundouts = []
Nx,Ny = 500,500
X,Y = np.meshgrid(np.arange(Nx),np.arange(Ny))
for i in range(fnd.shape[0]):
#print i
if (fnd[i] == [0]*fnd.shape[1]).sum() == fnd.shape[1]: continue
#Ze = np.zeros((Ny,Nx))
#x = (X-fnd[i,1])*math.cos(fnd[i,4])+(Y-fnd[i,0])*math.sin(fnd[i,4])
#y = -(X-fnd[i,1])*math.sin(fnd[i,4])+(Y-fnd[i,0])*math.cos(fnd[i,4])
#a = np.power((1.5*fnd[i,3]),2)
#b = np.power((1.5*fnd[i,2]),2)
#Ze[np.where((np.power(x,2)/a+np.power(y,2)/b) <=1)] = 1
#ly = max(fnd[i,1]-1.5*fnd[i,3],0)
#ry = min(fnd[i,1]+1.5*fnd[i,3],Ny)
#lx = max(fnd[i,0]-1.5*fnd[i,2],0)
#rx = min(fnd[i,0]+1.5*fnd[i,2],Nx)
#Ze[lx:rx,ly:ry] =1
x,y = fnd[i,0],fnd[i,1]
found = False
for j in range(gt.shape[0]):
if (gt[j] == [0]*gt.shape[1]).sum() == gt.shape[1]: continue
Zt = np.zeros((Ny,Nx))
Zt[gt[j,4]:gt[j,5]+1,gt[j,2]:gt[j,3]+1] = 1
xl,xr = gt[j,4],gt[j,5]+1
yl,yr = gt[j,2],gt[j,3]+1
#print np.logical_and(Ze==1, Zt==1).sum()
#if np.logical_and(Ze==1, Zt==1).sum() >= prop*np.sum(Ze==1) and np.logical_and(Ze==1, Zt==1).sum() >= prop*np.sum(Zt==1):
if x>=xl and x<=xr and y>=yl and y<=yr:
TP += 1; FN -= 1; found = True
#print fnd[i]
#print gt[j]
gt[j] = [0]*gt.shape[1]
foundouts.append(i)
break
if not found: FP += 1
print '%s %s %s %s %s' % (TP, FP, FN,TP/(TP+FP),TP/(TP+FN))
#print 'ests: %s; trues: %s; precision: %s; recall: %s' % (fnd.shape[0],gt.shape[0],TP/(TP+FP),TP/(TP+FN))
#outf = '%s_%s/%s_%s_%s_cut' % (row,col,stub,row,col)
#out=np.asarray([TP,FP,FN,fnd.shape[0],gt.shape[0],TP/(TP+FP),TP/(TP+FN)])
#np.savetxt(outf, out)
#plt.clf()
#plt.imshow(zsmall,interpolation='nearest',cmap='gray',origin='lower')
#for i in range(len(foundouts)):
# idx = foundouts[i]
# top_mdx =fnd[idx,0]
# top_mdy =fnd[idx,1]
# top_sigx=fnd[idx,2]
# top_sigy=fnd[idx,3]
# top_phi =fnd[idx,4]
# a = top_sigx; b = top_sigy;
# rect = Ellipse((top_mdx,top_mdy),a*2,b*2,top_phi*(180./math.pi),edgecolor='red',facecolor='green',alpha = 0.5)
# pl.gca().add_patch(rect)
#rect = Rectangle((gt[i,4],sublist[i,2]),sublist[i,5]-sublist[i,4],sublist[i,3]-sublist[i,2], edgecolor='red', facecolor='green', alpha=0.5)
#pl.gca().add_patch(rect)
#plt.xlim(0,500)
#plt.ylim(0,500)
#plt.clim(0,0.0001)
#plt.savefig('%s_%s/%s_%s_%s_ew_TP' % (row,col,stub,row,col))
| gpl-2.0 |
corburn/scikit-bio | setup.py | 6 | 4944 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import platform
import re
import ast
from setuptools import find_packages, setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext as _build_ext
# Bootstrap setup.py with numpy
# Huge thanks to coldfix's solution
# http://stackoverflow.com/a/21621689/579416
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('skbio/__init__.py', 'rb') as f:
hit = _version_re.search(f.read().decode('utf-8')).group(1)
version = str(ast.literal_eval(hit))
classes = """
Development Status :: 4 - Beta
License :: OSI Approved :: BSD License
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = ('Data structures, algorithms and educational '
'resources for bioinformatics.')
with open('README.rst') as f:
long_description = f.read()
# Dealing with Cython
USE_CYTHON = os.environ.get('USE_CYTHON', False)
ext = '.pyx' if USE_CYTHON else '.c'
# There's a bug in some versions of Python 3.4 that propagates
# -Werror=declaration-after-statement to extensions, instead of just affecting
# the compilation of the interpreter. See http://bugs.python.org/issue21121 for
# details. This acts as a workaround until the next Python 3 release -- thanks
# Wolfgang Maier (wolma) for the workaround!
ssw_extra_compile_args = ['-Wno-error=declaration-after-statement']
# Users with i686 architectures have reported that adding this flag allows
# SSW to be compiled. See https://github.com/biocore/scikit-bio/issues/409 and
# http://stackoverflow.com/q/26211814/3776794 for details.
if platform.machine() == 'i686':
ssw_extra_compile_args.append('-msse2')
extensions = [
Extension("skbio.stats.__subsample",
["skbio/stats/__subsample" + ext]),
Extension("skbio.alignment._ssw_wrapper",
["skbio/alignment/_ssw_wrapper" + ext,
"skbio/alignment/_lib/ssw.c"],
extra_compile_args=ssw_extra_compile_args)
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions)
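# Example invocation of the Cython path above (an added note, not original
# code; the exact workflow may differ per environment):
#
#     $ USE_CYTHON=1 python setup.py build_ext --inplace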
setup(name='scikit-bio',
version=version,
license='BSD',
description=description,
long_description=long_description,
author="scikit-bio development team",
author_email="[email protected]",
maintainer="scikit-bio development team",
maintainer_email="[email protected]",
url='http://scikit-bio.org',
test_suite='nose.collector',
packages=find_packages(),
ext_modules=extensions,
cmdclass={'build_ext': build_ext},
setup_requires=['numpy >= 1.9.2'],
install_requires=[
'bz2file >= 0.98',
'CacheControl[FileCache] >= 0.11.5',
'contextlib2 >= 0.4.0',
'decorator >= 3.4.2',
'future >= 0.14.3',
'IPython >= 3.2.0',
'matplotlib >= 1.4.3',
'natsort >= 4.0.3',
'numpy >= 1.9.2',
'pandas >= 0.16.2',
'scipy >= 0.15.1',
'six >= 1.9.0'
],
extras_require={'test': ["HTTPretty", "nose", "pep8", "flake8",
"python-dateutil", "check-manifest"],
'doc': ["Sphinx == 1.2.2", "sphinx-bootstrap-theme"]},
classifiers=classifiers,
package_data={
'skbio.diversity.alpha.tests': ['data/qiime-191-tt/*'],
'skbio.diversity.beta.tests': ['data/qiime-191-tt/*'],
'skbio.io.tests': ['data/*'],
'skbio.io.format.tests': ['data/*'],
'skbio.stats.tests': ['data/*'],
'skbio.stats.distance.tests': ['data/*'],
'skbio.stats.ordination.tests': ['data/*']
}
)
| bsd-3-clause |
selective-inference/selective-inference | doc/learning_examples/HIV/HIV_scale_CV.py | 3 | 4262 | import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
# load in the X matrix
from selection.tests.instance import HIV_NRTI
X_full = HIV_NRTI(datafile="NRTI_DATA.txt", standardize=False)[0]
from selection.learning.utils import full_model_inference, liu_inference, pivot_plot
from selection.learning.core import split_sampler, keras_fit
from selection.learning.Rutils import lasso_glmnet, cv_glmnet_lam
from selection.learning.learners import mixture_learner
mixture_learner.scales = [1]*10 + [1.5,2,3,4,5,10]
boot_design = False
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=15000, seed=0):
# description of statistical problem
n, p = X_full.shape
if boot_design:
idx = np.random.choice(np.arange(n), n, replace=True)
X = X_full[idx] # bootstrap X to make it really an IID sample, i.e. don't condition on X throughout
X += 0.1 * np.std(X) * np.random.standard_normal(X.shape) # to make non-degenerate
else:
X = X_full.copy()
X = X - np.mean(X, 0)[None, :]
X = X / np.std(X, 0)[None, :]
n, p = X.shape
truth = np.zeros(p)
truth[:s] = np.linspace(signal[0], signal[1], s)
np.random.shuffle(truth)
truth /= np.sqrt(n)
truth *= sigma
y = X.dot(truth) + sigma * np.random.standard_normal(n)
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
print(dispersion, sigma**2)
splitting_sampler = split_sampler(X * y[:, None], covS)
def meta_algorithm(X, XTXi, resid, sampler):
S = sampler(scale=0.) # deterministic with scale=0
ynew = X.dot(XTXi).dot(S) + resid # will be ok for n>p and non-degen X
G = lasso_glmnet(X, ynew, *[None]*4)
select = G.select(seed=seed)
return set(list(select[0]))
selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)
# run selection algorithm
df = full_model_inference(X,
y,
truth,
selection_algorithm,
splitting_sampler,
success_params=(1, 1),
B=B,
fit_probability=keras_fit,
fit_args={'epochs':10,
'sizes':[100]*5,
'dropout':0.,
'activation':'relu'},
learner_klass=mixture_learner)
if df is not None:
lam_min, lam_1se = cv_glmnet_lam(X.copy(), y.copy(), seed=seed)
lam_min, lam_1se = n * lam_min, n * lam_1se
liu_df = liu_inference(X,
y,
1.00001 * lam_min,
dispersion,
truth,
alpha=alpha)
return pd.merge(df, liu_df, on='variable')
else:
return df
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
U = np.linspace(0, 1, 101)
plt.clf()
init_seed = np.fabs(np.random.standard_normal() * 500)
for i in range(500):
df = simulate(seed=init_seed+i)
csvfile = 'HIV_CV_scale.csv'
outbase = csvfile[:-4]
if df is not None or i > 0:
try:
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
if df is not None:
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, lengths_ax = pivot_plot(df, outbase)
liu_pivot = df['liu_pivot']
liu_pivot = liu_pivot[~np.isnan(liu_pivot)]
pivot_ax.plot(U, sm.distributions.ECDF(liu_pivot)(U), 'gray', label='Liu CV',
linewidth=3)
pivot_ax.legend()
fig = pivot_ax.figure
fig.savefig(csvfile[:-4] + '.pdf')
| bsd-3-clause |
jmschrei/scikit-learn | sklearn/neighbors/base.py | 30 | 30586 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
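# Minimal usage sketch for _get_weights -- an added example, not original
# code; values are illustrative:
#
#     >>> dist = np.array([[1.0, 2.0]])
#     >>> _get_weights(dist, 'uniform') is None
#     True
#     >>> _get_weights(dist.copy(), 'distance')      # elementwise 1 / dist
#     array([[ 1. ,  0.5]])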
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
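# --- Illustrative sketch (added; not part of the original module) ------------
# The CSR assembly used by radius_neighbors_graph above can be hard to read in
# place.  The helper below is a minimal stand-alone restatement of the same
# idea: given one 1D array of neighbor indices per query sample, build the
# (data, indices, indptr) triplet of a CSR adjacency matrix.  It is never
# called by the library code and the name is hypothetical.
def _csr_from_neighborhoods_sketch(neighborhoods, n_columns):
    # number of neighbors of each query point (rows may differ in length)
    n_neighbors = np.array([len(a) for a in neighborhoods])
    # row i occupies A_ind[A_indptr[i]:A_indptr[i + 1]]
    A_ind = np.concatenate(list(neighborhoods))
    A_data = np.ones(len(A_ind))                     # 'connectivity' weights
    A_indptr = np.concatenate((np.zeros(1, dtype=int),
                               np.cumsum(n_neighbors)))
    return csr_matrix((A_data, A_ind, A_indptr),
                      shape=(len(neighborhoods), n_columns))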
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
louispotok/pandas | pandas/tests/sparse/frame/test_apply.py | 3 | 2616 | import pytest
import numpy as np
from pandas import SparseDataFrame, DataFrame, Series, bdate_range
from pandas.core import nanops
from pandas.util import testing as tm
@pytest.fixture
def dates():
return bdate_range('1/1/2011', periods=10)
@pytest.fixture
def empty():
return SparseDataFrame()
@pytest.fixture
def frame(dates):
data = {'A': [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6],
'C': np.arange(10, dtype=np.float64),
'D': [0, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan]}
return SparseDataFrame(data, index=dates)
@pytest.fixture
def fill_frame(frame):
values = frame.values.copy()
values[np.isnan(values)] = 2
return SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=2,
index=frame.index)
def test_apply(frame):
applied = frame.apply(np.sqrt)
assert isinstance(applied, SparseDataFrame)
tm.assert_almost_equal(applied.values, np.sqrt(frame.values))
# agg / broadcast
with tm.assert_produces_warning(FutureWarning):
broadcasted = frame.apply(np.sum, broadcast=True)
assert isinstance(broadcasted, SparseDataFrame)
with tm.assert_produces_warning(FutureWarning):
exp = frame.to_dense().apply(np.sum, broadcast=True)
tm.assert_frame_equal(broadcasted.to_dense(), exp)
applied = frame.apply(np.sum)
tm.assert_series_equal(applied,
frame.to_dense().apply(nanops.nansum))
def test_apply_fill(fill_frame):
applied = fill_frame.apply(np.sqrt)
assert applied['A'].fill_value == np.sqrt(2)
def test_apply_empty(empty):
assert empty.apply(np.sqrt) is empty
def test_apply_nonuq():
orig = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=['a', 'a', 'c'])
sparse = orig.to_sparse()
res = sparse.apply(lambda s: s[0], axis=1)
exp = orig.apply(lambda s: s[0], axis=1)
# dtype must be kept
assert res.dtype == np.int64
# ToDo: apply must return subclassed dtype
assert isinstance(res, Series)
tm.assert_series_equal(res.to_dense(), exp)
# df.T breaks
sparse = orig.T.to_sparse()
res = sparse.apply(lambda s: s[0], axis=0) # noqa
exp = orig.T.apply(lambda s: s[0], axis=0)
# TODO: no non-unique columns supported in sparse yet
# tm.assert_series_equal(res.to_dense(), exp)
def test_applymap(frame):
# just test that it works
result = frame.applymap(lambda x: x * 2)
assert isinstance(result, SparseDataFrame)
| bsd-3-clause |
stephenliu1989/HK_DataMiner | hkdataminer/scripts/plotPopulation.py | 1 | 1488 | __author__ = 'stephen'
import os,sys
import numpy as np
import scipy.io
#import matplotlib.pyplot as plt
from collections import Counter
HK_DataMiner_Path = os.path.relpath(os.pardir)
#HK_DataMiner_Path = os.path.abspath("/home/stephen/Dropbox/projects/work-2015.5/HK_DataMiner/")
sys.path.append(HK_DataMiner_Path)
import argparse
from utils import plot_cluster_size_distribution
cli = argparse.ArgumentParser()
cli.add_argument('-c', '--assignments_dir', type=str)
args = cli.parse_args()
assignments_dir = args.assignments_dir
labels = np.loadtxt(assignments_dir, dtype=np.int32)
#counts = Counter(labels)
counts = list(Counter(labels).values())
total_states = np.max(labels) + 1
states_magnitude = int(np.ceil(np.log10(total_states)))
total_frames = len(labels)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print "states", total_states, "frames", total_frames
populations = np.zeros(frames_magnitude+1)
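# populations[k] will count the clusters whose size lies in (10**(k-1), 10**k],
# i.e. a histogram of cluster sizes binned by order of magnitude.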
for i in counts:
if i > 0:
log_i = np.log10(i)
        magnitude = int(np.ceil(log_i))
        populations[magnitude] += 1
#print magnitude, populations
print "Populations Probability:"
#bins = [0]
for i in xrange(len(populations)):
populations[i] = populations[i] / total_states
print "10 ^", i, "to", "10 ^", i+1,":", populations[i]*100, "%"
#bins.append(10**(i+1))
name = assignments_dir[:-4] + '_Populations'
print "name:", name
plot_cluster_size_distribution(populations=populations, name=name)
#print bins
#plt.hist(labels)
#plt.show()
| apache-2.0 |
Neuroglycerin/neukrill-net-work | generate_hlf_testcache_3aug.py | 1 | 2238 | #!/usr/bin/env python
"""
Generate a cache of all the ComputerVision highlevelfeatures to send to Pylearn2
"""
from __future__ import division
import neukrill_net.augment
import neukrill_net.highlevelfeatures
import neukrill_net.utils
import copy
import numpy as np
from sklearn.externals import joblib
# Define output path
pkl_path1 = '/disk/data1/s1145806/cached_hlf_test3_data_raw.pkl'
pkl_path2 = '/disk/data1/s1145806/cached_hlf_test3_raw.pkl'
pkl_path3 = '/disk/data1/s1145806/cached_hlf_test3_data_ranged.pkl'
pkl_path4 = '/disk/data1/s1145806/cached_hlf_test3_ranged.pkl'
pkl_path5 = '/disk/data1/s1145806/cached_hlf_test3_data_posranged.pkl'
pkl_path6 = '/disk/data1/s1145806/cached_hlf_test3_posranged.pkl'
# Define which basic attributes to use
attrlst = ['height','width','numpixels','sideratio','mean','std','stderr',
'propwhite','propnonwhite','propbool']
# Parse the data
settings = neukrill_net.utils.Settings('settings.json')
X,y = neukrill_net.utils.load_rawdata(settings.image_fnames)
# Combine all the features we want to use
hlf_list = []
hlf_list.append( neukrill_net.highlevelfeatures.BasicAttributes(attrlst) )
hlf_list.append( neukrill_net.highlevelfeatures.ContourMoments() )
hlf_list.append( neukrill_net.highlevelfeatures.ContourHistogram() )
hlf_list.append( neukrill_net.highlevelfeatures.ThresholdAdjacency() )
hlf_list.append( neukrill_net.highlevelfeatures.ZernikeMoments() )
hlf_list.append( neukrill_net.highlevelfeatures.Haralick() )
# hlf_list.append( neukrill_net.highlevelfeatures.CoocurProps() )
augs = {'units': 'uint8',
'rotate': 3,
'rotate_is_resizable': 1}
aug_fun = neukrill_net.augment.augmentation_wrapper(**augs)
hlf = neukrill_net.highlevelfeatures.MultiHighLevelFeature(hlf_list, augment_func=aug_fun)
# Save the raw values of every feature
X_raw = hlf.generate_cache(X)
# Save the feature matrix to disk
joblib.dump(X_raw, pkl_path1)
# the [-1,1] squashed values
X_range = X_raw / np.amax(np.absolute(X_raw),0)
# Save the feature matrix
joblib.dump(X_range, pkl_path3)
# the [0,1] squashed values
X_posrange = (X_raw-X_raw.min(0))/(X_raw.max(0)-X_raw.min(0))
# Save the feature matrix
joblib.dump(X_posrange, pkl_path5)
| mit |
YihaoLu/statsmodels | statsmodels/tools/testing.py | 23 | 1443 | """assert functions from numpy and pandas testing
"""
import re
from distutils.version import StrictVersion
import numpy as np
import numpy.testing as npt
import pandas
import pandas.util.testing as pdt
# for pandas version check
def strip_rc(version):
return re.sub(r"rc\d+$", "", version)
def is_pandas_min_version(min_version):
'''check whether pandas is at least min_version
'''
from pandas.version import short_version as pversion
return StrictVersion(strip_rc(pversion)) >= min_version
# local copies, all unchanged
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_equal, assert_array_less,
assert_array_max_ulp, assert_raises, assert_string_equal, assert_warns)
# adjusted functions
def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
if not is_pandas_min_version('0.14.1'):
npt.assert_equal(actual, desired, err_msg='', verbose=True)
else:
if isinstance(desired, pandas.Index):
pdt.assert_index_equal(actual, desired)
elif isinstance(desired, pandas.Series):
pdt.assert_series_equal(actual, desired, **kwds)
elif isinstance(desired, pandas.DataFrame):
pdt.assert_frame_equal(actual, desired, **kwds)
else:
npt.assert_equal(actual, desired, err_msg='', verbose=True)
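# --- Illustrative sketch (added; not part of the original module) ------------
# On pandas >= 0.14.1 the wrapper above dispatches on the type of `desired`:
# pandas Index/Series/DataFrame objects go to the pandas testing helpers and
# everything else falls back to numpy's assert_equal.  Minimal, hypothetical
# usage (never called by the test suite):
def _assert_equal_sketch():
    s = pandas.Series([1, 2, 3])
    assert_equal(s, s.copy())                    # -> pdt.assert_series_equal
    assert_equal(np.arange(3), np.arange(3))     # -> npt.assert_equal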
| bsd-3-clause |
mattgiguere/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
hlin117/statsmodels | statsmodels/tsa/filters/filtertools.py | 25 | 12438 | # -*- coding: utf-8 -*-
"""Linear Filters for time series analysis and testing
TODO:
* check common sequence in signature of filter functions (ar,ma,x) or (x,ar,ma)
Created on Sat Oct 23 17:18:03 2010
Author: Josef-pktd
"""
#not original copied from various experimental scripts
#version control history is there
from statsmodels.compat.python import range
import numpy as np
import scipy.fftpack as fft
from scipy import signal
from scipy.signal.signaltools import _centered as trim_centered
from ._utils import _maybe_get_pandas_wrapper
def _pad_nans(x, head=None, tail=None):
if np.ndim(x) == 1:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[np.nan] * head, x, [np.nan] * tail]
elif tail is None:
return np.r_[[np.nan] * head, x]
elif head is None:
return np.r_[x, [np.nan] * tail]
elif np.ndim(x) == 2:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[[np.nan] * x.shape[1]] * head, x,
[[np.nan] * x.shape[1]] * tail]
elif tail is None:
return np.r_[[[np.nan] * x.shape[1]] * head, x]
elif head is None:
return np.r_[x, [[np.nan] * x.shape[1]] * tail]
else:
raise ValueError("Nan-padding for ndim > 2 not implemented")
#original changes and examples in sandbox.tsa.try_var_convolve
# don't do these imports, here just for copied fftconvolve
#get rid of these imports
#from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
# ifftn, fftfreq
#from numpy import product,array
def fftconvolveinv(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
    copied from scipy.signal.signaltools, but here used to try out an inverse
    filter; it doesn't work, or I can't get it to work
2010-10-23:
looks ok to me for 1d,
from results below with padded data array (fftp)
but it doesn't work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1+s2-1
# Always use 2**n-sized FFT
fsize = 2**np.ceil(np.log2(size))
IN1 = fft.fftn(in1,fsize)
#IN1 *= fftn(in2,fsize) #JP: this looks like the only change I made
IN1 /= fft.fftn(in2,fsize) # use inverse filter
# note the inverse is elementwise not matrix inverse
# is this correct, NO doesn't seem to work for VARMA
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if np.product(s1,axis=0) > np.product(s2,axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1)
#code duplication with fftconvolveinv
def fftconvolve3(in1, in2=None, in3=None, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
for use with arma (old version: in1=num in2=den in3=data
* better for consistency with other functions in1=data in2=num in3=den
* note in2 and in3 need to have consistent dimension/shape
since I'm using max of in2, in3 shapes and not the sum
    copied from scipy.signal.signaltools, but here used to try out an inverse
    filter; it doesn't work, or I can't get it to work
2010-10-23
looks ok to me for 1d,
from results below with padded data array (fftp)
but it doesn't work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
if (in2 is None) and (in3 is None):
raise ValueError('at least one of in2 and in3 needs to be given')
s1 = np.array(in1.shape)
if not in2 is None:
s2 = np.array(in2.shape)
else:
s2 = 0
if not in3 is None:
s3 = np.array(in3.shape)
s2 = max(s2, s3) # try this looks reasonable for ARMA
#s2 = s3
    # in2 may be None here (only in3 given); use whichever array is available
    other = in2 if in2 is not None else in3
    complex_result = (np.issubdtype(in1.dtype, np.complex) or
                      np.issubdtype(other.dtype, np.complex))
size = s1+s2-1
# Always use 2**n-sized FFT
fsize = 2**np.ceil(np.log2(size))
#convolve shorter ones first, not sure if it matters
if not in2 is None:
IN1 = fft.fftn(in2, fsize)
if not in3 is None:
IN1 /= fft.fftn(in3, fsize) # use inverse filter
# note the inverse is elementwise not matrix inverse
# is this correct, NO doesn't seem to work for VARMA
IN1 *= fft.fftn(in1, fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if np.product(s1,axis=0) > np.product(s2,axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1)
#original changes and examples in sandbox.tsa.try_var_convolve
#examples and tests are there
def recursive_filter(x, ar_coeff, init=None):
'''
Autoregressive, or recursive, filtering.
Parameters
----------
x : array-like
Time-series data. Should be 1d or n x 1.
ar_coeff : array-like
AR coefficients in reverse time order. See Notes
init : array-like
Initial values of the time-series prior to the first value of y.
The default is zero.
Returns
-------
y : array
Filtered array, number of columns determined by x and ar_coeff. If a
pandas object is given, a pandas object is returned.
Notes
-----
Computes the recursive filter ::
y[n] = ar_coeff[0] * y[n-1] + ...
+ ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n]
where n_coeff = len(n_coeff).
'''
_pandas_wrapper = _maybe_get_pandas_wrapper(x)
x = np.asarray(x).squeeze()
ar_coeff = np.asarray(ar_coeff).squeeze()
if x.ndim > 1 or ar_coeff.ndim > 1:
raise ValueError('x and ar_coeff have to be 1d')
if init is not None: # integer init are treated differently in lfiltic
if len(init) != len(ar_coeff):
raise ValueError("ar_coeff must be the same length as init")
init = np.asarray(init, dtype=float)
if init is not None:
zi = signal.lfiltic([1], np.r_[1, -ar_coeff], init, x)
else:
zi = None
y = signal.lfilter([1.], np.r_[1, -ar_coeff], x, zi=zi)
if init is not None:
result = y[0]
else:
result = y
if _pandas_wrapper:
return _pandas_wrapper(result)
return result
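# --- Illustrative sketch (added; not part of the original module) ------------
# Minimal check of the recursion documented above, y[n] = a * y[n-1] + x[n],
# written as a plain Python loop so the lfilter-based implementation can be
# compared against it.  Never called by the library; the name is hypothetical.
def _recursive_filter_sketch():
    x = np.array([1., 0., 0., 0., 0.])
    a = 0.5
    y_loop = np.zeros_like(x)
    for n in range(len(x)):
        y_loop[n] = x[n] + (a * y_loop[n - 1] if n > 0 else 0.)
    y_filt = recursive_filter(x, np.array([a]))
    return np.allclose(y_loop, y_filt)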
def convolution_filter(x, filt, nsides=2):
'''
Linear filtering via convolution. Centered and backward displaced moving
weighted average.
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
filt : array_like
Linear filter coefficients in reverse time-order. Should have the
same number of dimensions as x though if 1d and ``x`` is 2d will be
coerced to 2d.
nsides : int, optional
If 2, a centered moving average is computed using the filter
coefficients. If 1, the filter coefficients are for past values only.
Both methods use scipy.signal.convolve.
Returns
-------
y : ndarray, 2d
Filtered array, number of columns determined by x and filt. If a
pandas object is given, a pandas object is returned. The index of
the return is the exact same as the time period in ``x``
Notes
-----
In nsides == 1, x is filtered ::
y[n] = filt[0]*x[n-1] + ... + filt[n_filt-1]*x[n-n_filt]
where n_filt is len(filt).
If nsides == 2, x is filtered around lag 0 ::
y[n] = filt[0]*x[n - n_filt/2] + ... + filt[n_filt / 2] * x[n]
+ ... + x[n + n_filt/2]
where n_filt is len(filt). If n_filt is even, then more of the filter
is forward in time than backward.
If filt is 1d or (nlags,1) one lag polynomial is applied to all
variables (columns of x). If filt is 2d, (nlags, nvars) each series is
independently filtered with its own lag polynomial, uses loop over nvar.
This is different than the usual 2d vs 2d convolution.
Filtering is done with scipy.signal.convolve, so it will be reasonably
fast for medium sized data. For large data fft convolution would be
faster.
'''
# for nsides shift the index instead of using 0 for 0 lag this
# allows correct handling of NaNs
if nsides == 1:
trim_head = len(filt) - 1
trim_tail = None
elif nsides == 2:
trim_head = int(np.ceil(len(filt)/2.) - 1) or None
trim_tail = int(np.ceil(len(filt)/2.) - len(filt) % 2) or None
else: # pragma : no cover
raise ValueError("nsides must be 1 or 2")
_pandas_wrapper = _maybe_get_pandas_wrapper(x)
x = np.asarray(x)
filt = np.asarray(filt)
if x.ndim > 1 and filt.ndim == 1:
filt = filt[:, None]
if x.ndim > 2:
raise ValueError('x array has to be 1d or 2d')
if filt.ndim == 1 or min(filt.shape) == 1:
result = signal.convolve(x, filt, mode='valid')
elif filt.ndim == 2:
nlags = filt.shape[0]
nvar = x.shape[1]
result = np.zeros((x.shape[0] - nlags + 1, nvar))
if nsides == 2:
for i in range(nvar):
# could also use np.convolve, but easier for swiching to fft
result[:, i] = signal.convolve(x[:, i], filt[:, i],
mode='valid')
elif nsides == 1:
for i in range(nvar):
result[:, i] = signal.convolve(x[:, i], np.r_[0, filt[:, i]],
mode='valid')
result = _pad_nans(result, trim_head, trim_tail)
if _pandas_wrapper:
return _pandas_wrapper(result)
return result
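# --- Illustrative sketch (added; not part of the original module) ------------
# A 3-term centered moving average with nsides=2, showing the NaN padding that
# convolution_filter applies at both ends of the series.  Never called by the
# library; the name is hypothetical.
def _convolution_filter_sketch():
    x = np.arange(10.)
    filt = np.ones(3) / 3.                 # symmetric 3-point average
    y = convolution_filter(x, filt, nsides=2)
    # y[0] and y[-1] are NaN; interior entries equal the centered mean of x
    return y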
#copied from sandbox.tsa.garch
def miso_lfilter(ar, ma, x, useic=False): #[0.1,0.1]):
'''
use nd convolution to merge inputs,
then use lfilter to produce output
arguments for column variables
return currently 1d
Parameters
----------
ar : array_like, 1d, float
autoregressive lag polynomial including lag zero, ar(L)y_t
ma : array_like, same ndim as x, currently 2d
moving average lag polynomial ma(L)x_t
x : array_like, 2d
input data series, time in rows, variables in columns
Returns
-------
y : array, 1d
filtered output series
inp : array, 1d
combined input series
Notes
-----
currently for 2d inputs only, no choice of axis
Use of signal.lfilter requires that ar lag polynomial contains
floating point numbers
does not cut off invalid starting and final values
miso_lfilter find array y such that::
ar(L)y_t = ma(L)x_t
with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars)
'''
ma = np.asarray(ma)
ar = np.asarray(ar)
#inp = signal.convolve(x, ma, mode='valid')
#inp = signal.convolve(x, ma)[:, (x.shape[1]+1)//2]
#Note: convolve mixes up the variable left-right flip
#I only want the flip in time direction
#this might also be a mistake or problem in other code where I
#switched from correlate to convolve
# correct convolve version, for use with fftconvolve in other cases
#inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
inp = signal.correlate(x, ma[::-1,:])[:, (x.shape[1]+1)//2]
#for testing 2d equivalence between convolve and correlate
#np.testing.assert_almost_equal(inp2, inp)
nobs = x.shape[0]
# cut of extra values at end
#todo initialize also x for correlate
if useic:
return signal.lfilter([1], ar, inp,
#zi=signal.lfilter_ic(np.array([1.,0.]),ar, ic))[0][:nobs], inp[:nobs]
zi=signal.lfiltic(np.array([1.,0.]),ar, useic))[0][:nobs], inp[:nobs]
else:
return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
#return signal.lfilter([1], ar, inp), inp
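# --- Illustrative sketch (added; not part of the original module) ------------
# miso_lfilter solves ar(L) y_t = ma(L) x_t for a multiple-input, single-output
# system.  With ar = [1.] (no autoregression) and lag-zero ma weights of one
# for each column, the output is simply the row sum of x.  Never called by the
# library; the name is hypothetical.
def _miso_lfilter_sketch():
    nobs = 20
    x = np.column_stack((np.arange(nobs, dtype=float), np.ones(nobs)))
    ar = np.array([1.])                  # ar(L) = 1
    ma = np.array([[1., 1.]])            # lag-0 weight for each input column
    y, inp = miso_lfilter(ar, ma, x)
    return np.allclose(y, x.sum(1))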
| bsd-3-clause |
wolfiex/DSMACC-testing | dsmacc/observations/constrain/fitobspete_archive.py | 1 | 7827 | ''' observation data array '''
''' read species 15a, matrix of values in init.
initial conditions flag add,
To compile f90 file:
export CFLAGS=''
export CXXFLAGS=''
export F77=gfortran
export FC=gfortran
export F90=gfortran
export FFLAGS=''
export CPP='gcc'
f2py --f90flags='-ffree-form -fPIC' -c splinecf.f90 -m splinef --f90exec=/usr/bin/gfortran
--verbose
'''
import numpy as np
import splinef
import os
global names
names = []
copyto = '../../../'
matched= 'constraints_040617/*.csv'
def get_spline(match='*.csv',plot=True):
#data is saved in 1-24
import glob
save=[]
for filename in glob.glob(match):
print filename
myname = filename.split('/')[-1].split('_')[0]
names.append(myname)
scale = 1
if '_pptv' in filename: scale = 1e-12
if '_ppbv' in filename: scale = 1e-9
data = tuple(open(filename))
data = np.array([i.replace(' ','').split(',') for i in data])
data = data.astype(np.float)
#ensure x is forwards going - required by spline
newdata = []
old = 0
for i in data:
if i[0]>old: newdata.append(i)
old = i[0]
data = np.array(newdata)
x=data[:,0]
y=data[:,1]
'''
# update first and last values to ensure continuity
x= np.insert(x,0,0)
x =np.insert(x,len(x),24)
mp = (y[-1]+y[0])/2
y= np.insert(y,0,mp)
y =np.insert(y,len(y),mp)
'''
x = [i/24. for i in x]
y = [np.log10(i*scale) for i in y]
coeff = splinef.spline(x,y)
newx = np.array([i/24. for i in xrange(25)])
spyint = [splinef.seval(i,x,y,*coeff) for i in newx]
newx = np.insert(newx,0,-.1)
newx = np.insert(newx,len(newx),1.1)
mp = (spyint[-2]+y[1])/2
spyint[0] = mp
spyint = np.insert(spyint,0,mp)
spyint[-1] = mp
spyint = np.insert(spyint,len(spyint),mp)
coeff = splinef.spline(newx,spyint)
modelfit = np.linspace(0,1,num=100,endpoint=True)
modelresult = [splinef.seval(i,newx,spyint,*coeff) for i in modelfit]
if plot:
import matplotlib.pyplot as plt
plt.clf()
plt.plot(x, y, 'o', newx, spyint, '+', modelfit, modelresult, '--')
plt.legend(['data', 'hourly', 'cubic'], loc='best')
plt.title(filename)
#plt.show()
plt.savefig(filename.replace('.csv','.png'))
save.append(spyint)
for j in coeff:
save.append(j)
#string = r"""C(ind_%s)= CFACTOR*10**seval(%s,DFRACT,(/%s/), (/%s/), (/%s/), (/%s/), (/%s/) )"""%(len(modelfit),myname,modelfit,modelresult,coeff[0],coeff[1],coeff[])
save.append(newx)
return save
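# NOTE (added comment, not part of the original script): for every observation
# file, get_spline appends the 27 spline knot values (25 hourly points plus two
# guard points at day fractions -0.1 and 1.1), the cubic-spline coefficient
# arrays returned by splinef.spline, and the 27-point time grid.  cfarray below
# therefore holds one such block of rows per constrained species, in the order
# the files were read, and the generated Fortran evaluates them with
# seval(27, dfract, ...).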
cfarray = np.asfortranarray(np.array(get_spline(match=matched)))
splinef.writeobs(cfarray)
ratestring = "!obs:%d\n"%len(cfarray)
ratestring+='''
!! Constrain from observations
!USE model_Global, ONLY: CONSTRAIN,CFACTOR,spcf,obs
!DFRACT = mod(((DAYCOUNTER*dt)/86400.) + mod(JDAY,1.),1.)
'''
#generator to increase nubmers
def count(n):
num = 1
while num < n:
yield num
num += 1
c = count(len(cfarray))
n=next
BVOC = ['C5H8','APINENE','BPINENE','LIMONENE']
ANTH = ['C2H6','C3H8','IC4H10','NC4H10','CHEX','IC5H12','M2PE','NC5H12','NC6H14','NC7H16','NC8H18','NC9H20','NC10H22','NC11H24','NC12H26','BENZENE','TOLUENE','EBENZ','OXYL','MXYL','IPBENZ','PBENZ','METHTOL','PETHTOL','TM135B','OETHTOL','TM124B','TM123B','DIME35EB','C2H4','C3H6','C2H2','TBUT2ENE','BUT1ENE','MEPROPENE','CBUT2ENE','C4H6','TPENT2ENE','CPENT2ENE']
# ['C2H6','C3H8','IC4H10','NC4H10','CHEX','IC5H12','M2PE','NC5H12','NC6H14','NC7H16','NC8H18','NC9H20','NC10H22','NC11H24','NC12H26']
NOX = ['NOEMISS','NO2EMIS']
CO = ['CO']
LAROM = []
#['IPBENZ','PBENZ','METHTOL','PETHTOL','TM135B','OETHTOL','TM124B','TM123B','DIME35EB']
SAROM = []
#['BENZENE','TOLUENE','EBENZ','OXYL','MXYL']
ALKE = []
# ['C2H4','C3H6','C2H2','TBUT2ENE','BUT1ENE','MEPROPENE','CBUT2ENE','C4H6','TPENT2ENE','CPENT2ENE']
METHANE = ['CH4']
for i in names:
if i in ['NOX','nox','NOx']:
ratestring += "\n TNOX_OLD = CFACTOR*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(n(c),n(c),n(c),n(c))
elif i in ['DEPOS','depos']:
ratestring += "\n DEPOS = 10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(n(c),n(c),n(c),n(c))
elif i in ['DILUTE','dilute']:
ratestring += "\n DILUTE = 10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(n(c),n(c),n(c),n(c))
elif i in ['BLH','blh']:
ratestring += "\n BLH = 10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(n(c),n(c),n(c),n(c))
elif i in ['SAREA','sarea']:
ratestring += "\n SAREA = SAFAC*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(n(c),n(c),n(c),n(c))
elif i in ['TEMP','temp']:
ratestring += "\n TEMP = 10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(n(c),n(c),n(c),n(c))
elif i in ['H2O','h2o']:
ratestring += "\n H2O = CFACTOR*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(n(c),n(c),n(c),n(c))
elif i in BVOC:
ratestring += "\n C(ind_%s) = BVOCFAC*CFACTOR*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(i,n(c),n(c),n(c),n(c))
ratestring += '\n CONSTRAIN(ind_%s) = C(ind_%s)\n'%(i,i)
elif i in ANTH:
ratestring += "\n C(ind_%s) = ANTHFAC*CFACTOR*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(i,n(c),n(c),n(c),n(c))
ratestring += '\n CONSTRAIN(ind_%s) = C(ind_%s)\n'%(i,i)
elif i in NOX:
ratestring += "\n C(ind_%s) = NOXFAC*CFACTOR*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(i,n(c),n(c),n(c),n(c))
ratestring += '\n CONSTRAIN(ind_%s) = C(ind_%s)\n'%(i,i)
elif i in CO:
ratestring += "\n C(ind_%s) = (COFAC*CFACTOR*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:)))+(1.3E-7*CFACTOR)"%(i,n(c),n(c),n(c),n(c))
ratestring += '\n CONSTRAIN(ind_%s) = C(ind_%s)\n'%(i,i)
elif i in LAROM:
ratestring += "\n C(ind_%s) = LAROMF*CFACTOR*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(i,n(c),n(c),n(c),n(c))
ratestring += '\n CONSTRAIN(ind_%s) = C(ind_%s)\n'%(i,i)
elif i in SAROM:
ratestring += "\n C(ind_%s) = SAROMF*CFACTOR*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(i,n(c),n(c),n(c),n(c))
ratestring += '\n CONSTRAIN(ind_%s) = C(ind_%s)\n'%(i,i)
elif i in ALKE:
ratestring += "\n C(ind_%s) = ALKEFAC*CFACTOR*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(i,n(c),n(c),n(c),n(c))
ratestring += '\n CONSTRAIN(ind_%s) = C(ind_%s)\n'%(i,i)
elif i in METHANE:
ratestring += "\n C(ind_%s) = (CH4FAC*CFACTOR*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:)))+(1.8E-6*CFACTOR)"%(i,n(c),n(c),n(c),n(c))
ratestring += '\n CONSTRAIN(ind_%s) = C(ind_%s)\n'%(i,i)
else:
ratestring += "\n C(ind_%s)= CFACTOR*10**seval(27,dfract,spcf(obs,:),spcf(%d,:),spcf(%d,:),spcf(%d,:),spcf(%d,:))"%(i,n(c),n(c),n(c),n(c))
ratestring += '\n CONSTRAIN(ind_%s) = C(ind_%s)\n'%(i,i)
print ratestring
with open(copyto+'include.obs','w') as f:
f.write(ratestring)
os.system('mv spline.obs ../../../')
print ''
print ' '.join(names)
'''
Add to incude file with conditional
print*, dfract,seval(27,0.5_dp,spcf(obs,:),spcf(1,:),&
spcf(2,:),spcf(3,:),spcf(4,:)), c(ind_O3),SHAPE(spcf)
'''
| gpl-3.0 |
pap/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4.py | 69 | 20664 | from __future__ import division
import math
import os
import sys
import matplotlib
from matplotlib import verbose
from matplotlib.cbook import is_string_like, onetrue
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, IdleEvent, cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.widgets import SubplotTool
try:
from PyQt4 import QtCore, QtGui, Qt
except ImportError:
raise ImportError("Qt4 backend requires that PyQt4 is installed.")
backend_version = "0.9.1"
def fn_name(): return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE : QtCore.Qt.SizeAllCursor,
cursors.HAND : QtCore.Qt.PointingHandCursor,
cursors.POINTER : QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION : QtCore.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
if QtGui.QApplication.startingUp():
if DEBUG: print "Starting up QApplication"
global qApp
qApp = QtGui.QApplication( [" "] )
QtCore.QObject.connect( qApp, QtCore.SIGNAL( "lastWindowClosed()" ),
qApp, QtCore.SLOT( "quit()" ) )
#remember that matplotlib created the qApp - will be used by show()
_create_qApp.qAppCreatedHere = True
_create_qApp.qAppCreatedHere = False
def show():
"""
Show all the figures and enter the qt main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if DEBUG: print 'Inside show'
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
if _create_qApp.qAppCreatedHere:
QtGui.qApp.exec_()
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
thisFig = Figure( *args, **kwargs )
canvas = FigureCanvasQT( thisFig )
manager = FigureManagerQT( canvas, num )
return manager
class FigureCanvasQT( QtGui.QWidget, FigureCanvasBase ):
keyvald = { QtCore.Qt.Key_Control : 'control',
QtCore.Qt.Key_Shift : 'shift',
QtCore.Qt.Key_Alt : 'alt',
}
# left 1, middle 2, right 3
buttond = {1:1, 2:3, 4:2}
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQt: ', figure
_create_qApp()
QtGui.QWidget.__init__( self )
FigureCanvasBase.__init__( self, figure )
self.figure = figure
self.setMouseTracking( True )
# hide until we can test and fix
#self.startTimer(backend_IdleEvent.milliseconds)
w,h = self.get_width_height()
self.resize( w, h )
def __timerEvent(self, event):
# hide until we can test and fix
self.mpl_idle_event(event)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent( self, event ):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond[event.button()]
FigureCanvasBase.button_press_event( self, x, y, button )
if DEBUG: print 'button pressed:', event.button()
def mouseMoveEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event( self, x, y )
#if DEBUG: print 'mouse move'
def mouseReleaseEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond[event.button()]
FigureCanvasBase.button_release_event( self, x, y, button )
if DEBUG: print 'button released'
def keyPressEvent( self, event ):
key = self._get_key( event )
FigureCanvasBase.key_press_event( self, key )
if DEBUG: print 'key press', key
def keyReleaseEvent( self, event ):
key = self._get_key(event)
FigureCanvasBase.key_release_event( self, key )
if DEBUG: print 'key release', key
def resizeEvent( self, event ):
if DEBUG: print 'resize (%d x %d)' % (event.size().width(), event.size().height())
QtGui.QWidget.resizeEvent( self, event )
w = event.size().width()
h = event.size().height()
if DEBUG: print "FigureCanvasQtAgg.resizeEvent(", w, ",", h, ")"
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches( winch, hinch )
self.draw()
def resize( self, w, h ):
# Pass through to Qt to resize the widget.
QtGui.QWidget.resize( self, w, h )
# Resize the figure by converting pixels to inches.
pixelPerInch = self.figure.dpi
wInch = w / pixelPerInch
hInch = h / pixelPerInch
self.figure.set_size_inches( wInch, hInch )
# Redraw everything.
self.draw()
def sizeHint( self ):
w, h = self.get_width_height()
return QtCore.QSize( w, h )
    def minimumSizeHint( self ):
return QtCore.QSize( 10, 10 )
def _get_key( self, event ):
if event.key() < 256:
key = str(event.text())
elif event.key() in self.keyvald:
key = self.keyvald[ event.key() ]
else:
key = None
return key
def flush_events(self):
Qt.qApp.processEvents()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerQT( FigureManagerBase ):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__( self, canvas, num ):
if DEBUG: print 'FigureManagerQT.%s' % fn_name()
FigureManagerBase.__init__( self, canvas, num )
self.canvas = canvas
self.window = QtGui.QMainWindow()
self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
self.window.setWindowIcon(QtGui.QIcon( image ))
# Give the keyboard focus to the figure instead of the manager
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
QtCore.QObject.connect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, self.window)
self.window.addToolBar(self.toolbar)
QtCore.QObject.connect(self.toolbar, QtCore.SIGNAL("message"),
self.window.statusBar().showMessage)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
def notify_axes_change( fig ):
# This will be called whenever the current axes is changed
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver( notify_axes_change )
def _widgetclosed( self ):
if self.window._destroying: return
self.window._destroying = True
Gcf.destroy(self.num)
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height)
def destroy( self, *args ):
if self.window._destroying: return
self.window._destroying = True
QtCore.QObject.disconnect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
if self.toolbar: self.toolbar.destroy()
if DEBUG: print "destroy figure manager"
self.window.close()
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT( NavigationToolbar2, QtGui.QToolBar ):
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.coordinates = coordinates
QtGui.QToolBar.__init__( self, parent )
NavigationToolbar2.__init__( self, canvas )
def _icon(self, name):
return QtGui.QIcon(os.path.join(self.basedir, name))
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams[ 'datapath' ],'images')
a = self.addAction(self._icon('home.svg'), 'Home', self.home)
a.setToolTip('Reset original view')
a = self.addAction(self._icon('back.svg'), 'Back', self.back)
a.setToolTip('Back to previous view')
a = self.addAction(self._icon('forward.svg'), 'Forward', self.forward)
a.setToolTip('Forward to next view')
self.addSeparator()
a = self.addAction(self._icon('move.svg'), 'Pan', self.pan)
a.setToolTip('Pan axes with left mouse, zoom with right')
a = self.addAction(self._icon('zoom_to_rect.svg'), 'Zoom', self.zoom)
a.setToolTip('Zoom to rectangle')
self.addSeparator()
a = self.addAction(self._icon('subplots.png'), 'Subplots',
self.configure_subplots)
a.setToolTip('Configure subplots')
a = self.addAction(self._icon('filesave.svg'), 'Save',
self.save_figure)
a.setToolTip('Save the figure')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtGui.QLabel( "", self )
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop )
self.locLabel.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
def dynamic_update( self ):
self.canvas.draw()
def set_message( self, s ):
self.emit(QtCore.SIGNAL("message"), s)
if self.coordinates:
self.locLabel.setText(s.replace(', ', '\n'))
def set_cursor( self, cursor ):
if DEBUG: print 'Set cursor' , cursor
QtGui.QApplication.restoreOverrideCursor()
QtGui.QApplication.setOverrideCursor( QtGui.QCursor( cursord[cursor] ) )
def draw_rubberband( self, event, x0, y0, x1, y1 ):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [ int(val) for val in (min(x0, x1), min(y0, y1), w, h) ]
self.canvas.drawRectangle( rect )
def configure_subplots(self):
self.adj_window = QtGui.QMainWindow()
win = self.adj_window
win.setAttribute(QtCore.Qt.WA_DeleteOnClose)
win.setWindowTitle("Subplot Configuration Tool")
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
win.setWindowIcon(QtGui.QIcon( image ))
tool = SubplotToolQt(self.canvas.figure, win)
win.setCentralWidget(tool)
win.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
win.show()
def _get_canvas(self, fig):
return FigureCanvasQT(fig)
def save_figure( self ):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname = QtGui.QFileDialog.getSaveFileName(
self, "Choose a filename to save to", start, filters, selectedFilter)
if fname:
try:
self.canvas.print_figure( unicode(fname) )
except Exception, e:
QtGui.QMessageBox.critical(
self, "Error saving file", str(e),
QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)
class SubplotToolQt( SubplotTool, QtGui.QWidget ):
def __init__(self, targetfig, parent):
QtGui.QWidget.__init__(self, None)
self.targetfig = targetfig
self.parent = parent
self.sliderleft = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderbottom = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderright = QtGui.QSlider(QtCore.Qt.Horizontal)
self.slidertop = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderwspace = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderhspace = QtGui.QSlider(QtCore.Qt.Vertical)
# constraints
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderright.setMinimum )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderleft.setMaximum )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.slidertop.setMinimum )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderbottom.setMaximum )
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
adjustments = ('left:', 'bottom:', 'right:', 'top:', 'wspace:', 'hspace:')
for slider, adjustment in zip(sliders, adjustments):
slider.setMinimum(0)
slider.setMaximum(1000)
slider.setSingleStep(5)
layout = QtGui.QGridLayout()
leftlabel = QtGui.QLabel('left')
layout.addWidget(leftlabel, 2, 0)
layout.addWidget(self.sliderleft, 2, 1)
toplabel = QtGui.QLabel('top')
layout.addWidget(toplabel, 0, 2)
layout.addWidget(self.slidertop, 1, 2)
layout.setAlignment(self.slidertop, QtCore.Qt.AlignHCenter)
bottomlabel = QtGui.QLabel('bottom')
        layout.addWidget(bottomlabel, 4, 2)
layout.addWidget(self.sliderbottom, 3, 2)
layout.setAlignment(self.sliderbottom, QtCore.Qt.AlignHCenter)
rightlabel = QtGui.QLabel('right')
layout.addWidget(rightlabel, 2, 4)
layout.addWidget(self.sliderright, 2, 3)
hspacelabel = QtGui.QLabel('hspace')
layout.addWidget(hspacelabel, 0, 6)
layout.setAlignment(hspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderhspace, 1, 6)
layout.setAlignment(self.sliderhspace, QtCore.Qt.AlignHCenter)
wspacelabel = QtGui.QLabel('wspace')
layout.addWidget(wspacelabel, 4, 6)
layout.setAlignment(wspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderwspace, 3, 6)
layout.setAlignment(self.sliderwspace, QtCore.Qt.AlignBottom)
layout.setRowStretch(1,1)
layout.setRowStretch(3,1)
layout.setColumnStretch(1,1)
layout.setColumnStretch(3,1)
layout.setColumnStretch(6,1)
self.setLayout(layout)
self.sliderleft.setSliderPosition(int(targetfig.subplotpars.left*1000))
self.sliderbottom.setSliderPosition(\
int(targetfig.subplotpars.bottom*1000))
self.sliderright.setSliderPosition(\
int(targetfig.subplotpars.right*1000))
self.slidertop.setSliderPosition(int(targetfig.subplotpars.top*1000))
self.sliderwspace.setSliderPosition(\
int(targetfig.subplotpars.wspace*1000))
self.sliderhspace.setSliderPosition(\
int(targetfig.subplotpars.hspace*1000))
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcleft )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcbottom )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcright )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.functop )
QtCore.QObject.connect( self.sliderwspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcwspace )
QtCore.QObject.connect( self.sliderhspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funchspace )
def funcleft(self, val):
if val == self.sliderright.value():
val -= 1
self.targetfig.subplots_adjust(left=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
if val == self.sliderleft.value():
val += 1
self.targetfig.subplots_adjust(right=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
if val == self.slidertop.value():
val -= 1
self.targetfig.subplots_adjust(bottom=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
if val == self.sliderbottom.value():
val += 1
self.targetfig.subplots_adjust(top=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def error_msg_qt( msg, parent=None ):
if not is_string_like( msg ):
msg = ','.join( map( str,msg ) )
QtGui.QMessageBox.warning( None, "Matplotlib", msg, QtGui.QMessageBox.Ok )
def exception_handler( type, value, tb ):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename != None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror != None:
msg += value.strerror
else:
msg += str(value)
if len( msg ) : error_msg_qt( msg )
FigureManager = FigureManagerQT
| agpl-3.0 |
imaculate/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 34 | 9987 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
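# --- Illustrative sketch (added; not part of the original test module) -------
# Quick sanity check of the generator above: roughly half of the entries are
# masked to zero and only `n_informative` ground-truth coefficients are
# non-zero.  The leading underscore keeps pytest from collecting it; it is
# purely illustrative.
def _make_sparse_data_sketch():
    X, y = make_sparse_data(n_samples=50, n_features=20, n_informative=5)
    density = X.nnz / float(X.shape[0] * X.shape[1])
    return 0.3 < density < 0.7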
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
Fab7c4/paparazzi | sw/tools/tcp_aircraft_server/phoenix/__init__.py | 86 | 4470 | #Copyright 2014, Antoine Drouin
"""
Phoenix is a Python library for interacting with Paparazzi
"""
import math
"""
Unit conversions
"""
def rad_of_deg(d): return d/180.*math.pi
def deg_of_rad(r): return r*180./math.pi
def rps_of_rpm(r): return r*2.*math.pi/60.
def rpm_of_rps(r): return r/2./math.pi*60.
def m_of_inch(i): return i*0.0254
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color' : 'k', 'fontsize' : 20 }
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
    if fig is None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
    if legend is not None:
        ax.legend(legend, loc='best')
    if xlim is not None:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
        ax.set_ylim(ylim[0], ylim[1])
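# Illustrative usage sketch (not part of the original module): build a figure
# with prepare_fig, plot a dummy curve and label it with decorate. The window
# title, axis labels and data values below are arbitrary demonstration choices.
def _example_figure(filename=None):
    fig = prepare_fig(window_title="demo figure")
    ax = fig.add_subplot(1, 1, 1)
    ax.plot([0., 1., 2.], [0., 1., 4.])
    decorate(ax, title="demo", xlab="time (s)", ylab="value", ylim=(0., 5.))
    save_if(filename)  # only writes the figure to disk when a filename is given
    return fig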
"""
Messages
"""
#: dictionary mapping the C type to its length in bytes (e.g char -> 1)
TYPE_TO_LENGTH_MAP = {
"char" : 1,
"uint8" : 1,
"int8" : 1,
"uint16" : 2,
"int16" : 2,
"uint32" : 4,
"int32" : 4,
"float" : 4,
"double" : 8,
}
#: dictionary mapping the C type to correct format string
TYPE_TO_PRINT_MAP = {
float : "%f",
str : "%s",
chr : "%c",
int : "%d"
}
ACID_ALL = 0xFF
ACID_TEST = 0xFE
ACID_GROUNDSTATION = 0xFD
#: dictionary mapping debug types to format characters
DEBUG_MESSAGES = {
"DEBUG_UINT8" : "%d",
"DEBUG_INT32" : "%d",
"DEBUG_FLOAT" : "%#f"
}
"""
Binary logs
See format description in sw/airborne/subsystems/datalink/fms_link.c
"""
import struct
def hex_of_bin(b): return ' '.join( [ "%02X" % ord( x ) for x in b ] )
import pdb
def read_binary_log(filename, tick_freq = 2*512.):
f = open(filename, "rb")
d = f.read()
packet_header_len = 6
msg_header_len = 2
def read_packet(d, packet_start):
payload_start = packet_start+packet_header_len
timestamp, payload_len = struct.unpack("IH", d[packet_start:payload_start])
msgs = read_packet_payload(d, payload_start, payload_len)
next_packet = payload_start+payload_len+2
return timestamp, msgs, next_packet
def read_packet_payload(d, s, l):
msgs = []
packet_end = s+l; msg_start = s
while msg_start<packet_end:
payload_start = msg_start+msg_header_len
msg_len, msg_id = struct.unpack("BB", d[msg_start:payload_start])
payload_end = payload_start+msg_len
msg_payload = d[payload_start:payload_end]
msgs.append([msg_id, msg_payload])
#print msg_id, msg_len, hex_of_bin(msg_payload)
msg_start = payload_end
return msgs
packets = []
packet_start=0
while packet_start<len(d):
timestamp, msgs, next_packet = read_packet(d, packet_start)
packets.append([timestamp/tick_freq, msgs])
#print timestamp, msgs
packet_start = next_packet
f.close()
return packets
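# Illustrative sketch (not part of the original module): build a minimal
# in-memory packet with the same layout that read_binary_log parses above:
# a 6-byte header (uint32 timestamp + uint16 payload length), one message
# encoded as [len, id, payload], and the 2 trailer bytes that read_packet
# skips. The timestamp and message id values are arbitrary.
def _example_packet_bytes():
    msg = struct.pack("BB", 3, 42) + b"abc"      # msg_len=3, msg_id=42, payload
    header = struct.pack("IH", 1024, len(msg))   # timestamp ticks, payload_len
    return header + msg + b"\x00\x00"            # trailing bytes skipped by the parser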
def extract_from_binary_log(protocol, packets, msg_names, t_min=None, t_max=None):
ret = [{'time':[], 'data':[]} for m in msg_names]
    if t_min is None: t_min = packets[0][0]
    if t_max is None: t_max = packets[-1][0]
for t, msgs in packets:
if t>= t_min and t<= t_max:
for id, payload in msgs:
m = protocol.get_message_by_id('telemetry', id)
                try:
                    i = msg_names.index(m.name)
                except ValueError:
                    continue
                ret[i]['time'].append(t); ret[i]['data'].append(m.unpack_scaled_values(payload))
return ret
| gpl-2.0 |
Myasuka/scikit-learn | sklearn/covariance/graph_lasso_.py | 127 | 25626 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
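# Illustrative sketch (not part of scikit-learn): build a decreasing,
# log-spaced grid of penalties from alpha_max, mirroring what GraphLassoCV
# does internally. The 1e-2 lower-bound factor is an arbitrary choice.
def _example_alpha_grid(emp_cov, n_alphas=10):
    alpha_1 = alpha_max(emp_cov)
    alpha_0 = 1e-2 * alpha_1
    return np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1]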
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative standpoint on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
    # The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
            warnings.warn('graph_lasso: did not converge after %i iterations:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
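# Illustrative usage sketch (not part of scikit-learn): estimate a sparse
# precision matrix from random data with the graph_lasso function above.
# The sample size, dimensionality and alpha value are arbitrary.
def _example_graph_lasso_call():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 5)
    emp_cov = empirical_covariance(X)
    covariance, precision = graph_lasso(emp_cov, alpha=0.2)
    return covariance, precision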
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
        printed at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
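# Illustrative usage sketch (not part of scikit-learn): fit the GraphLasso
# estimator defined above on random data and count the non-zero entries of
# the estimated precision matrix. alpha=0.05 is an arbitrary value.
def _example_graph_lasso_estimator():
    rng = np.random.RandomState(42)
    X = rng.randn(100, 6)
    model = GraphLasso(alpha=0.05).fit(X)
    n_nonzero = int(np.sum(np.abs(model.precision_) > 1e-12))
    return model.covariance_, n_nonzero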
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alphas : integer, or list positive float, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
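# Illustrative usage sketch (not part of scikit-learn): let GraphLassoCV
# select the penalty by cross-validation on random data. The grid size,
# number of refinements and fold count are arbitrary demonstration values.
def _example_graph_lasso_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(80, 5)
    model = GraphLassoCV(alphas=4, n_refinements=2, cv=3).fit(X)
    return model.alpha_, model.cv_alphas_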
| bsd-3-clause |
Sentient07/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
pprett/scikit-learn | benchmarks/bench_mnist.py | 45 | 6977 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. In contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LogisticRegression-SAG        16.67s       0.06s       0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LogisticRegression-SAG': LogisticRegression(solver='sag', tol=1e-1,
C=1e4),
'LogisticRegression-SAGA': LogisticRegression(solver='saga', tol=1e-1,
C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
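# Illustrative sketch (not part of the original benchmark): an extra model can
# be added to the dictionary above before running the script; it then becomes
# selectable through the --classifiers option. The hyper-parameter below is an
# arbitrary demonstration value.
def _register_extra_estimator():
    from sklearn.neighbors import KNeighborsClassifier
    ESTIMATORS['KNN'] = KNeighborsClassifier(n_neighbors=3)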
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
                        help="Choose between Fortran- and C-ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
gpetretto/pymatgen | pymatgen/io/abinit/works.py | 2 | 57650 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Works for Abinit
"""
from __future__ import unicode_literals, division, print_function
import os
import shutil
import time
import abc
import collections
import numpy as np
import six
import copy
from six.moves import filter
from monty.collections import AttrDict
from monty.itertools import chunks
from monty.functools import lazy_property
from monty.fnmatch import WildCard
#from monty.dev import deprecated
from pydispatch import dispatcher
from pymatgen.core.units import EnergyArray
from . import wrappers
from .nodes import Dependency, Node, NodeError, NodeResults, check_spectator
from .tasks import (Task, AbinitTask, ScfTask, NscfTask, DfptTask, PhononTask, DdkTask,
BseTask, RelaxTask, DdeTask, BecTask, ScrTask, SigmaTask,
DteTask, EphTask, CollinearThenNonCollinearScfTask)
from .utils import Directory
from .netcdf import ETSF_Reader, NetcdfReader
from .abitimer import AbinitTimerParser
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Work",
"BandStructureWork",
"RelaxWork",
"G0W0Work",
"QptdmWork",
"SigmaConvWork",
"BseMdfWork",
"PhononWork",
]
class WorkResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
@classmethod
def from_node(cls, work):
"""Initialize an instance from a :class:`Work` instance."""
new = super(WorkResults, cls).from_node(work)
# Will put all files found in outdir in GridFs
# Warning: assuming binary files.
d = {os.path.basename(f): f for f in work.outdir.list_filepaths()}
new.register_gridfs_files(**d)
return new
class WorkError(NodeError):
"""Base class for the exceptions raised by Work objects."""
class BaseWork(six.with_metaclass(abc.ABCMeta, Node)):
Error = WorkError
Results = WorkResults
# interface modeled after subprocess.Popen
@property
@abc.abstractmethod
def processes(self):
"""Return a list of objects that support the `subprocess.Popen` protocol."""
def poll(self):
"""
Check if all child processes have terminated. Set and return returncode attribute.
"""
return [task.poll() for task in self]
def wait(self):
"""
Wait for child processed to terminate. Set and return returncode attribute.
"""
return [task.wait() for task in self]
def communicate(self, input=None):
"""
Interact with processes: Send data to stdin. Read data from stdout and
stderr, until end-of-file is reached.
Wait for process to terminate. The optional input argument should be a
string to be sent to the child processed, or None, if no data should be
sent to the children.
communicate() returns a list of tuples (stdoutdata, stderrdata).
"""
return [task.communicate(input) for task in self]
@property
def returncodes(self):
"""
The children return codes, set by poll() and wait() (and indirectly by communicate()).
A None value indicates that the process hasn't terminated yet.
A negative value -N indicates that the child was terminated by signal N (Unix only).
"""
return [task.returncode for task in self]
@property
def ncores_reserved(self):
"""
Returns the number of cores reserved in this moment.
A core is reserved if it's still not running but
we have submitted the task to the queue manager.
"""
return sum(task.manager.num_cores for task in self if task.status == task.S_SUB)
@property
def ncores_allocated(self):
"""
Returns the number of CPUs allocated in this moment.
A core is allocated if it's running a task or if we have
submitted a task to the queue manager but the job is still pending.
"""
return sum(task.manager.num_cores for task in self if task.status in [task.S_SUB, task.S_RUN])
@property
def ncores_used(self):
"""
Returns the number of cores used in this moment.
A core is used if there's a job that is running on it.
"""
return sum(task.manager.num_cores for task in self if task.status == task.S_RUN)
def fetch_task_to_run(self):
"""
Returns the first task that is ready to run or
        None if no task can be submitted at present.
Raises:
`StopIteration` if all tasks are done.
"""
# All the tasks are done so raise an exception
# that will be handled by the client code.
if all(task.is_completed for task in self):
raise StopIteration("All tasks completed.")
for task in self:
if task.can_run:
return task
# No task found, this usually happens when we have dependencies.
# Beware of possible deadlocks here!
logger.warning("Possible deadlock in fetch_task_to_run!")
return None
def fetch_alltasks_to_run(self):
"""
Returns a list with all the tasks that can be submitted.
Empty list if not task has been found.
"""
return [task for task in self if task.can_run]
@abc.abstractmethod
def setup(self, *args, **kwargs):
"""Method called before submitting the calculations."""
def _setup(self, *args, **kwargs):
self.setup(*args, **kwargs)
def connect_signals(self):
"""
Connect the signals within the work.
The :class:`Work` is responsible for catching the important signals raised from
its task and raise new signals when some particular condition occurs.
"""
for task in self:
dispatcher.connect(self.on_ok, signal=task.S_OK, sender=task)
def disconnect_signals(self):
"""
Disable the signals within the work. This function reverses the process of `connect_signals`
"""
for task in self:
try:
dispatcher.disconnect(self.on_ok, signal=task.S_OK, sender=task)
except dispatcher.errors.DispatcherKeyError as exc:
logger.debug(str(exc))
@property
def all_ok(self):
return all(task.status == task.S_OK for task in self)
#@check_spectator
def on_ok(self, sender):
"""
This callback is called when one task reaches status `S_OK`.
It executes on_all_ok when all task in self have reached `S_OK`.
"""
logger.debug("in on_ok with sender %s" % sender)
if self.all_ok:
if self.finalized:
return AttrDict(returncode=0, message="Work has been already finalized")
else:
# Set finalized here, because on_all_ok might change it (e.g. Relax + EOS in a single work)
self.finalized = True
try:
results = AttrDict(**self.on_all_ok())
except Exception as exc:
self.history.critical("on_all_ok raises %s" % str(exc))
self.finalized = False
raise
# Signal to possible observers that the `Work` reached S_OK
self.history.info("Work %s is finalized and broadcasts signal S_OK" % str(self))
if self._finalized:
self.send_signal(self.S_OK)
return results
return AttrDict(returncode=1, message="Not all tasks are OK!")
#@check_spectator
def on_all_ok(self):
"""
This method is called once the `Work` is completed i.e. when all the tasks
have reached status S_OK. Subclasses should provide their own implementation
Returns:
Dictionary that must contain at least the following entries:
returncode:
0 on success.
message:
a string that should provide a human-readable description of what has been performed.
"""
return dict(returncode=0, message="Calling on_all_ok of the base class!")
def get_results(self, **kwargs):
"""
Method called once the calculations are completed.
The base version returns a dictionary task_name: TaskResults for each task in self.
"""
results = self.Results.from_node(self)
return results
def get_graphviz(self, engine="automatic", graph_attr=None, node_attr=None, edge_attr=None):
"""
Generate task graph in the DOT language (only parents and children of this work).
Args:
engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
graph_attr: Mapping of (attribute, value) pairs for the graph.
node_attr: Mapping of (attribute, value) pairs set for all nodes.
edge_attr: Mapping of (attribute, value) pairs set for all edges.
Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
"""
from graphviz import Digraph
fg = Digraph("work", #filename="work_%s.gv" % os.path.basename(self.workdir),
engine="fdp" if engine == "automatic" else engine)
# Set graph attributes.
# https://www.graphviz.org/doc/info/
#fg.attr(label="%s@%s" % (self.__class__.__name__, self.relworkdir))
fg.attr(label=repr(self))
#fg.attr(fontcolor="white", bgcolor='purple:pink')
fg.attr(rankdir="LR", pagedir="BL")
#fg.attr(constraint="false", pack="true", packMode="clust")
fg.node_attr.update(color='lightblue2', style='filled')
#fg.node_attr.update(ranksep='equally')
# Add input attributes.
if graph_attr is not None:
fg.graph_attr.update(**graph_attr)
if node_attr is not None:
fg.node_attr.update(**node_attr)
if edge_attr is not None:
fg.edge_attr.update(**edge_attr)
def node_kwargs(node):
return dict(
#shape="circle",
color=node.color_hex,
label=(str(node) if not hasattr(node, "pos_str") else
node.pos_str + "\n" + node.__class__.__name__),
)
edge_kwargs = dict(arrowType="vee", style="solid")
cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2")
# Build cluster with tasks in *this* work
cluster_name = "cluster%s" % self.name
with fg.subgraph(name=cluster_name) as wg:
wg.attr(**cluster_kwargs)
wg.attr(label="%s (%s)" % (self.__class__.__name__, self.name))
for task in self:
wg.node(task.name, **node_kwargs(task))
# Connect task to children
for child in task.get_children():
# Test if child is in this cluster (self).
myg = wg if child in self else fg
myg.node(child.name, **node_kwargs(child))
# Find file extensions required by this task
i = [dep.node for dep in child.deps].index(task)
edge_label = "+".join(child.deps[i].exts)
myg.edge(task.name, child.name, label=edge_label, color=task.color_hex,
**edge_kwargs)
# Connect task to parents
for parent in task.get_parents():
# Test if parent is in this cluster (self).
myg = wg if parent in self else fg
myg.node(parent.name, **node_kwargs(parent))
# Find file extensions required by this task
i = [dep.node for dep in task.deps].index(parent)
edge_label = "+".join(task.deps[i].exts)
myg.edge(parent.name, task.name, label=edge_label, color=parent.color_hex,
**edge_kwargs)
# Treat the case in which we have a work producing output for tasks in *this* work.
#for work in self.flow:
# children = work.get_children()
# if not children or all(child not in self for child in children):
# continue
# cluster_name = "cluster%s" % work.name
# seen = set()
# for child in children:
# if child not in self: continue
# # This is not needed, too much confusing
# #fg.edge(cluster_name, child.name, color=work.color_hex, **edge_kwargs)
# # Find file extensions required by work
# i = [dep.node for dep in child.deps].index(work)
# for ext in child.deps[i].exts:
# out = "%s (%s)" % (ext, work.name)
# fg.node(out)
# fg.edge(out, child.name, **edge_kwargs)
# key = (cluster_name, out)
# if key not in seen:
# fg.edge(cluster_name, out, color=work.color_hex, **edge_kwargs)
# seen.add(key)
return fg
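# Illustrative usage sketch (not part of pymatgen): render the graph returned
# by get_graphviz to an SVG file. `work` is assumed to be an already allocated
# Work instance and the output basename is arbitrary.
def _example_render_work_graph(work, out_basename="work_graph"):
    graph = work.get_graphviz(engine="dot")
    graph.format = "svg"
    return graph.render(out_basename, cleanup=True)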
class NodeContainer(six.with_metaclass(abc.ABCMeta)):
"""
    Mixin class for `Work` and `Flow` objects providing helper functions
    to register tasks in the container. The helper functions call the
    `register` method of the container.
"""
# TODO: Abstract protocol for containers
@abc.abstractmethod
def register_task(self, *args, **kwargs):
"""
Register a task in the container.
"""
# TODO: shall flow.register_task return a Task or a Work?
# Helper functions
def register_scf_task(self, *args, **kwargs):
"""Register a Scf task."""
kwargs["task_class"] = ScfTask
return self.register_task(*args, **kwargs)
def register_collinear_then_noncollinear_scf_task(self, *args, **kwargs):
"""Register a Scf task that perform a SCF run first with nsppol = 2 and then nspinor = 2"""
kwargs["task_class"] = CollinearThenNonCollinearScfTask
return self.register_task(*args, **kwargs)
def register_nscf_task(self, *args, **kwargs):
"""Register a nscf task."""
kwargs["task_class"] = NscfTask
return self.register_task(*args, **kwargs)
def register_relax_task(self, *args, **kwargs):
"""Register a task for structural optimization."""
kwargs["task_class"] = RelaxTask
return self.register_task(*args, **kwargs)
def register_phonon_task(self, *args, **kwargs):
"""Register a phonon task."""
kwargs["task_class"] = PhononTask
return self.register_task(*args, **kwargs)
def register_ddk_task(self, *args, **kwargs):
"""Register a ddk task."""
kwargs["task_class"] = DdkTask
return self.register_task(*args, **kwargs)
def register_scr_task(self, *args, **kwargs):
"""Register a screening task."""
kwargs["task_class"] = ScrTask
return self.register_task(*args, **kwargs)
def register_sigma_task(self, *args, **kwargs):
"""Register a sigma task."""
kwargs["task_class"] = SigmaTask
return self.register_task(*args, **kwargs)
# TODO: Remove
def register_dde_task(self, *args, **kwargs):
"""Register a Dde task."""
kwargs["task_class"] = DdeTask
return self.register_task(*args, **kwargs)
def register_dte_task(self, *args, **kwargs):
"""Register a Dte task."""
kwargs["task_class"] = DteTask
return self.register_task(*args, **kwargs)
def register_bec_task(self, *args, **kwargs):
"""Register a BEC task."""
kwargs["task_class"] = BecTask
return self.register_task(*args, **kwargs)
def register_bse_task(self, *args, **kwargs):
"""Register a Bethe-Salpeter task."""
kwargs["task_class"] = BseTask
return self.register_task(*args, **kwargs)
def register_eph_task(self, *args, **kwargs):
"""Register an electron-phonon task."""
kwargs["task_class"] = EphTask
return self.register_task(*args, **kwargs)
def walknset_vars(self, task_class=None, *args, **kwargs):
"""
Set the values of the ABINIT variables in the input files of the nodes
Args:
task_class: If not None, only the input files of the tasks belonging
to class `task_class` are modified.
Example:
flow.walknset_vars(ecut=10, kptopt=4)
"""
def change_task(task):
if task_class is not None and task.__class__ is not task_class: return False
return True
if self.is_work:
for task in self:
if not change_task(task): continue
task.set_vars(*args, **kwargs)
elif self.is_flow:
for task in self.iflat_tasks():
if not change_task(task): continue
task.set_vars(*args, **kwargs)
else:
raise TypeError("Don't know how to set variables for object class %s" % self.__class__.__name__)
class Work(BaseWork, NodeContainer):
"""
A Work is a list of (possibly connected) tasks.
"""
def __init__(self, workdir=None, manager=None):
"""
Args:
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
super(Work, self).__init__()
self._tasks = []
if workdir is not None:
self.set_workdir(workdir)
if manager is not None:
self.set_manager(manager)
def set_manager(self, manager):
"""Set the :class:`TaskManager` to use to launch the :class:`Task`."""
self.manager = manager.deepcopy()
for task in self:
task.set_manager(manager)
@property
def flow(self):
"""The flow containing this :class:`Work`."""
return self._flow
def set_flow(self, flow):
"""Set the flow associated to this :class:`Work`."""
if not hasattr(self, "_flow"):
self._flow = flow
else:
if self._flow != flow:
raise ValueError("self._flow != flow")
@lazy_property
def pos(self):
"""The position of self in the :class:`Flow`"""
for i, work in enumerate(self.flow):
if self == work:
return i
raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))
@property
def pos_str(self):
"""String representation of self.pos"""
return "w" + str(self.pos)
def set_workdir(self, workdir, chroot=False):
"""Set the working directory. Cannot be set more than once unless chroot is True"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
self.workdir = os.path.abspath(workdir)
# Directories with (input|output|temporary) data.
# The work will use these directories to connect
# itself to other works and/or to produce new data
# that will be used by its children.
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
self.wdir = Directory(self.workdir)
def chroot(self, new_workdir):
self.set_workdir(new_workdir, chroot=True)
for i, task in enumerate(self):
new_tdir = os.path.join(self.workdir, "t" + str(i))
task.set_workdir(new_tdir, chroot=True)
def __len__(self):
return len(self._tasks)
def __iter__(self):
return self._tasks.__iter__()
def __getitem__(self, slice):
return self._tasks[slice]
def chunks(self, chunk_size):
"""Yield successive chunks of tasks of lenght chunk_size."""
for tasks in chunks(self, chunk_size):
yield tasks
    def ipath_from_ext(self, ext):
        """
        Returns the path of the input file with extension ext.
        Use it when the file does not exist yet.
        """
        return self.indir.path_in("in_" + ext)
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return self.outdir.path_in("out_" + ext)
@property
def processes(self):
return [task.process for task in self]
@property
def all_done(self):
"""True if all the :class:`Task` objects in the :class:`Work` are done."""
return all(task.status >= task.S_DONE for task in self)
@property
def isnc(self):
"""True if norm-conserving calculation."""
return all(task.isnc for task in self)
@property
def ispaw(self):
"""True if PAW calculation."""
return all(task.ispaw for task in self)
@property
def status_counter(self):
"""
Returns a `Counter` object that counts the number of task with
given status (use the string representation of the status as key).
"""
counter = collections.Counter()
for task in self:
counter[str(task.status)] += 1
return counter
def allocate(self, manager=None):
"""
This function is called once we have completed the initialization
of the :class:`Work`. It sets the manager of each task (if not already done)
and defines the working directories of the tasks.
Args:
manager: :class:`TaskManager` object or None
"""
for i, task in enumerate(self):
if not hasattr(task, "manager"):
# Set the manager
# Use the one provided in input else the one of the work/flow.
if manager is not None:
task.set_manager(manager)
else:
# Look first in work and then in the flow.
if hasattr(self, "manager"):
task.set_manager(self.manager)
else:
task.set_manager(self.flow.manager)
task_workdir = os.path.join(self.workdir, "t" + str(i))
if not hasattr(task, "workdir"):
task.set_workdir(task_workdir)
else:
if task.workdir != task_workdir:
raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, task_workdir))
def register(self, obj, deps=None, required_files=None, manager=None, task_class=None):
"""
Registers a new :class:`Task` and add it to the internal list, taking into account possible dependencies.
Args:
obj: :class:`AbinitInput` instance or `Task` object.
deps: Dictionary specifying the dependency of this node or list of dependencies
None means that this obj has no dependency.
required_files: List of strings with the path of the files used by the task.
Note that the files must exist when the task is registered.
Use the standard approach based on Works, Tasks and deps
if the files will be produced in the future.
manager:
The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use
the `TaskManager` specified during the creation of the :class:`Work`.
task_class: Task subclass to instantiate. Default: :class:`AbinitTask`
Returns:
:class:`Task` object
"""
task_workdir = None
if hasattr(self, "workdir"):
task_workdir = os.path.join(self.workdir, "t" + str(len(self)))
if isinstance(obj, Task):
task = obj
else:
# Set the class
if task_class is None:
task_class = AbinitTask
task = task_class.from_input(obj, task_workdir, manager)
self._tasks.append(task)
# Handle possible dependencies given either as dict or list.
if deps is not None:
if hasattr(deps, "items"):
deps = [Dependency(node, exts) for node, exts in deps.items()]
task.add_deps(deps)
# Handle possible dependencies.
if required_files is not None:
task.add_required_files(required_files)
return task
# Needed by NodeContainer
register_task = register
def path_in_workdir(self, filename):
"""Create the absolute path of filename in the working directory."""
return os.path.join(self.workdir, filename)
def setup(self, *args, **kwargs):
"""
Method called before running the calculations.
The default implementation is empty.
"""
def build(self, *args, **kwargs):
"""Creates the top level directory."""
# Create the directories of the work.
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Build dirs and files of each task.
for task in self:
task.build(*args, **kwargs)
# Connect signals within the work.
self.connect_signals()
@property
def status(self):
"""
Returns the status of the work i.e. the minimum of the status of the tasks.
"""
return self.get_all_status(only_min=True)
def get_all_status(self, only_min=False):
"""
Returns a list with the status of the tasks in self.
Args:
only_min: If True, the minimum of the status is returned.
"""
if len(self) == 0:
# The work will be created in the future.
if only_min:
return self.S_INIT
else:
return [self.S_INIT]
self.check_status()
status_list = [task.status for task in self]
if only_min:
return min(status_list)
else:
return status_list
def check_status(self):
"""Check the status of the tasks."""
# Recompute the status of the tasks
# Ignore OK and LOCKED tasks.
for task in self:
if task.status in (task.S_OK, task.S_LOCKED): continue
task.check_status()
# Take into account possible dependencies. Use a list instead of generators
for task in self:
if task.status == task.S_LOCKED: continue
if task.status < task.S_SUB and all(status == task.S_OK for status in task.deps_status):
task.set_status(task.S_READY, "Status set to Ready")
def rmtree(self, exclude_wildcard=""):
"""
Remove all files and directories in the working directory
Args:
exclude_wildcard: Optional string with regular expressions separated by `|`.
Files matching one of the regular expressions will be preserved.
example: exclude_wildard="*.nc|*.txt" preserves all the files
whose extension is in ["nc", "txt"].
"""
if not exclude_wildcard:
shutil.rmtree(self.workdir)
else:
w = WildCard(exclude_wildcard)
for dirpath, dirnames, filenames in os.walk(self.workdir):
for fname in filenames:
path = os.path.join(dirpath, fname)
if not w.match(fname):
os.remove(path)
def rm_indatadir(self):
"""Remove all the indata directories."""
for task in self:
task.rm_indatadir()
def rm_outdatadir(self):
"""Remove all the outdata directories."""
for task in self:
task.rm_outdatadir()
def rm_tmpdatadir(self):
"""Remove all the tmpdata directories."""
for task in self:
task.rm_tmpdatadir()
def move(self, dest, isabspath=False):
"""
Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
The destination path must not already exist. If the destination already exists
but is not a directory, it may be overwritten depending on os.rename() semantics.
By default, dest is located in the parent directory of self.workdir, use isabspath=True
to specify an absolute path.
"""
if not isabspath:
dest = os.path.join(os.path.dirname(self.workdir), dest)
shutil.move(self.workdir, dest)
def submit_tasks(self, wait=False):
"""
Submits the task in self and wait.
TODO: change name.
"""
for task in self:
task.start()
if wait:
for task in self: task.wait()
def start(self, *args, **kwargs):
"""
Start the work. Calls build and _setup first, then submit the tasks.
Non-blocking call unless wait is set to True
"""
wait = kwargs.pop("wait", False)
# Initial setup
self._setup(*args, **kwargs)
# Build dirs and files.
self.build(*args, **kwargs)
# Submit tasks (does not block)
self.submit_tasks(wait=wait)
def read_etotals(self, unit="Ha"):
"""
Reads the total energy from the GSR file produced by the task.
Return a numpy array with the total energies in Hartree
The array element is set to np.inf if an exception is raised while reading the GSR file.
"""
if not self.all_done:
raise self.Error("Some task is still in running/submitted state")
etotals = []
for task in self:
# Open the GSR file and read etotal (Hartree)
gsr_path = task.outdir.has_abiext("GSR")
etot = np.inf
if gsr_path:
with ETSF_Reader(gsr_path) as r:
etot = r.read_value("etotal")
etotals.append(etot)
return EnergyArray(etotals, "Ha").to(unit)
def parse_timers(self):
"""
Parse the TIMER section reported in the ABINIT output files.
Returns:
:class:`AbinitTimerParser` object
"""
filenames = list(filter(os.path.exists, [task.output_file.path for task in self]))
parser = AbinitTimerParser()
parser.parse(filenames)
return parser
class BandStructureWork(Work):
"""Work for band structure calculations."""
def __init__(self, scf_input, nscf_input, dos_inputs=None, workdir=None, manager=None):
"""
Args:
scf_input: Input for the SCF run
nscf_input: Input for the NSCF run defining the band structure calculation.
dos_inputs: Input(s) for the DOS. DOS is computed only if dos_inputs is not None.
workdir: Working directory.
manager: :class:`TaskManager` object.
"""
super(BandStructureWork, self).__init__(workdir=workdir, manager=manager)
# Register the GS-SCF run.
self.scf_task = self.register_scf_task(scf_input)
# Register the NSCF run and its dependency.
self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
# Add DOS computation(s) if requested.
self.dos_tasks = []
if dos_inputs is not None:
if not isinstance(dos_inputs, (list, tuple)):
dos_inputs = [dos_inputs]
for dos_input in dos_inputs:
dos_task = self.register_nscf_task(dos_input, deps={self.scf_task: "DEN"})
self.dos_tasks.append(dos_task)
def plot_ebands(self, **kwargs):
"""
Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`.
Returns:
`matplotlib` figure
"""
with self.nscf_task.open_gsr() as gsr:
return gsr.ebands.plot(**kwargs)
def plot_ebands_with_edos(self, dos_pos=0, method="gaussian", step=0.01, width=0.1, **kwargs):
"""
Plot the band structure and the DOS.
Args:
dos_pos: Index of the task from which the DOS should be obtained (note: 0 refers to the first DOS task).
method: String defining the method for the computation of the DOS.
step: Energy step (eV) of the linear mesh.
width: Standard deviation (eV) of the gaussian.
kwargs: Keyword arguments passed to `plot_with_edos` method to customize the plot.
Returns:
`matplotlib` figure.
"""
with self.nscf_task.open_gsr() as gsr:
gs_ebands = gsr.ebands
with self.dos_tasks[dos_pos].open_gsr() as gsr:
dos_ebands = gsr.ebands
edos = dos_ebands.get_edos(method=method, step=step, width=width)
return gs_ebands.plot_with_edos(edos, **kwargs)
def plot_edoses(self, dos_pos=None, method="gaussian", step=0.01, width=0.1, **kwargs):
"""
Plot the electron DOS(es).
Args:
dos_pos: Index of the task from which the DOS should be obtained.
None if all DOSes should be displayed. Accepts an integer or a list of integers.
method: String defining the method for the computation of the DOS.
step: Energy step (eV) of the linear mesh.
width: Standard deviation (eV) of the gaussian.
kwargs: Keyword arguments passed to `plot` method to customize the plot.
Returns:
`matplotlib` figure.
"""
if dos_pos is not None and not isinstance(dos_pos, (list, tuple)): dos_pos = [dos_pos]
from abipy.electrons.ebands import ElectronDosPlotter
plotter = ElectronDosPlotter()
for i, task in enumerate(self.dos_tasks):
if dos_pos is not None and i not in dos_pos: continue
with task.open_gsr() as gsr:
edos = gsr.ebands.get_edos(method=method, step=step, width=width)
ngkpt = task.get_inpvar("ngkpt")
plotter.add_edos("ngkpt %s" % str(ngkpt), edos)
return plotter.combiplot(**kwargs)
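# Hypothetical usage sketch (the input objects are assumptions, not defined here):
#
#   work = BandStructureWork(scf_input, nscf_input, dos_inputs=[dos_input])
#   # ... after the flow has completed ...
#   fig = work.plot_ebands()
#   fig = work.plot_ebands_with_edos(dos_pos=0, method="gaussian")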
class RelaxWork(Work):
"""
Work for structural relaxations. The first task relaxes the atomic position
while keeping the unit cell parameters fixed. The second task uses the final
structure to perform a structural relaxation in which both the atomic positions
and the lattice parameters are optimized.
"""
def __init__(self, ion_input, ioncell_input, workdir=None, manager=None, target_dilatmx=None):
"""
Args:
ion_input: Input for the relaxation of the ions (cell is fixed)
ioncell_input: Input for the relaxation of the ions and the unit cell.
workdir: Working directory.
manager: :class:`TaskManager` object.
"""
super(RelaxWork, self).__init__(workdir=workdir, manager=manager)
self.ion_task = self.register_relax_task(ion_input)
# Note:
# 1) It would be nice to restart from the WFK file but ABINIT crashes due to the
# different unit cell parameters if paral_kgb == 1
#paral_kgb = ion_input[0]["paral_kgb"]
#if paral_kgb == 1:
#deps = {self.ion_task: "WFK"} # --> FIXME: Problem in rwwf
#deps = {self.ion_task: "DEN"}
deps = None
self.ioncell_task = self.register_relax_task(ioncell_input, deps=deps)
# Lock ioncell_task as ion_task should communicate to ioncell_task that
# the calculation is OK and pass the final structure.
self.ioncell_task.lock(source_node=self)
self.transfer_done = False
self.target_dilatmx = target_dilatmx
#@check_spectator
def on_ok(self, sender):
"""
This callback is called when one task reaches status S_OK.
If sender == self.ion_task, we update the initial structure
used by self.ioncell_task and we unlock it so that the job can be submitted.
"""
logger.debug("in on_ok with sender %s" % sender)
if sender == self.ion_task and not self.transfer_done:
# Get the relaxed structure from ion_task
ion_structure = self.ion_task.get_final_structure()
# Transfer it to the ioncell task (we do it only once).
self.ioncell_task._change_structure(ion_structure)
self.transfer_done = True
# Unlock ioncell_task so that we can submit it.
self.ioncell_task.unlock(source_node=self)
elif sender == self.ioncell_task and self.target_dilatmx:
actual_dilatmx = self.ioncell_task.get_inpvar('dilatmx', 1.)
if self.target_dilatmx < actual_dilatmx:
self.ioncell_task.reduce_dilatmx(target=self.target_dilatmx)
logger.info('Converging dilatmx. Value reduced from {} to {}.'
.format(actual_dilatmx, self.ioncell_task.get_inpvar('dilatmx')))
self.ioncell_task.reset_from_scratch()
return super(RelaxWork, self).on_ok(sender)
def plot_ion_relaxation(self, **kwargs):
"""
Plot the history of the ion-cell relaxation.
kwargs are passed to the plot method of :class:`HistFile`
Return `matplotlib` figure or None if hist file is not found.
"""
with self.ion_task.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
def plot_ioncell_relaxation(self, **kwargs):
"""
Plot the history of the ion-cell relaxation.
kwargs are passed to the plot method of :class:`HistFile`
Return `matplotlib` figure or None if hist file is not found.
"""
with self.ioncell_task.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
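# Hypothetical usage sketch (inputs and the dilatmx value are made up): a two-step
# relaxation in which the ionic relaxation unlocks the ion+cell relaxation via on_ok:
#
#   work = RelaxWork(ion_input, ioncell_input, target_dilatmx=1.01)
#   # ... after completion ...
#   fig = work.plot_ion_relaxation()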
class G0W0Work(Work):
"""
Work for general G0W0 calculations.
All input can be either single inputs or lists of inputs
"""
def __init__(self, scf_inputs, nscf_inputs, scr_inputs, sigma_inputs,
workdir=None, manager=None):
"""
Args:
scf_inputs: Input(s) for the SCF run, if it is a list add all but only link
to the last input (used for convergence studies on the KS band gap)
nscf_inputs: Input(s) for the NSCF run, if it is a list add all but only
link to the last (i.e. additional DOS and BANDS)
scr_inputs: Input for the screening run
sigma_inputs: List of :class:`AbinitInput` for the self-energy run.
if scr and sigma are lists of the same length, every sigma gets its own screening.
if there is only one screening all sigma inputs are linked to this one
workdir: Working directory of the calculation.
manager: :class:`TaskManager` object.
"""
super(G0W0Work, self).__init__(workdir=workdir, manager=manager)
spread_scr = (isinstance(sigma_inputs, (list, tuple)) and
isinstance(scr_inputs, (list, tuple)) and
len(sigma_inputs) == len(scr_inputs))
#print("spread_scr", spread_scr)
self.sigma_tasks = []
# Register the GS-SCF run.
# register all scf_inputs but link the NSCF run only to the last SCF task in the list
# multiple scf_inputs can be provided to perform convergence studies
if isinstance(scf_inputs, (list, tuple)):
for scf_input in scf_inputs:
self.scf_task = self.register_scf_task(scf_input)
else:
self.scf_task = self.register_scf_task(scf_inputs)
# Register the NSCF run (s).
if isinstance(nscf_inputs, (list, tuple)):
for nscf_input in nscf_inputs:
self.nscf_task = nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
else:
self.nscf_task = nscf_task = self.register_nscf_task(nscf_inputs, deps={self.scf_task: "DEN"})
# Register the SCR and SIGMA run(s).
if spread_scr:
for scr_input, sigma_input in zip(scr_inputs, sigma_inputs):
scr_task = self.register_scr_task(scr_input, deps={nscf_task: "WFK"})
sigma_task = self.register_sigma_task(sigma_input, deps={nscf_task: "WFK", scr_task: "SCR"})
self.sigma_tasks.append(sigma_task)
else:
# Sigma work(s) connected to the same screening.
scr_task = self.register_scr_task(scr_inputs, deps={nscf_task: "WFK"})
if isinstance(sigma_inputs, (list, tuple)):
for inp in sigma_inputs:
task = self.register_sigma_task(inp, deps={nscf_task: "WFK", scr_task: "SCR"})
self.sigma_tasks.append(task)
else:
task = self.register_sigma_task(sigma_inputs, deps={nscf_task: "WFK", scr_task: "SCR"})
self.sigma_tasks.append(task)
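# Hypothetical usage sketch (inputs are assumptions): one SCF, one NSCF, one screening
# and several self-energy calculations sharing the same SCR file:
#
#   work = G0W0Work(scf_input, nscf_input, scr_input, [sig_inp1, sig_inp2])
#
# Passing lists of equal length for scr_inputs and sigma_inputs instead pairs each
# sigma task with its own screening task.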
class SigmaConvWork(Work):
"""
Work for self-energy convergence studies.
"""
def __init__(self, wfk_node, scr_node, sigma_inputs, workdir=None, manager=None):
"""
Args:
wfk_node: The node who has produced the WFK file or filepath pointing to the WFK file.
scr_node: The node who has produced the SCR file or filepath pointing to the SCR file.
sigma_inputs: List of :class:`AbinitInput` for the self-energy runs.
workdir: Working directory of the calculation.
manager: :class:`TaskManager` object.
"""
# Cast to node instances.
wfk_node, scr_node = Node.as_node(wfk_node), Node.as_node(scr_node)
super(SigmaConvWork, self).__init__(workdir=workdir, manager=manager)
# Register the SIGMA runs.
if not isinstance(sigma_inputs, (list, tuple)):
sigma_inputs = [sigma_inputs]
for sigma_input in sigma_inputs:
self.register_sigma_task(sigma_input, deps={wfk_node: "WFK", scr_node: "SCR"})
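# Hypothetical usage sketch (paths and inputs are assumptions): converge the
# self-energy reusing precomputed WFK and SCR files:
#
#   work = SigmaConvWork("run/w0/t1/outdata/out_WFK", "run/w0/t2/outdata/out_SCR",
#                        sigma_inputs=[sig_inp1, sig_inp2])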
class BseMdfWork(Work):
"""
Work for simple BSE calculations in which the self-energy corrections
are approximated by the scissors operator and the screening is modeled
with the model dielectric function.
"""
def __init__(self, scf_input, nscf_input, bse_inputs, workdir=None, manager=None):
"""
Args:
scf_input: Input for the SCF run.
nscf_input: Input for the NSCF run.
bse_inputs: List of Inputs for the BSE run.
workdir: Working directory of the calculation.
manager: :class:`TaskManager`.
"""
super(BseMdfWork, self).__init__(workdir=workdir, manager=manager)
# Register the GS-SCF run.
self.scf_task = self.register_scf_task(scf_input)
# Construct the input for the NSCF run.
self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
# Construct the input(s) for the BSE run.
if not isinstance(bse_inputs, (list, tuple)):
bse_inputs = [bse_inputs]
for bse_input in bse_inputs:
self.register_bse_task(bse_input, deps={self.nscf_task: "WFK"})
def get_mdf_robot(self):
"""Builds and returns a :class:`MdfRobot` for analyzing the results in the MDF files."""
from abilab.robots import MdfRobot
robot = MdfRobot()
for task in self[2:]:
mdf_path = task.outdir.has_abiext(robot.EXT)
if mdf_path:
robot.add_file(str(task), mdf_path)
return robot
#def plot_conv_mdf(self, **kwargs)
# with self.get_mdf_robot() as robot:
# plotter = robot.get_mdf_plotter()
# plotter.plot(**kwargs)
class QptdmWork(Work):
"""
This work parallelizes the calculation of the q-points of the screening.
It also provides the callback `on_all_ok` that calls mrgscr to merge
all the partial screening files produced.
"""
def create_tasks(self, wfk_file, scr_input):
"""
Create the SCR tasks and register them in self.
Args:
wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
scr_input: Input for the screening calculation.
"""
assert len(self) == 0
wfk_file = self.wfk_file = os.path.abspath(wfk_file)
# Build a temporary work in the tmpdir that will use a shell manager
# to run ABINIT in order to get the list of q-points for the screening.
shell_manager = self.manager.to_shell_manager(mpi_procs=1)
w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)
fake_input = scr_input.deepcopy()
fake_task = w.register(fake_input)
w.allocate()
w.build()
# Create the symbolic link and add the magic value
# nqptdm = -1 to the input to get the list of q-points.
fake_task.inlink_file(wfk_file)
fake_task.set_vars({"nqptdm": -1})
fake_task.start_and_wait()
# Parse the section with the q-points
with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
qpoints = reader.read_value("reduced_coordinates_of_kpoints")
#print(qpoints)
#w.rmtree()
# Now we can register the task for the different q-points
for qpoint in qpoints:
qptdm_input = scr_input.deepcopy()
qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
new_task = self.register_scr_task(qptdm_input, manager=self.manager)
# Add the garbage collector.
if self.flow.gc is not None:
new_task.set_gc(self.flow.gc)
self.allocate()
def merge_scrfiles(self, remove_scrfiles=True):
"""
This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`.
If remove_scrfiles is True, the partial SCR files are removed after the merge.
"""
scr_files = list(filter(None, [task.outdir.has_abiext("SCR") for task in self]))
logger.debug("will call mrgscr to merge %s:\n" % str(scr_files))
assert len(scr_files) == len(self)
mrgscr = wrappers.Mrgscr(manager=self[0].manager, verbose=1)
final_scr = mrgscr.merge_qpoints(self.outdir.path, scr_files, out_prefix="out")
if remove_scrfiles:
for scr_file in scr_files:
try:
os.remove(scr_file)
except IOError:
pass
return final_scr
#@check_spectator
def on_all_ok(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`.
"""
final_scr = self.merge_scrfiles()
return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr)
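# Hypothetical usage sketch (paths, inputs and the manager are assumptions): the work
# is first created empty, then populated with one SCR task per q-point:
#
#   work = QptdmWork(manager=manager)
#   work.create_tasks("run/w0/t1/outdata/out_WFK", scr_input)
#
# on_all_ok then merges the partial SCR files with mrgscr.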
class MergeDdb(object):
"""Mixin class for Works that have to merge the DDB files produced by the tasks."""
def merge_ddb_files(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
Returns:
path to the output DDB file
"""
ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in self \
if isinstance(task, DfptTask)]))
self.history.info("Will call mrgddb to merge %s:\n" % str(ddb_files))
# DDB files are always produced so this should never happen!
if not ddb_files:
raise RuntimeError("Cannot find any DDB file to merge among the tasks of %s" % self)
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
if len(ddb_files) == 1:
# Avoid the merge. Just copy the DDB file to the outdir of the work.
shutil.copy(ddb_files[0], out_ddb)
else:
# Call mrgddb
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self[0].manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)
return out_ddb
class PhononWork(Work, MergeDdb):
"""
This work usually consists of one GS + nirred Phonon tasks where nirred is
the number of irreducible perturbations for a given q-point.
It provides the callback method (on_all_ok) that calls mrgddb (mrgdv) to merge
all the partial DDB (POT) files produced. The two files are available in the
output directory of the Work.
"""
@classmethod
def from_scf_task(cls, scf_task, qpoints, is_ngqpt=False, tolerance=None, manager=None):
"""
Construct a `PhononWork` from a :class:`ScfTask` object.
The input file for phonons is automatically generated from the input of the ScfTask.
Each phonon task depends on the WFK file produced by scf_task.
Args:
scf_task: ScfTask object.
qpoints: q-points in reduced coordinates. Accepts single q-point, list of q-points
or three integers defining the q-mesh if `is_ngqpt`.
is_ngqpt: True if `qpoints` should be interpreted as divisions instead of q-points.
tolerance: dict {varname: value} with the tolerance to be used in the DFPT run.
Defaults to {"tolvrs": 1.0e-10}.
manager: :class:`TaskManager` object.
"""
if not isinstance(scf_task, ScfTask):
raise TypeError("task %s does not inherit from ScfTask" % scf_task)
if is_ngqpt:
qpoints = scf_task.input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points
qpoints = np.reshape(qpoints, (-1, 3))
new = cls(manager=manager)
for qpt in qpoints:
multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
for ph_inp in multi:
new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})
return new
@classmethod
def from_scf_input(cls, scf_input, qpoints, is_ngqpt=False, tolerance=None, manager=None):
"""
Similar to `from_scf_task`, the difference is that this method requires
an input for SCF calculation instead of a ScfTask. All the tasks (Scf + Phonon)
are packed in a single Work whereas in the previous case we usually have multiple works.
"""
if is_ngqpt:
qpoints = scf_input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points
qpoints = np.reshape(qpoints, (-1, 3))
new = cls(manager=manager)
scf_task = new.register_scf_task(scf_input)
for qpt in qpoints:
multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
for ph_inp in multi:
new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})
return new
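# Hypothetical usage sketch (scf_task/scf_input and the q-mesh are assumptions):
#
#   ph_work = PhononWork.from_scf_task(scf_task, qpoints=[4, 4, 4], is_ngqpt=True)
#   # or, packing GS + phonons in a single work:
#   ph_work = PhononWork.from_scf_input(scf_input, qpoints=[[0, 0, 0]])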
def merge_pot1_files(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgdvdb` in sequential on the local machine to produce
the final DVDB file in the outdir of the `Work`.
Returns:
path to the output DVDB file. None if no DFPT POT file is found.
"""
pot1_files = []
for task in self:
if not isinstance(task, DfptTask): continue
pot1_files.extend(task.outdir.list_filepaths(wildcard="*_POT*"))
# prtpot=0 disables the output of the DFPT POT files so an empty list is not fatal here.
if not pot1_files: return None
self.history.info("Will call mrgdvdb to merge %s:\n" % str(pot1_files))
# Final DDB file will be produced in the outdir of the work.
out_dvdb = self.outdir.path_in("out_DVDB")
if len(pot1_files) == 1:
# Avoid the merge. Just copy the POT file to the outdir of the work
shutil.copy(pot1_files[0], out_dvdb)
else:
mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0)
mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb)
return out_dvdb
#@check_spectator
def on_all_ok(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
# Merge DVDB files.
out_dvdb = self.merge_pot1_files()
results = self.Results(node=self, returncode=0, message="DDB merge done")
results.register_gridfs_files(DDB=(out_ddb, "t"))
return results
class BecWork(Work, MergeDdb):
"""
Work for the computation of the Born effective charges.
This work consists of DDK tasks and phonon + electric field perturbation
It provides the callback method (on_all_ok) that calls mrgddb to merge the
partial DDB files produced by the work.
"""
@classmethod
def from_scf_task(cls, scf_task, ddk_tolerance=None):
"""Build a BecWork from a ground-state task."""
if not isinstance(scf_task, ScfTask):
raise TypeError("task %s does not inherit from GsTask" % scf_task)
new = cls() #manager=scf_task.manager)
# DDK calculations
multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)
ddk_tasks = []
for ddk_inp in multi_ddk:
ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
ddk_tasks.append(ddk_task)
# Build the list of inputs for electric field perturbation and phonons
# Each bec task is connected to all the previous DDK task and to the scf_task.
bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
bec_deps.update({scf_task: "WFK"})
bec_inputs = scf_task.input.make_bec_inputs() #tolerance=efile
for bec_inp in bec_inputs:
new.register_bec_task(bec_inp, deps=bec_deps)
return new
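# Hypothetical usage sketch (scf_task and the tolerance value are assumptions): DDK tasks
# plus phonon + electric-field perturbations, merged into a single DDB on completion:
#
#   bec_work = BecWork.from_scf_task(scf_task, ddk_tolerance={"tolwfr": 1e-20})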
def on_all_ok(self):
"""
This method is called when all the tasks reach S_OK.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
results = self.Results(node=self, returncode=0, message="DDB merge done")
results.register_gridfs_files(DDB=(out_ddb, "t"))
return results
class DteWork(Work, MergeDdb):
"""
Work for the computation of the third derivative of the energy.
This work consists of DDK tasks and electric field perturbation.
It provides the callback method (on_all_ok) that calls mrgddb to merge the partial DDB files produced
"""
@classmethod
def from_scf_task(cls, scf_task, ddk_tolerance=None):
"""Build a DteWork from a ground-state task."""
if not isinstance(scf_task, ScfTask):
raise TypeError("task %s does not inherit from GsTask" % scf_task)
new = cls() #manager=scf_task.manager)
# DDK calculations
multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)
ddk_tasks = []
for ddk_inp in multi_ddk:
ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
ddk_tasks.append(ddk_task)
# Build the list of inputs for electric field perturbation
# Each task is connected to all the previous DDK, DDE task and to the scf_task.
multi_dde = scf_task.input.make_dde_inputs(use_symmetries=False)
# To compute the nonlinear coefficients all the directions of the perturbation
# have to be taken into consideration
# DDE calculations
dde_tasks = []
dde_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
dde_deps.update({scf_task: "WFK"})
for dde_inp in multi_dde:
dde_task = new.register_dde_task(dde_inp, deps=dde_deps)
dde_tasks.append(dde_task)
#DTE calculations
dte_deps = {scf_task: "WFK DEN"}
dte_deps.update({dde_task: "1WF 1DEN" for dde_task in dde_tasks})
multi_dte = scf_task.input.make_dte_inputs()
dte_tasks = []
for dte_inp in multi_dte:
dte_task = new.register_dte_task(dte_inp, deps=dte_deps)
dte_tasks.append(dte_task)
return new
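# Hypothetical usage sketch (scf_task is an assumption): DDK + DDE + DTE chain built
# from a completed ground-state task:
#
#   dte_work = DteWork.from_scf_task(scf_task)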
def on_all_ok(self):
"""
This method is called when all the tasks reach S_OK.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
results = self.Results(node=self, returncode=0, message="DDB merge done")
results.register_gridfs_files(DDB=(out_ddb, "t"))
return results
| mit |
dataplumber/nexus | climatology/clim/climatology1.py | 2 | 10033 | """
climatology.py
Compute a multi-epoch (multi-day) climatology from daily SST Level-3 grids.
Simple code to be run on Spark cluster, or using multi-core parallelism on single machine.
"""
import sys, os, calendar, urlparse, urllib
from datetime import datetime
import numpy as N
from variables import getVariables, close
#from timePartitions import partitionFilesByKey
from split import fixedSplit
#from stats import Stats
from pathos.multiprocessing import ProcessingPool as Pool
from plotlib import imageMap, makeMovie
#from gaussInterp import gaussInterp
VERBOSE = 1
# Possible execution modes
# Multicore & cluster modes use pathos pool.map(); Spark mode uses PySpark cluster.
ExecutionModes = ['sequential', 'multicore', 'cluster', 'spark']
# SST L3m 4.6km Metadata
# SST values are scaled integers in degrees Celsius, lat/lon is 4320 x 8640
# Variable = 'sst', Mask = 'qual_sst', Coordinates = ['lat', 'lon']
# Generate algorithmic name for N-day Climatology product
SSTClimatologyTemplate = 'SST.L3.Global.Clim.%(period)s.%(date)s.%(version)s.nc' #??
# Simple mask and average functions to get us started, then add gaussian interpolation.
# MODIS L3 SST product, qual_sst is [-1, 2] - =0 is best data, can add =1 for better coverage
def qcMask(var, mask): return N.ma.array(var, mask=N.ma.make_mask(mask))
#def qcMask(var, mask): return N.ma.masked_where(mask != 0, var)
def average(a): return N.ma.mean(a, axis=0)
#AveragingFunctions = {'pixelAverage': average, 'gaussInterp': gaussInterp}
AveragingFunctions = {'pixelAverage': average, 'gaussInterp': average}
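# Minimal illustration of the masking/averaging step (arrays below are made-up values):
#
#   sst = N.array([[20.1, 21.3], [19.8, 22.0]])
#   qual = N.array([[0, 1], [0, 2]])       # qual_sst: 0 = best quality
#   masked = qcMask(sst, qual)             # nonzero qual values become masked
#   average(masked[N.newaxis, ...])        # mean over the leading (time) axis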
def climByAveragingPeriods(urls, # list of (daily) granule URLs for a long time period (e.g. a year)
nEpochs, # compute a climatology for every N epochs (days) by 'averaging'
nWindow, # number of epochs in window needed for averaging
variable, # name of primary variable in file
mask, # name of mask variable
coordinates, # names of coordinate arrays to read and pass on (e.g. 'lat' and 'lon')
maskFn=qcMask, # mask function to compute mask from mask variable
averager='pixelAverage', # averaging function to use, one of ['pixelAverage', 'gaussInterp']
mode='sequential', # Map across time periods of N-days for concurrent work, executed by:
# 'sequential' map, 'multicore' using pool.map(), 'cluster' using pathos pool.map(),
# or 'spark' using PySpark
numNodes=1, # number of cluster nodes to use
nWorkers=4, # number of parallel workers per node
averagingFunctions=AveragingFunctions, # dict of possible averaging functions
legalModes=ExecutionModes # list of possiblel execution modes
):
'''Compute a climatology every N days by applying a mask and averaging function.
Writes the averaged variable grid, attributes of the primary variable, and the coordinate arrays in a dictionary.
***Assumption: This routine assumes that the N grids will fit in memory.***
'''
try:
averageFn = averagingFunctions[averager]
except KeyError:
averageFn = average
print >>sys.stderr, 'climatology: Error, Averaging function must be one of: %s' % str(averagingFunctions)
urlSplits = [s for s in fixedSplit(urls, nEpochs)]
if VERBOSE: print >>sys.stderr, urlSplits
def climsContoured(urls):
n = len(urls)
var = climByAveraging(urls, variable, mask, coordinates, maskFn, averageFn)
return contourMap(var, variable, coordinates, n, urls[0])
if mode == 'sequential':
plots = map(climsContoured, urlSplits)
elif mode == 'multicore':
pool = Pool(nWorkers)
plots = pool.map(climsContoured, urlSplits)
elif mode == 'cluster':
pass
elif mode == 'spark':
pass
plots = map(climsContoured, urlSplits)
print plots
return plots
# return makeMovie(plots, 'clim.mpg')
def climByAveraging(urls, # list of granule URLs for a time period
variable, # name of primary variable in file
mask, # name of mask variable
coordinates, # names of coordinate arrays to read and pass on (e.g. 'lat' and 'lon')
maskFn=qcMask, # mask function to compute mask from mask variable
averageFn=average # averaging function to use
):
'''Compute a climatology over N arrays by applying a mask and averaging function.
Returns the averaged variable grid, attributes of the primary variable, and the coordinate arrays in a dictionary.
***Assumption: This routine assumes that the N grids will fit in memory.***
'''
n = len(urls)
varList = [variable, mask]
for i, url in enumerate(urls):
fn = retrieveFile(url, '~/cache')
if VERBOSE: print >>sys.stderr, 'Read variables and mask ...'
var, fh = getVariables(fn, varList) # return dict of variable objects by name
if i == 0:
dtype = var[variable].dtype
shape = (n,) + var[variable].shape
accum = N.ma.empty(shape, dtype)
v = maskFn(var[variable], var[mask]) # apply quality mask variable to get numpy MA
# v = var[variable][:]
accum[i] = v # accumulate N arrays for 'averaging'
if i+1 != len(urls): # keep var dictionary from last file to grab metadata
close(fh) # REMEMBER: closing fh loses in-memory data structures
if VERBOSE: print >>sys.stderr, 'Averaging ...'
coord, fh = getVariables(fn, coordinates) # read coordinate arrays and add to dict
for c in coordinates: var[c] = coord[c][:]
if averageFn == average:
avg = averageFn(accum) # call averaging function
else:
var[variable] = accum
if averageFn == gaussInterp:
varNames = variable + coordinates
avg, vweight, status = \
gaussInterp(var, varNames, latGrid, lonGrid, wlat, wlon, slat, slon, stime, vfactor, missingValue)
var['attributes'] = var[variable].__dict__ # save attributes of primary variable
var[variable] = avg # return primary variable & mask arrays in dict
var[mask] = N.ma.getmask(avg)
# close(fh) # Can't close, lose netCDF4.Variable objects, leaking two fh
return var
def contourMap(var, variable, coordinates, n, url):
p = urlparse.urlparse(url)
filename = os.path.split(p.path)[1]
return filename
outFile = filename + '.png'
# Downscale variable array (SST) before contouring, matplotlib is TOO slow on large arrays
vals = var[variable][:]
# Fixed color scale, write file, turn off auto borders, set title, reverse lat direction so monotonically increasing??
imageMap(var[coordinates[1]][:], var[coordinates[0]][:], var[variable][:],
vmin=-2., vmax=45., outFile=outFile, autoBorders=False,
title='%s %d-day Mean from %s' % (variable.upper(), n, filename))
print >>sys.stderr, 'Writing contour plot to %s' % outFile
return outFile
def isLocalFile(url):
'''Check if URL is a local path.'''
u = urlparse.urlparse(url)
if u.scheme == '' or u.scheme == 'file':
if not os.path.exists(u.path):
print >>sys.stderr, 'isLocalFile: File at local path does not exist: %s' % u.path
return (True, u.path)
else:
return (False, u.path)
def retrieveFile(url, dir=None):
'''Retrieve a file from a URL, or if it is a local path then verify it exists.'''
if dir is None: dir = './'
ok, path = isLocalFile(url)
fn = os.path.split(path)[1]
outPath = os.path.join(dir, fn)
if not ok:
if os.path.exists(outPath):
print >>sys.stderr, 'retrieveFile: Using cached file: %s' % outPath
else:
try:
print >>sys.stderr, 'retrieveFile: Retrieving (URL) %s to %s' % (url, outPath)
urllib.urlretrieve(url, outPath)
except:
print >>sys.stderr, 'retrieveFile: Cannot retrieve file at URL: %s' % url
return None
return outPath
def dailyFile2date(path, offset=1):
'''Convert YYYYDOY string in filename to date.'''
fn = os.path.split(path)[1]
year = int(fn[offset:offset+4])
doy = int(fn[offset+5:offset+8])
return fn[5:15].replace('.', '/')
def formatRegion(r):
"""Format lat/lon region specifier as string suitable for file name."""
if isinstance(r, str):
return r
else:
strs = [str(i).replace('-', 'm') for i in r]
return 'region-%s-%sby%s-%s' % tuple(strs)
def formatGrid(r):
"""Format lat/lon grid resolution specifier as string suitable for file name."""
if isinstance(r, str):
return r
else:
return str(r[0]) + 'by' + str(r[1])
def main(args):
nEpochs = int(args[0])
nWindow = int(args[1])
averager = args[2]
mode = args[3]
nWorkers = int(args[4])
urlFile = args[5]
urls = [s.strip() for s in open(urlFile, 'r')]
return climByAveragingPeriods(urls, nEpochs, nWindow, 'sst', 'qual_sst', ['lat', 'lon'],
averager=averager, mode=mode, nWorkers=nWorkers)
if __name__ == '__main__':
print main(sys.argv[1:])
# python climatology.py 5 5 pixelAverage sequential 1 urls_sst_10days.txt
# python climatology.py 5 5 gaussianInterp multicore 8 urls_sst_40days.txt
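# Hypothetical programmatic call (the URL-list file name is an assumption), equivalent
# to the command lines above:
#
#   urls = [s.strip() for s in open('urls_sst_10days.txt')]
#   climByAveragingPeriods(urls, nEpochs=5, nWindow=5, variable='sst',
#                          mask='qual_sst', coordinates=['lat', 'lon'],
#                          averager='pixelAverage', mode='sequential', nWorkers=1)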
| apache-2.0 |
MoamerEncsConcordiaCa/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 2 | 41538 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return const, const, control_flow_ops.group(train_op_1, training_op_2)
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, control_flow_ops.no_op()
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testModelFnArgs(self):
expected_param = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
def _argument_checker(features, labels, mode, params, config):
_, _ = features, labels
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertTrue(config.i_am_test)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(
model_fn=_argument_checker,
params=expected_param,
config=expected_config)
est.fit(input_fn=boston_input_fn, steps=1)
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(est.model_dir + '/export', serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaises(ValueError):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
# Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
# We pass an empty array and expect it to remain empty after calling
# fit and evaluate. This requires the estimator to copy the array internally
# if any hooks are added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
      self.assertEqual(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
bavardage/statsmodels | statsmodels/sandbox/tsa/diffusion2.py | 38 | 13366 | """ Diffusion 2: jump diffusion, stochastic volatility, stochastic time
Created on Tue Dec 08 15:03:49 2009
Author: josef-pktd following Meucci
License: BSD
contains:
CIRSubordinatedBrownian
Heston
IG
JumpDiffusionKou
JumpDiffusionMerton
NIG
VG
References
----------
Attilio Meucci, Review of Discrete and Continuous Processes in Finance: Theory and Applications
Bloomberg Portfolio Research Paper No. 2009-02-CLASSROOM July 1, 2009
http://papers.ssrn.com/sol3/papers.cfm?abstract_id=1373102
this is currently mostly a translation from matlab of
http://www.mathworks.com/matlabcentral/fileexchange/23554-review-of-discrete-and-continuous-processes-in-finance
license BSD:
Copyright (c) 2008, Attilio Meucci
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
TODO:
* vectorize where possible
* which processes are exactly simulated by finite differences ?
* include or exclude (now) the initial observation ?
* convert to and merge with diffusion.py (part 1 of diffusions)
* which processes can be easily estimated ?
loglike or characteristic function ?
* tests ? check for possible index errors (random indices), graphs look ok
* adjust notation, variable names, more consistent, more pythonic
* delete a few unused lines, cleanup
* docstrings
random bug (showed up only once; needs fuzz-testing to replicate):
File "...\diffusion2.py", line 375, in <module>
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
File "...\diffusion2.py", line 129, in simulate
jumps_ts[n] = CumS[Events]
IndexError: index out of bounds
here CumS is an empty array and Events == -1
"""
import numpy as np
#from scipy import stats # currently only uses np.random
import matplotlib.pyplot as plt
class JumpDiffusionMerton(object):
'''
Example
-------
mu=.00 # deterministic drift
sig=.20 # Gaussian component
    lambd=3.45 # Poisson process arrival rate
a=0 # drift of log-jump
D=.2 # st.dev of log-jump
X = JumpDiffusionMerton().simulate(mu,sig,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(X.T)
plt.title('Merton jump-diffusion')
'''
def __init__(self):
pass
def simulate(self, m,s,lambd,a,D,ts,nrepl):
T = ts[-1] # time points
# simulate number of jumps
n_jumps = np.random.poisson(lambd*T, size=(nrepl, 1))
jumps=[]
nobs=len(ts)
jumps=np.zeros((nrepl,nobs))
for j in range(nrepl):
# simulate jump arrival time
t = T*np.random.rand(n_jumps[j])#,1) #uniform
t = np.sort(t,0)
# simulate jump size
S = a + D*np.random.randn(n_jumps[j],1)
# put things together
CumS = np.cumsum(S)
jumps_ts = np.zeros(nobs)
for n in range(nobs):
Events = np.sum(t<=ts[n])-1
#print n, Events, CumS.shape, jumps_ts.shape
jumps_ts[n]=0
if Events > 0:
jumps_ts[n] = CumS[Events] #TODO: out of bounds see top
#jumps = np.column_stack((jumps, jumps_ts)) #maybe wrong transl
jumps[j,:] = jumps_ts
D_Diff = np.zeros((nrepl,nobs))
for k in range(nobs):
Dt=ts[k]
            if k>0:
Dt=ts[k]-ts[k-1]
D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.randn(nrepl)
x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps))
return x
class JumpDiffusionKou(object):
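    '''Kou double-exponential jump-diffusion process.'''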
def __init__(self):
pass
def simulate(self, m,s,lambd,p,e1,e2,ts,nrepl):
T=ts[-1]
# simulate number of jumps
N = np.random.poisson(lambd*T,size =(nrepl,1))
jumps=[]
nobs=len(ts)
jumps=np.zeros((nrepl,nobs))
for j in range(nrepl):
# simulate jump arrival time
t=T*np.random.rand(N[j])
t=np.sort(t)
# simulate jump size
ww = np.random.binomial(1, p, size=(N[j]))
S = ww * np.random.exponential(e1, size=(N[j])) - \
(1-ww) * np.random.exponential(e2, N[j])
# put things together
CumS = np.cumsum(S)
jumps_ts = np.zeros(nobs)
for n in range(nobs):
Events = sum(t<=ts[n])-1
jumps_ts[n]=0
if Events:
jumps_ts[n]=CumS[Events]
jumps[j,:] = jumps_ts
D_Diff = np.zeros((nrepl,nobs))
for k in range(nobs):
Dt=ts[k]
            if k>0:
Dt=ts[k]-ts[k-1]
D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.normal(size=nrepl)
x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps))
return x
class VG(object):
'''variance gamma process
'''
def __init__(self):
pass
def simulate(self, m,s,kappa,ts,nrepl):
T=len(ts)
dXs = np.zeros((nrepl,T))
for t in range(T):
            dt=ts[0]-0  # first interval runs from t=0 to the first grid point
            if t>0:
dt = ts[t]-ts[t-1]
#print dt/kappa
#TODO: check parameterization of gamrnd, checked looks same as np
d_tau = kappa * np.random.gamma(dt/kappa,1.,size=(nrepl))
#print s*np.sqrt(d_tau)
# this raises exception:
#dX = stats.norm.rvs(m*d_tau,(s*np.sqrt(d_tau)))
# np.random.normal requires scale >0
dX = np.random.normal(loc=m*d_tau, scale=1e-6+s*np.sqrt(d_tau))
dXs[:,t] = dX
x = np.cumsum(dXs,1)
return x
class IG(object):
    '''inverse-Gaussian random variates (mean m, shape l); used as the subordinator in NIG
'''
def __init__(self):
pass
def simulate(self, l,m,nrepl):
N = np.random.randn(nrepl,1)
Y = N**2
X = m + (.5*m*m/l)*Y - (.5*m/l)*np.sqrt(4*m*l*Y+m*m*(Y**2))
U = np.random.rand(nrepl,1)
ind = U>m/(X+m)
X[ind] = m*m/X[ind]
return X.ravel()
class NIG(object):
'''normal-inverse-Gaussian
'''
def __init__(self):
pass
def simulate(self, th,k,s,ts,nrepl):
T = len(ts)
DXs = np.zeros((nrepl,T))
for t in range(T):
            Dt=ts[0]-0  # first interval runs from t=0 to the first grid point
            if t>0:
Dt=ts[t]-ts[t-1]
l = 1/k*(Dt**2)
m = Dt
DS = IG().simulate(l,m,nrepl)
N = np.random.randn(nrepl)
DX = s*N*np.sqrt(DS) + th*DS
#print DS.shape, DX.shape, DXs.shape
DXs[:,t] = DX
x = np.cumsum(DXs,1)
return x
class Heston(object):
'''Heston Stochastic Volatility
'''
def __init__(self):
pass
def simulate(self, m, kappa, eta,lambd,r, ts, nrepl,tratio=1.):
T = ts[-1]
nobs = len(ts)
dt = np.zeros(nobs) #/tratio
dt[0] = ts[0]-0
dt[1:] = np.diff(ts)
DXs = np.zeros((nrepl,nobs))
dB_1 = np.sqrt(dt) * np.random.randn(nrepl,nobs)
dB_2u = np.sqrt(dt) * np.random.randn(nrepl,nobs)
dB_2 = r*dB_1 + np.sqrt(1-r**2)*dB_2u
vt = eta*np.ones(nrepl)
v=[]
dXs = np.zeros((nrepl,nobs))
vts = np.zeros((nrepl,nobs))
for t in range(nobs):
dv = kappa*(eta-vt)*dt[t]+ lambd*np.sqrt(vt)*dB_2[:,t]
dX = m*dt[t] + np.sqrt(vt*dt[t]) * dB_1[:,t]
vt = vt + dv
vts[:,t] = vt
dXs[:,t] = dX
x = np.cumsum(dXs,1)
return x, vts
class CIRSubordinatedBrownian(object):
'''CIR subordinated Brownian Motion
'''
def __init__(self):
pass
def simulate(self, m, kappa, T_dot,lambd,sigma, ts, nrepl):
T = ts[-1]
nobs = len(ts)
dtarr = np.zeros(nobs) #/tratio
dtarr[0] = ts[0]-0
dtarr[1:] = np.diff(ts)
DXs = np.zeros((nrepl,nobs))
dB = np.sqrt(dtarr) * np.random.randn(nrepl,nobs)
yt = 1.
dXs = np.zeros((nrepl,nobs))
dtaus = np.zeros((nrepl,nobs))
y = np.zeros((nrepl,nobs))
for t in range(nobs):
dt = dtarr[t]
dy = kappa*(T_dot-yt)*dt + lambd*np.sqrt(yt)*dB[:,t]
yt = np.maximum(yt+dy,1e-10) # keep away from zero ?
dtau = np.maximum(yt*dt, 1e-6)
dX = np.random.normal(loc=m*dtau, scale=sigma*np.sqrt(dtau))
y[:,t] = yt
dtaus[:,t] = dtau
dXs[:,t] = dX
tau = np.cumsum(dtaus,1)
x = np.cumsum(dXs,1)
return x, tau, y
def schout2contank(a,b,d):
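    '''Convert NIG parameters from Schoutens notation (alpha, beta, delta) to
    Cont-Tankov notation (theta, kappa, sigma).'''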
th = d*b/np.sqrt(a**2-b**2)
k = 1/(d*np.sqrt(a**2-b**2))
s = np.sqrt(d/np.sqrt(a**2-b**2))
return th,k,s
if __name__ == '__main__':
#Merton Jump Diffusion
#^^^^^^^^^^^^^^^^^^^^^
# grid of time values at which the process is evaluated
#("0" will be added, too)
    nobs = 252 #1000 #252.
ts = np.linspace(1./nobs, 1., nobs)
nrepl=5 # number of simulations
mu=.010 # deterministic drift
sigma = .020 # Gaussian component
lambd = 3.45 *10 # Poisson process arrival rate
a=0 # drift of log-jump
D=.2 # st.dev of log-jump
jd = JumpDiffusionMerton()
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('Merton jump-diffusion')
sigma = 0.2
lambd = 3.45
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('Merton jump-diffusion')
#Kou jump diffusion
#^^^^^^^^^^^^^^^^^^
mu=.0 # deterministic drift
lambd=4.25 # Poisson process arrival rate
p=.5 # prob. of up-jump
e1=.2 # parameter of up-jump
e2=.3 # parameter of down-jump
sig=.2 # Gaussian component
x = JumpDiffusionKou().simulate(mu,sig,lambd,p,e1,e2,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('double exponential (Kou jump diffusion)')
#variance-gamma
#^^^^^^^^^^^^^^
mu = .1 # deterministic drift in subordinated Brownian motion
kappa = 1. #10. #1 # inverse for gamma shape parameter
sig = 0.5 #.2 # s.dev in subordinated Brownian motion
x = VG().simulate(mu,sig,kappa,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('variance gamma')
#normal-inverse-Gaussian
#^^^^^^^^^^^^^^^^^^^^^^^
# (Schoutens notation)
al = 2.1
be = 0
de = 1
# convert parameters to Cont-Tankov notation
th,k,s = schout2contank(al,be,de)
x = NIG().simulate(th,k,s,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo x-axis
plt.title('normal-inverse-Gaussian')
#Heston Stochastic Volatility
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^
m=.0
kappa = .6 # 2*Kappa*Eta>Lambda^2
eta = .3**2
lambd =.25
r = -.7
T = 20.
    nobs = int(252*T) #1000 #252.
tsh = np.linspace(T/nobs, T, nobs)
x, vts = Heston().simulate(m,kappa, eta,lambd,r, tsh, nrepl, tratio=20.)
plt.figure()
plt.plot(x.T)
plt.title('Heston Stochastic Volatility')
plt.figure()
plt.plot(np.sqrt(vts).T)
plt.title('Heston Stochastic Volatility - CIR Vol.')
plt.figure()
plt.subplot(2,1,1)
plt.plot(x[0])
plt.title('Heston Stochastic Volatility process')
plt.subplot(2,1,2)
plt.plot(np.sqrt(vts[0]))
plt.title('CIR Volatility')
#CIR subordinated Brownian
#^^^^^^^^^^^^^^^^^^^^^^^^^
m=.1
sigma=.4
kappa=.6 # 2*Kappa*T_dot>Lambda^2
T_dot=1
lambd=1
#T=252*10
#dt=1/252
#nrepl=2
T = 10.
    nobs = int(252*T) #1000 #252.
tsh = np.linspace(T/nobs, T, nobs)
x, tau, y = CIRSubordinatedBrownian().simulate(m, kappa, T_dot,lambd,sigma, tsh, nrepl)
plt.figure()
plt.plot(tsh, x.T)
plt.title('CIRSubordinatedBrownian process')
plt.figure()
plt.plot(tsh, y.T)
plt.title('CIRSubordinatedBrownian - CIR')
plt.figure()
plt.plot(tsh, tau.T)
plt.title('CIRSubordinatedBrownian - stochastic time ')
plt.figure()
plt.subplot(2,1,1)
plt.plot(tsh, x[0])
plt.title('CIRSubordinatedBrownian process')
plt.subplot(2,1,2)
plt.plot(tsh, y[0], label='CIR')
plt.plot(tsh, tau[0], label='stoch. time')
plt.legend(loc='upper left')
plt.title('CIRSubordinatedBrownian')
#plt.show()
| bsd-3-clause |
apdjustino/urbansim | urbansim/utils/testing.py | 4 | 1956 | """
Utilities used in testing of UrbanSim.
"""
import numpy as np
import numpy.testing as npt
import pandas as pd
def assert_frames_equal(actual, expected, use_close=False):
"""
Compare DataFrame items by index and column and
raise AssertionError if any item is not equal.
    Ordering is unimportant; items are compared only by label.
NaN and infinite values are supported.
Parameters
----------
actual : pandas.DataFrame
expected : pandas.DataFrame
use_close : bool, optional
If True, use numpy.testing.assert_allclose instead of
numpy.testing.assert_equal.
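    Examples
    --------
    A minimal usage sketch (the toy frames below are chosen purely for
    illustration):
    >>> import pandas as pd
    >>> left = pd.DataFrame({'a': [1.0, 2.0]}, index=['x', 'y'])
    >>> right = pd.DataFrame({'a': [2.0, 1.0]}, index=['y', 'x'])
    >>> assert_frames_equal(left, right)  # passes: items are matched by label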
"""
if use_close:
comp = npt.assert_allclose
else:
comp = npt.assert_equal
assert (isinstance(actual, pd.DataFrame) and
isinstance(expected, pd.DataFrame)), \
'Inputs must both be pandas DataFrames.'
for i, exp_row in expected.iterrows():
assert i in actual.index, 'Expected row {!r} not found.'.format(i)
act_row = actual.loc[i]
for j, exp_item in exp_row.iteritems():
assert j in act_row.index, \
'Expected column {!r} not found.'.format(j)
act_item = act_row[j]
try:
comp(act_item, exp_item)
except AssertionError as e:
raise AssertionError(
str(e) + '\n\nColumn: {!r}\nRow: {!r}'.format(j, i))
def assert_index_equal(left, right):
"""
Similar to pdt.assert_index_equal but is not sensitive to key ordering.
Parameters
----------
left: pandas.Index
right: pandas.Index
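    Examples
    --------
    Sketch for illustration only; the ordering of the labels does not matter:
    >>> import pandas as pd
    >>> assert_index_equal(pd.Index(['a', 'b']), pd.Index(['b', 'a']))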
"""
assert isinstance(left, pd.Index)
assert isinstance(right, pd.Index)
left_diff = left.difference(right)
right_diff = right.difference(left)
if len(left_diff) > 0 or len(right_diff) > 0:
raise AssertionError("keys not in left [{0}], keys not in right [{1}]".format(
left_diff, right_diff))
| bsd-3-clause |
rs2/pandas | pandas/tests/indexes/multi/test_integrity.py | 1 | 8567 | import re
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import IntervalIndex, MultiIndex, RangeIndex
import pandas._testing as tm
def test_labels_dtypes():
# GH 8456
i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
assert i.codes[0].dtype == "int8"
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(40)])
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(400)])
assert i.codes[1].dtype == "int16"
i = MultiIndex.from_product([["a"], range(40000)])
assert i.codes[1].dtype == "int32"
i = pd.MultiIndex.from_product([["a"], range(1000)])
assert (i.codes[0] >= 0).all()
assert (i.codes[1] >= 0).all()
def test_values_boxed():
tuples = [
(1, pd.Timestamp("2000-01-01")),
(2, pd.NaT),
(3, pd.Timestamp("2000-01-03")),
(1, pd.Timestamp("2000-01-04")),
(2, pd.Timestamp("2000-01-02")),
(3, pd.Timestamp("2000-01-03")),
]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz="US/Central")
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq="D")
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_consistency():
# need to construct an overflow
major_axis = list(range(70000))
minor_axis = list(range(10))
major_codes = np.arange(70000)
minor_codes = np.repeat(range(10), 7000)
    # the fact that it works means it's consistent
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
# inconsistent
major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
assert index.is_unique is False
@pytest.mark.arm_slow
def test_hash_collisions():
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product(
[np.arange(1000), np.arange(1000)], names=["one", "two"]
)
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(len(index), dtype="intp"))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_dims():
pass
def take_invalid_kwargs():
vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]]
idx = pd.MultiIndex.from_product(vals, names=["str", "dt"])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode="clip")
def test_isna_behavior(idx):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
pd.isna(idx)
def test_large_multiindex_error():
# GH12527
df_below_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]), columns=["dest"]
)
with pytest.raises(KeyError, match=r"^\(-1, 0\)$"):
df_below_1000000.loc[(-1, 0), "dest"]
with pytest.raises(KeyError, match=r"^\(3, 0\)$"):
df_below_1000000.loc[(3, 0), "dest"]
df_above_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]), columns=["dest"]
)
with pytest.raises(KeyError, match=r"^\(-1, 0\)$"):
df_above_1000000.loc[(-1, 0), "dest"]
with pytest.raises(KeyError, match=r"^\(3, 0\)$"):
df_above_1000000.loc[(3, 0), "dest"]
def test_million_record_attribute_error():
# GH 18165
r = list(range(1000000))
df = pd.DataFrame(
{"a": r, "b": r}, index=pd.MultiIndex.from_tuples([(x, x) for x in r])
)
msg = "'Series' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
df["a"].foo()
def test_can_hold_identifiers(idx):
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_metadata_immutable(idx):
levels, codes = idx.levels, idx.codes
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile("does not support mutable operations")
with pytest.raises(TypeError, match=mutable_regex):
levels[0] = levels[0]
with pytest.raises(TypeError, match=mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with pytest.raises(TypeError, match=mutable_regex):
codes[0] = codes[0]
with pytest.raises(ValueError, match="assignment destination is read-only"):
codes[0][0] = codes[0][0]
# and for names
names = idx.names
with pytest.raises(TypeError, match=mutable_regex):
names[0] = names[0]
def test_level_setting_resets_attributes():
ind = pd.MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]])
assert ind.is_monotonic
with tm.assert_produces_warning(FutureWarning):
ind.set_levels([["A", "B"], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_rangeindex_fallback_coercion_bug():
# GH 12893
foo = pd.DataFrame(np.arange(100).reshape((10, 10)))
bar = pd.DataFrame(np.arange(100).reshape((10, 10)))
df = pd.concat({"foo": foo.stack(), "bar": bar.stack()}, axis=1)
df.index.names = ["fizz", "buzz"]
str(df)
expected = pd.DataFrame(
{"bar": np.arange(100), "foo": np.arange(100)},
index=pd.MultiIndex.from_product(
[range(10), range(10)], names=["fizz", "buzz"]
),
)
tm.assert_frame_equal(df, expected, check_like=True)
result = df.index.get_level_values("fizz")
expected = pd.Int64Index(np.arange(10), name="fizz").repeat(10)
tm.assert_index_equal(result, expected)
result = df.index.get_level_values("buzz")
expected = pd.Int64Index(np.tile(np.arange(10), 10), name="buzz")
tm.assert_index_equal(result, expected)
def test_memory_usage(idx):
result = idx.memory_usage()
if len(idx):
idx.get_loc(idx[0])
result2 = idx.memory_usage()
result3 = idx.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(idx, (RangeIndex, IntervalIndex)):
assert result2 > result
if idx.inferred_type == "object":
assert result3 > result2
else:
# we report 0 for no-length
assert result == 0
def test_nlevels(idx):
assert idx.nlevels == 2
| bsd-3-clause |
dimitri-yatsenko/pipeline | python/pipeline/stack.py | 2 | 135783 | """ Schemas for structural stacks. """
import datajoint as dj
from datajoint.jobs import key_hash
import matplotlib.pyplot as plt
import numpy as np
import scanreader
from scipy import signal
from scipy import ndimage
from scipy import optimize
import itertools
from . import experiment, notify, shared, reso, meso
anatomy = dj.create_virtual_module('pipeline_anatomy','pipeline_anatomy')
from .utils import galvo_corrections, stitching, performance, enhancement
from .utils.signal import mirrconv, float2uint8
from .exceptions import PipelineException
""" Note on our coordinate system:
Our stack/motor coordinate system is consistent with numpy's: z in the first axis pointing
downwards, y in the second axis pointing towards you and x on the third axis pointing to
the right.
"""
dj.config['external-stack'] = {'protocol': 'file',
'location': '/mnt/dj-stor01/pipeline-externals'}
dj.config['cache'] = '/tmp/dj-cache'
schema = dj.schema('pipeline_stack', locals(), create_tables=False)
@schema
class StackInfo(dj.Imported):
definition = """ # master table with general data about the stacks
-> experiment.Stack
---
nrois : tinyint # number of ROIs
nchannels : tinyint # number of channels
fill_fraction : float # raster scan temporal fill fraction (see scanimage)
"""
class ROI(dj.Part):
definition = """ # 3-D volumes that compose this stack (usually tiled to form a bigger fov)
-> StackInfo
roi_id : tinyint # same as ScanImage's
---
-> experiment.Stack.Filename
field_ids : blob # list of field_ids (0-index) sorted from shallower to deeper
roi_z : float # (um) center of ROI in the motor coordinate system (cortex is at 0)
roi_y : float # (um) center of ROI in the motor coordinate system
roi_x : float # (um) center of ROI in the motor coordinate system
roi_px_depth : smallint # number of slices
roi_px_height : smallint # lines per frame
roi_px_width : smallint # pixels per line
roi_um_depth : float # depth in microns
roi_um_height : float # height in microns
roi_um_width : float # width in microns
nframes : smallint # number of recorded frames per plane
fps : float # (Hz) volumes per second
bidirectional : boolean # true = bidirectional scanning
is_slow : boolean # whether all frames in one depth were recorded before moving to the next
"""
def _make_tuples(self, key, stack, id_in_file):
# Create results tuple
tuple_ = key.copy()
# Get field_ids ordered from shallower to deeper field in this ROI
surf_z = (experiment.Stack() & key).fetch1('surf_depth') # surface depth in fastZ coordinates (meso) or motor coordinates (reso)
if stack.is_multiROI:
field_ids = [i for i, field_roi in enumerate(stack.field_rois) if
id_in_file in field_roi]
field_depths = [stack.field_depths[i] - surf_z for i in field_ids]
else:
field_ids = range(stack.num_scanning_depths)
motor_zero = surf_z - stack.motor_position_at_zero[2]
if stack.is_slow_stack and not stack.is_slow_stack_with_fastZ: # using motor
initial_fastZ = stack.initial_secondary_z or 0
field_depths = [motor_zero - stack.field_depths[i] + 2 * initial_fastZ
for i in field_ids]
else: # using fastZ
field_depths = [motor_zero + stack.field_depths[i] for i in field_ids]
field_depths, field_ids = zip(*sorted(zip(field_depths, field_ids)))
tuple_['field_ids'] = field_ids
# Get reso/meso specific coordinates
x_zero, y_zero, _ = stack.motor_position_at_zero # motor x, y at ScanImage's 0
if stack.is_multiROI:
tuple_['roi_y'] = y_zero + stack._degrees_to_microns(stack.fields[
field_ids[0]].y)
tuple_['roi_x'] = x_zero + stack._degrees_to_microns(stack.fields[
field_ids[0]].x)
tuple_['roi_px_height'] = stack.field_heights[field_ids[0]]
tuple_['roi_px_width'] = stack.field_widths[field_ids[0]]
tuple_['roi_um_height'] = stack.field_heights_in_microns[field_ids[0]]
tuple_['roi_um_width'] = stack.field_widths_in_microns[field_ids[0]]
else:
tuple_['roi_y'] = y_zero
tuple_['roi_x'] = x_zero
tuple_['roi_px_height'] = stack.image_height
tuple_['roi_px_width'] = stack.image_width
# Estimate height and width in microns using measured FOVs for similar setups
fov_rel = (experiment.FOV() * experiment.Session() * experiment.Stack() &
key & 'session_date>=fov_ts')
zooms = fov_rel.fetch('mag').astype(np.float32) # zooms measured in same setup
closest_zoom = zooms[np.argmin(np.abs(np.log(zooms / stack.zoom)))]
dims = (fov_rel & 'ABS(mag - {}) < 1e-4'.format(closest_zoom)).fetch1(
'height', 'width')
um_height, um_width = [float(um) * (closest_zoom / stack.zoom) for um in
dims]
tuple_['roi_um_height'] = um_height * stack._y_angle_scale_factor
tuple_['roi_um_width'] = um_width * stack._x_angle_scale_factor
# Get common parameters
z_step = field_depths[1] - field_depths[0]
tuple_['roi_z'] = field_depths[0] + (field_depths[-1] - field_depths[0]) / 2
tuple_['roi_px_depth'] = len(field_ids)
tuple_['roi_um_depth'] = field_depths[-1] - field_depths[0] + z_step
tuple_['nframes'] = stack.num_frames
tuple_['fps'] = stack.fps
tuple_['bidirectional'] = stack.is_bidirectional
tuple_['is_slow'] = stack.is_slow_stack
self.insert1(tuple_)
@property
def microns_per_pixel(self):
""" Returns an array with microns per pixel in depth, height and width. """
um_dims = self.fetch1('roi_um_depth', 'roi_um_height', 'roi_um_width')
px_dims = self.fetch1('roi_px_depth', 'roi_px_height', 'roi_px_width')
return np.array([um_dim / px_dim for um_dim, px_dim in zip(um_dims, px_dims)])
def _make_tuples(self, key):
""" Read and store stack information."""
print('Reading header...')
# Read files forming this stack
filename_keys = (experiment.Stack.Filename() & key).fetch(dj.key)
stacks = []
for filename_key in filename_keys:
stack_filename = (experiment.Stack.Filename() &
filename_key).local_filenames_as_wildcard
stacks.append(scanreader.read_scan(stack_filename))
num_rois_per_file = [(s.num_rois if s.is_multiROI else 1) for s in stacks]
# Create Stack tuple
tuple_ = key.copy()
tuple_['nrois'] = np.sum(num_rois_per_file)
tuple_['nchannels'] = stacks[0].num_channels
tuple_['fill_fraction'] = stacks[0].temporal_fill_fraction
# Insert Stack
self.insert1(tuple_)
# Insert ROIs
roi_id = 1
for filename_key, num_rois, stack in zip(filename_keys, num_rois_per_file,
stacks):
for roi_id_in_file in range(num_rois):
roi_key = {**key, **filename_key, 'roi_id': roi_id}
StackInfo.ROI()._make_tuples(roi_key, stack, roi_id_in_file)
roi_id += 1
# Fill in CorrectionChannel if only one channel
if stacks[0].num_channels == 1:
CorrectionChannel().fill(key)
@schema
class Quality(dj.Computed):
definition = """ # different quality metrics for a scan (before corrections)
-> StackInfo
"""
class MeanIntensity(dj.Part):
definition = """ # mean intensity per frame and slice
-> Quality
-> StackInfo.ROI
-> shared.Channel
---
intensities : longblob # num_slices x num_frames
"""
class SummaryFrames(dj.Part):
definition = """ # mean slice at 8 different depths
-> Quality
-> StackInfo.ROI
-> shared.Channel
---
summary : longblob # h x w x 8
"""
class Contrast(dj.Part):
definition = """ # difference between 99 and 1 percentile per frame and slice
-> Quality
-> StackInfo.ROI
-> shared.Channel
---
contrasts : longblob # num_slices x num_frames
"""
def _make_tuples(self, key):
print('Computing quality metrics for stack', key)
# Insert in Quality
self.insert1(key)
for roi_tuple in (StackInfo.ROI() & key).fetch():
# Load ROI
roi_filename = (experiment.Stack.Filename() &
roi_tuple).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
for channel in range((StackInfo() & key).fetch1('nchannels')):
# Map: Compute quality metrics in each field
f = performance.parallel_quality_stack # function to map
field_ids = roi_tuple['field_ids']
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=channel)
# Reduce: Collect results
mean_intensities = np.empty((roi_tuple['roi_px_depth'],
roi_tuple['nframes']))
contrasts = np.empty((roi_tuple['roi_px_depth'], roi_tuple['nframes']))
for field_idx, field_mis, field_contrasts, _ in results:
mean_intensities[field_idx] = field_mis
contrasts[field_idx] = field_contrasts
frames = [res[3] for res in sorted(results, key=lambda res: res[0])]
frames = np.stack(frames[:: int(len(frames) / 8)], axis=-1) # frames at 8 diff depths
# Insert
roi_key = {**key, 'roi_id': roi_tuple['roi_id'], 'channel': channel + 1}
self.MeanIntensity().insert1({**roi_key, 'intensities': mean_intensities})
self.Contrast().insert1({**roi_key, 'contrasts': contrasts})
self.SummaryFrames().insert1({**roi_key, 'summary': frames})
self.notify(roi_key, frames, mean_intensities, contrasts)
@notify.ignore_exceptions
def notify(self, key, summary_frames, mean_intensities, contrasts):
# Send summary frames
import imageio
video_filename = '/tmp/' + key_hash(key) + '.gif'
percentile_99th = np.percentile(summary_frames, 99.5)
summary_frames = np.clip(summary_frames, None, percentile_99th)
summary_frames = float2uint8(summary_frames).transpose([2, 0, 1])
imageio.mimsave(video_filename, summary_frames, duration=0.4)
msg = ('summary frames for {animal_id}-{session}-{stack_idx} channel '
'{channel}').format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg)
# Send intensity and contrasts
figsize = (min(4, contrasts.shape[1] / 10 + 1), contrasts.shape[0] / 30 + 1) # set heuristically
fig, axes = plt.subplots(1, 2, figsize=figsize, sharex=True, sharey=True)
fig.tight_layout()
axes[0].set_title('Mean intensity', size='small')
axes[0].imshow(mean_intensities)
axes[0].set_ylabel('Slices')
axes[0].set_xlabel('Frames')
axes[1].set_title('Contrast (99 - 1 percentile)', size='small')
axes[1].imshow(contrasts)
axes[1].set_xlabel('Frames')
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('quality images for {animal_id}-{session}-{stack_idx} channel '
'{channel}').format(**key)
slack_user.notify(file=img_filename, file_title=msg)
@schema
class CorrectionChannel(dj.Manual):
definition = """ # channel to use for raster and motion correction
-> experiment.Stack
---
-> shared.Channel
"""
def fill(self, key, channel=1):
for stack_key in (StackInfo() & key).fetch(dj.key):
self.insert1({**stack_key, 'channel': channel}, ignore_extra_fields=True,
skip_duplicates=True)
@schema
class RasterCorrection(dj.Computed):
definition = """ # raster correction for bidirectional resonant scans
-> StackInfo.ROI # animal_id, session, stack_idx, roi_id, version
-> CorrectionChannel # animal_id, session, stack_idx
---
raster_phase : float # difference between expected and recorded scan angle
raster_std : float # standard deviation among raster phases in different slices
"""
def _make_tuples(self, key):
""" Compute raster phase discarding top and bottom 15% of slices and tapering
edges to avoid edge artifacts."""
print('Computing raster correction for ROI', key)
# Get some params
res = (StackInfo.ROI() & key).fetch1('bidirectional', 'roi_px_height',
'roi_px_width', 'field_ids')
is_bidirectional, image_height, image_width, field_ids = res
correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1
if is_bidirectional:
# Read the ROI
filename_rel = (experiment.Stack.Filename() & (StackInfo.ROI() & key))
roi_filename = filename_rel.local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Compute some parameters
skip_fields = max(1, int(round(len(field_ids) * 0.10)))
taper = np.sqrt(np.outer(signal.tukey(image_height, 0.4),
signal.tukey(image_width, 0.4)))
# Compute raster phase for each slice and take the median
raster_phases = []
for field_id in field_ids[skip_fields: -2 * skip_fields]:
# Create template (average frame tapered to avoid edge artifacts)
slice_ = roi[field_id, :, :, correction_channel, :].astype(np.float32,
copy=False)
anscombed = 2 * np.sqrt(slice_ - slice_.min(axis=(0, 1)) + 3 / 8) # anscombe transform
template = np.mean(anscombed, axis=-1) * taper
# Compute raster correction
raster_phases.append(galvo_corrections.compute_raster_phase(template,
roi.temporal_fill_fraction))
raster_phase = np.median(raster_phases)
raster_std = np.std(raster_phases)
else:
raster_phase = 0
raster_std = 0
# Insert
self.insert1({**key, 'raster_phase': raster_phase, 'raster_std': raster_std})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
msg = ('raster phase for {animal_id}-{session}-{stack_idx} roi {roi_id}: '
'{phase}').format(**key, phase=(self & key).fetch1('raster_phase'))
(notify.SlackUser() & (experiment.Session() & key)).notify(msg)
def correct(self, roi):
""" Correct roi with parameters extracted from self. In place.
:param np.array roi: ROI (fields, image_height, image_width, frames).
"""
raster_phase = self.fetch1('raster_phase')
fill_fraction = (StackInfo() & self).fetch1('fill_fraction')
if abs(raster_phase) < 1e-7:
corrected = roi.astype(np.float32, copy=False)
else:
corrected = roi # in_place
for i, field in enumerate(roi):
corrected[i] = galvo_corrections.correct_raster(field, raster_phase,
fill_fraction)
return corrected
@schema
class MotionCorrection(dj.Computed):
definition = """ # motion correction for each slice in the stack
-> RasterCorrection
---
y_shifts : longblob # y motion correction shifts (num_slices x num_frames)
x_shifts : longblob # x motion correction shifts (num_slices x num_frames)
"""
def _make_tuples(self, key):
""" Compute motion shifts to align frames over time and over slices."""
print('Computing motion correction for ROI', key)
# Get some params
res = (StackInfo.ROI() & key).fetch1('nframes', 'roi_px_height', 'roi_px_width',
'field_ids')
num_frames, image_height, image_width, field_ids = res
correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1
y_shifts = np.zeros([len(field_ids), num_frames])
x_shifts = np.zeros([len(field_ids), num_frames])
if num_frames > 1:
# Read the ROI
filename_rel = (experiment.Stack.Filename() & (StackInfo.ROI() & key))
roi_filename = filename_rel.local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Compute some params
skip_rows = int(round(image_height * 0.10))
skip_cols = int(round(image_width * 0.10))
# Map: Compute shifts in parallel
f = performance.parallel_motion_stack # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (StackInfo() & key).fetch1('fill_fraction')
max_y_shift, max_x_shift = 20 / (StackInfo.ROI() & key).microns_per_pixel[1:]
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=correction_channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'skip_rows': skip_rows,
'skip_cols': skip_cols,
'max_y_shift': max_y_shift,
'max_x_shift': max_x_shift})
# Reduce: Collect results
for field_idx, y_shift, x_shift in results:
y_shifts[field_idx] = y_shift
x_shifts[field_idx] = x_shift
# Insert
self.insert1({**key, 'y_shifts': y_shifts, 'x_shifts': x_shifts})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
fps, is_slow_stack = (StackInfo.ROI() & key).fetch1('fps', 'is_slow')
num_slices, num_frames = y_shifts.shape
fps = fps * (num_slices if is_slow_stack else 1)
seconds = np.arange(num_frames) / fps
fig, axes = plt.subplots(2, 1, figsize=(13, 10), sharex=True, sharey=True)
axes[0].set_title('Shifts in y for all slices')
axes[0].set_ylabel('Pixels')
axes[0].plot(seconds, y_shifts.T)
axes[1].set_title('Shifts in x for all slices')
axes[1].set_ylabel('Pixels')
axes[1].set_xlabel('Seconds')
axes[1].plot(seconds, x_shifts.T)
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename)
plt.close(fig)
msg = 'motion shifts for {animal_id}-{session}-{stack_idx} roi {roi_id}'.format(
**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def save_as_tiff(self, filename='roi.tif', channel=1):
""" Correct roi and save as a tiff file.
:param int channel: What channel to use. Starts at 1
"""
from tifffile import imsave
# Get some params
res = (StackInfo.ROI() & self).fetch1('field_ids', 'roi_px_depth',
'roi_px_height', 'roi_px_width')
field_ids, px_depth, px_height, px_width = res
# Load ROI
roi_filename = (experiment.Stack.Filename() & self).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Map: Apply corrections to each field in parallel
f = performance.parallel_correct_stack # function to map
raster_phase = (RasterCorrection() & self).fetch1('raster_phase')
fill_fraction = (StackInfo() & self).fetch1('fill_fraction')
y_shifts, x_shifts = self.fetch1('y_shifts', 'x_shifts')
results = performance.map_fields(f, roi, field_ids=field_ids, channel=channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'y_shifts': y_shifts,
'x_shifts': x_shifts})
# Reduce: Collect results
corrected_roi = np.empty((px_depth, px_height, px_width), dtype=np.float32)
for field_idx, corrected_field in results:
corrected_roi[field_idx] = corrected_field
print('Saving file at:', filename)
imsave(filename, corrected_roi)
@schema
class Stitching(dj.Computed):
definition = """ # stitches together overlapping rois
-> StackInfo
"""
@property
def key_source(self):
return StackInfo() - (StackInfo.ROI() - MotionCorrection()) # run iff all ROIs have been processed
class Volume(dj.Part):
definition = """ # union of ROIs from a stack (usually one volume per stack)
-> Stitching
volume_id : tinyint # id of this volume
"""
class ROICoordinates(dj.Part):
definition = """ # coordinates for each ROI in the stitched volume
-> Stitching # animal_id, session, stack_idx, version
-> MotionCorrection # animal_id, session, stack_idx, version, roi_id
---
-> Stitching.Volume # volume to which this ROI belongs
stitch_ys : blob # (px) center of each slice in a volume-wise coordinate system
stitch_xs : blob # (px) center of each slice in a volume-wise coordinate system
"""
def _make_tuples(self, key):
""" Stitch overlapping ROIs together and correct slice-to-slice alignment.
Iteratively stitches two overlapping ROIs if the overlapping dimension has the
same length (up to some relative tolerance). Stitching params are calculated per
slice.
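        The overall loop (a rough paraphrase of the implementation below, not
        additional behavior) is:
            while the number of ROIs keeps decreasing:
                rois = join_rows(rois)          # merge ROIs lying side by side
                rotate every ROI by 90 degrees  # columns become rows
                rois = join_rows(rois)          # merge ROIs stacked vertically
                rotate every ROI back (270 degrees)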
        Edge case: when two overlapping ROIs have different px/micron resolutions,
        they won't be joined even if their true heights are the same (because their
        pixel heights will not match); conversely, pixel heights could happen to
        match even if the true heights differ, in which case the ROIs will be
        erroneously stitched.
"""
print('Stitching ROIs for stack', key)
# Get some params
correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1
# Read and correct ROIs forming this stack
print('Correcting ROIs...')
rois = []
for roi_tuple in (StackInfo.ROI() & key).fetch():
# Load ROI
roi_filename = (experiment.Stack.Filename() &
roi_tuple).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Map: Apply corrections to each field in parallel
f = performance.parallel_correct_stack # function to map
raster_phase = (RasterCorrection() & roi_tuple).fetch1('raster_phase')
fill_fraction = (StackInfo() & roi_tuple).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & roi_tuple).fetch1('y_shifts',
'x_shifts')
field_ids = roi_tuple['field_ids']
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=correction_channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'y_shifts': y_shifts,
'x_shifts': x_shifts,
'apply_anscombe': True})
# Reduce: Collect results
corrected_roi = np.empty((roi_tuple['roi_px_depth'],
roi_tuple['roi_px_height'],
roi_tuple['roi_px_width']), dtype=np.float32)
for field_idx, corrected_field in results:
corrected_roi[field_idx] = corrected_field
# Create ROI object
um_per_px = (StackInfo.ROI() & (StackInfo.ROI().proj() &
roi_tuple)).microns_per_pixel
px_z, px_y, px_x = np.array([roi_tuple['roi_{}'.format(dim)] for dim in
['z', 'y', 'x']]) / um_per_px
rois.append(stitching.StitchedROI(corrected_roi, x=px_x, y=px_y, z=px_z,
id_=roi_tuple['roi_id']))
def enhance(image, sigmas):
""" Enhance 2p image. See enhancement.py for details."""
return enhancement.sharpen_2pimage(enhancement.lcn(image, sigmas))
def join_rows(rois_):
""" Iteratively join all rois that overlap in the same row."""
sorted_rois = sorted(rois_, key=lambda roi: (roi.x, roi.y))
prev_num_rois = float('inf')
while len(sorted_rois) < prev_num_rois:
prev_num_rois = len(sorted_rois)
for left, right in itertools.combinations(sorted_rois, 2):
if left.is_aside_to(right):
roi_key = {**key, 'roi_id': left.roi_coordinates[0].id}
um_per_px = (StackInfo.ROI() & roi_key).microns_per_pixel
# Compute stitching shifts
neighborhood_size = 25 / um_per_px[1:]
left_ys, left_xs = [], []
for l, r in zip(left.slices, right.slices):
left_slice = enhance(l.slice, neighborhood_size)
right_slice = enhance(r.slice, neighborhood_size)
delta_y, delta_x = stitching.linear_stitch(left_slice,
right_slice,
r.x - l.x)
left_ys.append(r.y - delta_y)
left_xs.append(r.x - delta_x)
# Fix outliers
max_y_shift, max_x_shift = 10 / um_per_px[1:]
left_ys, left_xs, _ = galvo_corrections.fix_outliers(
np.array(left_ys), np.array(left_xs), max_y_shift,
max_x_shift, method='linear')
# Stitch together
right.join_with(left, left_xs, left_ys)
sorted_rois.remove(left)
break # restart joining
return sorted_rois
# Stitch overlapping rois recursively
print('Computing stitching parameters...')
prev_num_rois = float('Inf') # to enter the loop at least once
while len(rois) < prev_num_rois:
prev_num_rois = len(rois)
# Join rows
rois = join_rows(rois)
# Join columns
[roi.rot90() for roi in rois]
rois = join_rows(rois)
[roi.rot270() for roi in rois]
# Compute slice-to slice alignment
print('Computing slice-to-slice alignment...')
for roi in rois:
big_volume = roi.volume
num_slices, image_height, image_width = big_volume.shape
roi_key = {**key, 'roi_id': roi.roi_coordinates[0].id}
um_per_px = (StackInfo.ROI() & roi_key).microns_per_pixel
# Enhance
neighborhood_size = 25 / um_per_px[1:]
for i in range(num_slices):
big_volume[i] = enhance(big_volume[i], neighborhood_size)
# Drop 10% of the image borders
skip_rows = int(round(image_height * 0.1))
skip_columns = int(round(image_width * 0.1))
big_volume = big_volume[:, skip_rows:-skip_rows, skip_columns: -skip_columns]
y_aligns = np.zeros(num_slices)
x_aligns = np.zeros(num_slices)
for i in range(1, num_slices):
# Align current slice to previous one
y_aligns[i], x_aligns[i] = galvo_corrections.compute_motion_shifts(
big_volume[i], big_volume[i - 1], in_place=False)
# Fix outliers
max_y_shift, max_x_shift = 15 / um_per_px[1:]
y_fixed, x_fixed, _ = galvo_corrections.fix_outliers(y_aligns, x_aligns,
max_y_shift, max_x_shift)
# Accumulate shifts so shift i is shift in i -1 plus shift to align i to i-1
y_cumsum, x_cumsum = np.cumsum(y_fixed), np.cumsum(x_fixed)
# Detrend to discard influence of vessels going through the slices
filter_size = int(round(60 / um_per_px[0])) # 60 microns in z
filter_size += 1 if filter_size % 2 == 0 else 0
if len(y_cumsum) > filter_size:
smoothing_filter = signal.hann(filter_size)
smoothing_filter /= sum(smoothing_filter)
y_detrend = y_cumsum - mirrconv(y_cumsum, smoothing_filter)
x_detrend = x_cumsum - mirrconv(x_cumsum, smoothing_filter)
else:
y_detrend = y_cumsum - y_cumsum.mean()
x_detrend = x_cumsum - x_cumsum.mean()
# Apply alignment shifts in roi
for slice_, y_align, x_align in zip(roi.slices, y_detrend, x_detrend):
slice_.y -= y_align
slice_.x -= x_align
for roi_coord in roi.roi_coordinates:
roi_coord.ys = [prev_y - y_align for prev_y, y_align in zip(roi_coord.ys,
y_detrend)]
roi_coord.xs = [prev_x - x_align for prev_x, x_align in zip(roi_coord.xs,
x_detrend)]
# Insert in Stitching
print('Inserting...')
self.insert1(key)
# Insert each stitched volume
for volume_id, roi in enumerate(rois, start=1):
self.Volume().insert1({**key, 'volume_id': volume_id})
# Insert coordinates of each ROI forming this volume
for roi_coord in roi.roi_coordinates:
tuple_ = {**key, 'roi_id': roi_coord.id, 'volume_id': volume_id,
'stitch_xs': roi_coord.xs, 'stitch_ys': roi_coord.ys}
self.ROICoordinates().insert1(tuple_)
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
slack_user = (notify.SlackUser() & (experiment.Session() & key))
for volume_key in (self.Volume() & key).fetch('KEY'):
for roi_coord in (self.ROICoordinates() & volume_key).fetch(as_dict=True):
center_z, num_slices, um_depth = (StackInfo.ROI() & roi_coord).fetch1(
'roi_z', 'roi_px_depth', 'roi_um_depth')
first_z = center_z - um_depth / 2 + (um_depth / num_slices) / 2
depths = first_z + (um_depth / num_slices) * np.arange(num_slices)
fig, axes = plt.subplots(2, 1, figsize=(15, 8), sharex=True)
axes[0].set_title('Center position (x)')
axes[0].plot(depths, roi_coord['stitch_xs'])
axes[1].set_title('Center position (y)')
axes[1].plot(depths, roi_coord['stitch_ys'])
axes[0].set_ylabel('Pixels')
axes[0].set_xlabel('Depths')
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('stitch traces for {animal_id}-{session}-{stack_idx} volume '
'{volume_id} roi {roi_id}').format(**roi_coord)
slack_user.notify(file=img_filename, file_title=msg)
@schema
class CorrectedStack(dj.Computed):
definition = """ # all slices of each stack after corrections.
-> Stitching.Volume # animal_id, session, stack_idx, volume_id
---
z : float # (um) center of volume in the motor coordinate system (cortex is at 0)
y : float # (um) center of volume in the motor coordinate system
x : float # (um) center of volume in the motor coordinate system
px_depth : smallint # number of slices
px_height : smallint # lines per frame
px_width : smallint # pixels per line
um_depth : float # depth in microns
um_height : float # height in microns
um_width : float # width in microns
surf_z : float # (um) depth of first slice - half a z step (cortex is at z=0)
"""
class Slice(dj.Part):
definition = """ # single slice of one stack
-> CorrectedStack
-> shared.Channel
islice : smallint # index of slice in volume
---
slice : longblob # image (height x width)
"""
def _make_tuples(self, key):
print('Correcting stack', key)
for channel in range((StackInfo() & key).fetch1('nchannels')):
# Correct ROIs
rois = []
for roi_tuple in (StackInfo.ROI() * Stitching.ROICoordinates() & key).fetch():
# Load ROI
roi_filename = (experiment.Stack.Filename() &
roi_tuple).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Map: Apply corrections to each field in parallel
f = performance.parallel_correct_stack # function to map
raster_phase = (RasterCorrection() & roi_tuple).fetch1('raster_phase')
fill_fraction = (StackInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & roi_tuple).fetch1('y_shifts',
'x_shifts')
field_ids = roi_tuple['field_ids']
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'y_shifts': y_shifts,
'x_shifts': x_shifts})
# Reduce: Collect results
corrected_roi = np.empty((roi_tuple['roi_px_depth'],
roi_tuple['roi_px_height'],
roi_tuple['roi_px_width']), dtype=np.float32)
for field_idx, corrected_field in results:
corrected_roi[field_idx] = corrected_field
# Create ROI object (with pixel x, y, z coordinates)
px_z = roi_tuple['roi_z'] * (roi_tuple['roi_px_depth'] /
roi_tuple['roi_um_depth'])
ys = list(roi_tuple['stitch_ys'])
xs = list(roi_tuple['stitch_xs'])
rois.append(stitching.StitchedROI(corrected_roi, x=xs, y=ys, z=px_z,
id_=roi_tuple['roi_id']))
def join_rows(rois_):
""" Iteratively join all rois that overlap in the same row."""
sorted_rois = sorted(rois_, key=lambda roi: (roi.x, roi.y))
prev_num_rois = float('inf')
while len(sorted_rois) < prev_num_rois:
prev_num_rois = len(sorted_rois)
for left, right in itertools.combinations(sorted_rois, 2):
if left.is_aside_to(right):
left_xs = [s.x for s in left.slices]
left_ys = [s.y for s in left.slices]
right.join_with(left, left_xs, left_ys)
sorted_rois.remove(left)
break # restart joining
return sorted_rois
# Stitch all rois together. This is convoluted because smooth blending in
# join_with assumes rois are next to (not below or atop of) each other
prev_num_rois = float('Inf') # to enter the loop at least once
while len(rois) < prev_num_rois:
prev_num_rois = len(rois)
# Join rows
rois = join_rows(rois)
# Join columns
[roi.rot90() for roi in rois]
rois = join_rows(rois)
[roi.rot270() for roi in rois]
# Check stitching went alright
if len(rois) > 1:
msg = 'ROIs for volume {} could not be stitched properly'.format(key)
raise PipelineException(msg)
stitched = rois[0]
# Insert in CorrectedStack
roi_info = StackInfo.ROI() & key & {'roi_id': stitched.roi_coordinates[0].id}
um_per_px = roi_info.microns_per_pixel
tuple_ = key.copy()
tuple_['z'] = stitched.z * um_per_px[0]
tuple_['y'] = stitched.y * um_per_px[1]
tuple_['x'] = stitched.x * um_per_px[2]
tuple_['px_depth'] = stitched.depth
tuple_['px_height'] = stitched.height
tuple_['px_width'] = stitched.width
tuple_['um_depth'] = roi_info.fetch1('roi_um_depth') # same as original rois
tuple_['um_height'] = stitched.height * um_per_px[1]
tuple_['um_width'] = stitched.width * um_per_px[2]
tuple_['surf_z'] = (stitched.z - stitched.depth / 2) * um_per_px[0]
self.insert1(tuple_, skip_duplicates=True)
# Insert each slice
for i, slice_ in enumerate(stitched.volume):
self.Slice().insert1({**key, 'channel': channel + 1, 'islice': i + 1,
'slice': slice_})
self.notify({**key, 'channel': channel + 1})
@notify.ignore_exceptions
def notify(self, key):
import imageio
volume = (self & key).get_stack(channel=key['channel'])
volume = volume[:: int(volume.shape[0] / 8)] # volume at 8 diff depths
video_filename = '/tmp/' + key_hash(key) + '.gif'
imageio.mimsave(video_filename, float2uint8(volume), duration=1)
msg = ('corrected stack for {animal_id}-{session}-{stack_idx} volume {volume_id} '
'channel {channel}').format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg,
channel='#pipeline_quality')
@property
def microns_per_pixel(self):
""" Returns an array with microns per pixel in depth, height and width. """
um_dims = self.fetch1('um_depth', 'um_height', 'um_width')
px_dims = self.fetch1('px_depth', 'px_height', 'px_width')
return np.array([um_dim / px_dim for um_dim, px_dim in zip(um_dims, px_dims)])
def get_stack(self, channel=1):
""" Get full stack (num_slices, height, width).
:param int channel: What channel to use. Starts at 1
:returns The stack: a (num_slices, image_height, image_width) array.
:rtype: np.array (float32)
"""
slice_rel = (CorrectedStack.Slice() & self & {'channel': channel})
slices = slice_rel.fetch('slice', order_by='islice')
return np.stack(slices)
def save_as_tiff(self, filename='stack.tif'):
""" Save current stack as a tiff file."""
from tifffile import imsave
# Create a composite interleaving channels
height, width, depth = self.fetch1('px_height', 'px_width', 'px_depth')
num_channels = (StackInfo() & self).fetch1('nchannels')
composite = np.zeros([num_channels * depth, height, width], dtype=np.float32)
for i in range(num_channels):
composite[i::num_channels] = self.get_stack(i + 1)
# Save
print('Saving file at:', filename)
imsave(filename, composite)
def save_video(self, filename='stack.mp4', channel=1, fps=10, dpi=250):
""" Creates an animation video showing a fly-over of the stack (top to bottom).
:param string filename: Output filename (path + filename)
:param int channel: What channel to use. Starts at 1
:param int fps: Number of slices shown per second.
:param int dpi: Dots per inch, controls the quality of the video.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from matplotlib import animation
stack = self.get_stack(channel=channel)
num_slices = stack.shape[0]
fig, axes = plt.subplots(1, 1, sharex=True, sharey=True)
im = fig.gca().imshow(stack[int(num_slices / 2)])
video = animation.FuncAnimation(fig, lambda i: im.set_data(stack[i]), num_slices,
interval=1000 / fps)
fig.tight_layout()
if not filename.endswith('.mp4'):
filename += '.mp4'
print('Saving video at:', filename)
print('If this takes too long, stop it and call again with dpi <', dpi,
'(default)')
video.save(filename, dpi=dpi)
return fig
@schema
class PreprocessedStack(dj.Computed):
definition = """ # Resize to 1 um^3, apply local contrast normalization and sharpen
-> CorrectedStack
-> shared.Channel
---
resized: external-stack # original stack resized to 1 um^3
lcned: external-stack # local contrast normalized stack. Filter size: (3, 25, 25)
sharpened: external-stack # sharpened stack. Filter size: 1
"""
@property
def key_source(self):
# restrict each stack to its channels
return (CorrectedStack * shared.Channel).proj() & CorrectedStack.Slice.proj()
def make(self, key):
from .utils import registration
from .utils import enhancement
# Load stack
stack = (CorrectedStack() & key).get_stack(key['channel'])
# Resize to be 1 um^3
um_sizes = (CorrectedStack & key).fetch1('um_depth', 'um_height', 'um_width')
resized = registration.resize(stack, um_sizes, desired_res=1)
# Enhance
lcned = enhancement.lcn(resized, (3, 25, 25))
# Sharpen
sharpened = enhancement.sharpen_2pimage(lcned, 1)
# Insert
self.insert1({**key, 'resized': resized, 'lcned': lcned, 'sharpened': sharpened})
@schema
class Surface(dj.Computed):
definition = """ # Calculated surface of the brain
-> PreprocessedStack
-> shared.SurfaceMethod
---
guessed_points : longblob # Array of guessed depths stored in (z,y,x) format
surface_im : longblob # Matrix of fitted depth for each pixel in stack. Value is number of pixels to surface from top of array.
lower_bound_im : longblob # Lower bound of ~90% confidence band (5th percentile across subsampled fits)
upper_bound_im : longblob # Upper bound of ~90% confidence band (95th percentile across subsampled fits)
"""
def make(self, key):
# WARNINGS
# - This code assumes the surface will be in the top half of the stack
# - Only the top half of z-values are analyzed
# - Points along the edge are dropped to avoid errors due to blank space left by stack registration
# - This code assumes the surface median intensity should be in the bottom 60% of the range of values over z
# - e.g. if intensities range from 10 to 20, surface points must have an intensity < .6*(20-10) + 10 = 17.5
# - This is within the 2r x 2r window being analyzed
# - This code drops any 2r x 2r field where the first median value is above the 30th-percentile of the whole stack.
# - Windows where the final median intensity is below 10 are removed
# - Attempts to replace this fixed threshold with a percentile-based one have failed
# - This code drops guessed depths > 95th-percentile and < 5th-percentile to be more robust to outliers
valid_method_ids = [1] # Used to check if method is implemented
# SETTINGS
# Note: Initial parameters for fitting are set further down
r = 50 # Radius of square in pixels
upper_threshold_percent = 0.6 # Surface median intensity should be in the bottom X% of the *range* of medians
gaussian_blur_size = 5 # Size of gaussian blur applied to slice
min_points_allowed = 10 # If there are less than X points after filtering, throw an error
bounds = ([0, 0, np.NINF, np.NINF, np.NINF], [np.Inf, np.Inf, np.Inf, np.Inf, np.Inf]) # Bounds for paraboloid fit
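# The nonnegative lower bounds on a and b force a convex (bowl-shaped) paraboloid fit in z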
ss_percent = 0.40 # Percentage of points to subsample for robustness check
num_iterations = 1000 # Number of iterations to use for robustness check
# DEFINITIONS
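# Elliptic paraboloid model of the surface: z(x, y) = a*x**2 + b*y**2 + c*x + d*y + f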
def surface_eqn(data, a, b, c, d, f):
x, y = data
return a * x ** 2 + b * y ** 2 + c * x + d * y + f
# MAIN BODY
if int(key['surface_method_id']) not in valid_method_ids:
raise PipelineException(f'Error: surface_method_id {key["surface_method_id"]} is not implemented')
print('Calculating surface of brain for stack', key)
full_stack = (PreprocessedStack & key).fetch1('resized')
depth, height, width = full_stack.shape
surface_guess_map = []
r_xs = np.arange(r, width - width % r, r * 2)[1:-1]
r_ys = np.arange(r, height - height % r, r * 2)[1:-1]
full_mesh_x, full_mesh_y = np.meshgrid(np.arange(width), np.arange(height))
# Surface z should be below this value
z_lim = int(depth / 2)
# Median intensity of the first frame in each window should be less than this value
z_0_upper_threshold = np.percentile(full_stack, 30)
for x in r_xs:
for y in r_ys:
stack_slice_medians = np.percentile(full_stack[0:z_lim, y - r:y + r, x - r:x + r], 50, axis=(1, 2))
blurred_slice = ndimage.gaussian_filter1d(stack_slice_medians, gaussian_blur_size)
upper_threshold_value = upper_threshold_percent * (
blurred_slice.max() - blurred_slice.min()) + blurred_slice.min()
upper_threshold_idx = np.where(blurred_slice > upper_threshold_value)[0][0]
stack_slice_derivative = ndimage.sobel(blurred_slice)
surface_z = np.argmax(stack_slice_derivative)
if ((surface_z < upper_threshold_idx) and (blurred_slice[0] < z_0_upper_threshold) and
(blurred_slice[-1] > 10)):
surface_guess_map.append((surface_z, y, x))
if len(surface_guess_map) < min_points_allowed:
raise PipelineException(f"Surface calculation could not find enough valid points for {key}. Only "
f"{len(surface_guess_map)} detected")
# Drop the z-values lower than 5th-percentile or greater than 95th-percentile
arr = np.array(surface_guess_map)
top = np.percentile(arr[:, 0], 95)
bot = np.percentile(arr[:, 0], 5)
surface_guess_map = arr[np.logical_and(arr[:, 0] > bot, arr[:, 0] < top)]
# Guess for initial parameters
initial = [1, 1, int(width / 2), int(height / 2), 1]
popt, pcov = optimize.curve_fit(surface_eqn, (surface_guess_map[:, 2], surface_guess_map[:, 1]),
surface_guess_map[:, 0], p0=initial, maxfev=10000, bounds=bounds)
calculated_surface_map = surface_eqn((full_mesh_x, full_mesh_y), *popt)
all_sub_fitted_z = np.zeros((num_iterations, height, width))
for i in np.arange(num_iterations):
indices = np.random.choice(surface_guess_map.shape[0], int(surface_guess_map.shape[0] * ss_percent),
replace=False)
subsample = surface_guess_map[indices]
sub_popt, sub_pcov = optimize.curve_fit(surface_eqn, (subsample[:, 2], subsample[:, 1]), subsample[:, 0],
p0=initial, maxfev=10000, bounds=bounds)
all_sub_fitted_z[i, :, :] = surface_eqn((full_mesh_x, full_mesh_y), *sub_popt)
z_min_matrix = np.percentile(all_sub_fitted_z, 5, axis=0)
z_max_matrix = np.percentile(all_sub_fitted_z, 95, axis=0)
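# The 5th/95th percentiles across the subsampled fits give an approximate 90% confidence band for the surface depth at each pixel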
surface_key = {**key, 'guessed_points': surface_guess_map, 'surface_im': calculated_surface_map,
'lower_bound_im': z_min_matrix, 'upper_bound_im': z_max_matrix}
self.insert1(surface_key)
def plot_surface3d(self, fig_height=7, fig_width=9):
""" Plot guessed surface points and fitted surface mesh in 3D
:param fig_height: Height of returned figure
:param fig_width: Width of returned figure
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
surface_guess_map, fitted_surface = self.fetch1('guessed_points', 'surface_im')
surface_height, surface_width = fitted_surface.shape
mesh_x, mesh_y = np.meshgrid(np.arange(surface_width), np.arange(surface_height))
fig = plt.figure(figsize=(fig_width, fig_height))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(mesh_x, mesh_y, fitted_surface, cmap=cm.coolwarm, linewidth=0, antialiased=False,
alpha=0.5)
ax.scatter(surface_guess_map[:, 2], surface_guess_map[:, 1], surface_guess_map[:, 0], color='grey')
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.invert_zaxis()
return fig
def plot_surface2d(self, r=50, z=None, fig_height=10, fig_width=20):
""" Plot grid of guessed points and fitted surface depths spaced 2r apart on top of stack slice at depth = z
:param r: Defines radius of square for each grid point
:param z: Pixel depth of stack to show behind depth grid
:param fig_height: Height of returned figure
:param fig_width: Width of returned figure
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from matplotlib import cm
full_stack = (PreprocessedStack & self).fetch1('resized')
stack_depth, stack_height, stack_width = full_stack.shape
surface_guess_map, fitted_surface = self.fetch1('guessed_points', 'surface_im')
fig, axes = plt.subplots(1, 2, figsize=(fig_width, fig_height))
r_xs = np.arange(r, stack_width - stack_width % r, r * 2)
r_ys = np.arange(r, stack_height - stack_height % r, r * 2)
r_mesh_x, r_mesh_y = np.meshgrid(r_xs, r_ys)
# Using median of depth to pick slice of stack to show if not defined
if z is None:
z = np.median(fitted_surface)
if z < 0 or z > stack_depth:
raise PipelineException(f'Error: Z parameter {z} is out of bounds for stack with depth {stack_depth}')
vmin = np.min((np.min(fitted_surface), np.min(surface_guess_map[:, 0])))
vmax = np.max((np.max(fitted_surface), np.max(surface_guess_map[:, 0])))
guessed_scatter = axes[0].scatter(x=surface_guess_map[:, 2], y=surface_guess_map[:, 1],
c=surface_guess_map[:, 0], cmap=cm.hot, vmin=vmin, vmax=vmax)
fitted_scatter = axes[1].scatter(x=r_mesh_x, y=r_mesh_y, c=fitted_surface[r_mesh_y, r_mesh_x], cmap=cm.hot,
vmin=vmin, vmax=vmax)
for point in surface_guess_map:
axes[0].annotate(int(point[0]), (point[2], point[1]), color='white')
for x in r_xs:
for y in r_ys:
axes[1].annotate(int(fitted_surface[y, x]), (x, y), color='white')
fig.colorbar(guessed_scatter, ax=axes[0], fraction=0.05)
axes[0].set_title(f'Guessed Depth, Z = {int(z)}')
fig.colorbar(fitted_scatter, ax=axes[1], fraction=0.05)
axes[1].set_title(f'Fitted Depth, Z = {int(z)}')
for ax in axes:
ax.imshow(full_stack[int(z), :, :])
ax.set_axis_off()
return fig
@schema
class SegmentationTask(dj.Manual):
definition = """ # defines the target, the method and the channel to use for segmentation
-> CorrectedStack
-> shared.Channel
-> shared.StackSegmMethod
---
-> experiment.Compartment
"""
def fill(self, key, channel=1, stacksegm_method=2, compartment='soma'):
for stack_key in (CorrectedStack() & key).fetch(dj.key):
tuple_ = {**stack_key, 'channel': channel,
'stacksegm_method': stacksegm_method,
'compartment': compartment}
self.insert1(tuple_, ignore_extra_fields=True, skip_duplicates=True)
@schema
class Segmentation(dj.Computed):
definition = """ # 3-d stack segmentation
-> PreprocessedStack
-> SegmentationTask
---
segmentation : external-stack # voxel-wise cell-ids (0 for background)
nobjects : int # number of cells found
"""
class ConvNet(dj.Part):
definition = """ # attributes particular to convnet based methods
-> master
---
centroids : external-stack # voxel-wise probability of centroids
probs : external-stack # voxel-wise probability of cell nuclei
seg_threshold : float # threshold used for the probability maps
min_voxels : int # minimum number of voxels (in cubic microns)
max_voxels : int # maximum number of voxels (in cubic microns)
compactness_factor : float # compactness factor used for the watershed segmentation
"""
def _make_tuples(self, key):
from .utils import segmentation3d
# Set params
seg_threshold = 0.8
min_voxels = 65 # sphere of diameter 5
max_voxels = 4186 # sphere of diameter 20
compactness_factor = 0.05 # bigger produces rounder cells
pad_mode = 'reflect' # any valid mode in np.pad
# Get stack at 1 um**3 voxels
resized = (PreprocessedStack & key).fetch1('resized')
# Segment
if key['stacksegm_method'] not in [1, 2]:
raise PipelineException('Unrecognized stack segmentation method: {}'.format(
key['stacksegm_method']))
method = 'single' if key['stacksegm_method'] == 1 else 'ensemble'
centroids, probs, segmentation = segmentation3d.segment(resized, method, pad_mode,
seg_threshold, min_voxels,
max_voxels,
compactness_factor)
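# segmentation3d.segment presumably returns per-voxel centroid probabilities, per-voxel nuclei
# probabilities and an integer label volume (0 = background), matching the ConvNet part table below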
# Insert
self.insert1({**key, 'nobjects': segmentation.max(),
'segmentation': segmentation})
self.ConvNet().insert1({**key, 'centroids': centroids, 'probs': probs,
'seg_threshold': seg_threshold, 'min_voxels': min_voxels,
'max_voxels': max_voxels,
'compactness_factor': compactness_factor})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
import imageio
from bl3d import utils
volume = (self & key).fetch1('segmentation')
volume = volume[:: int(volume.shape[0] / 8)] # volume at 8 diff depths
colored = utils.colorize_label(volume)
video_filename = '/tmp/' + key_hash(key) + '.gif'
imageio.mimsave(video_filename, colored, duration=1)
msg = 'segmentation for {animal_id}-{session}-{stack_idx}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg,
channel='#pipeline_quality')
@schema
class RegistrationTask(dj.Manual):
definition = """ # declare scan fields to register to a stack as well as channels and method used
-> CorrectedStack.proj(stack_session='session') # animal_id, stack_session, stack_idx, volume_id
-> shared.Channel.proj(stack_channel='channel')
-> experiment.Scan.proj(scan_session='session') # animal_id, scan_session, scan_idx
-> shared.Channel.proj(scan_channel='channel')
-> shared.Field
-> shared.RegistrationMethod
"""
def fill(self, stack_key, scan_key, stack_channel=1, scan_channel=1, method=5):
# Add stack attributes
stack_rel = CorrectedStack() & stack_key
if len(stack_rel) > 1:
raise PipelineException('More than one stack match stack_key {}'.format(
stack_key))
tuple_ = stack_rel.proj(stack_session='session').fetch1()
# Add common attributes
tuple_['stack_channel'] = stack_channel
tuple_['scan_channel'] = scan_channel
tuple_['registration_method'] = method
# Add scan attributes
fields_rel = reso.ScanInfo.Field.proj() + meso.ScanInfo.Field.proj() & scan_key
scan_animal_ids = np.unique(fields_rel.fetch('animal_id'))
if len(scan_animal_ids) > 1 or scan_animal_ids[0] != tuple_['animal_id']:
raise PipelineException('animal_id of stack and scan do not match.')
for field in fields_rel.fetch():
RegistrationTask().insert1({**tuple_, 'scan_session': field['session'],
'scan_idx': field['scan_idx'],
'field': field['field']}, skip_duplicates=True)
@schema
class Registration(dj.Computed):
""" Our affine matrix A is represented as the usual 4 x 4 matrix using homogeneous
coordinates, i.e., each point p is an [x, y, z, 1] vector.
Because each field is flat, the original z coordinate will be the same at each grid
position (zero) and thus it won't affect its final position, so our affine matrix has
only 9 parameters: a11, a21, a31, a12, a22, a32, a14, a24 and a34.
"""
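# For reference, with homogeneous input points [x, y, 0, 1] the third column of A never
# contributes, so only these nine entries are learned and stored (as separate columns):
#     [[a11, a12, ., a14],
#      [a21, a22, ., a24],
#      [a31, a32, ., a34],
#      [  0,   0, ., 1  ]]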
"""
definition = """ # align a 2-d scan field to a stack
-> PreprocessedStack.proj(stack_session='session', stack_channel='channel')
-> RegistrationTask
"""
@property
def key_source(self):
stacks = PreprocessedStack.proj(stack_session='session', stack_channel='channel')
return stacks * RegistrationTask & {'registration_method': 5}
class Rigid(dj.Part):
definition = """ # 3-d template matching keeping the stack straight
-> master
---
reg_x : float # (um) center of field in motor coordinate system
reg_y : float # (um) center of field in motor coordinate system
reg_z : float # (um) center of field in motor coordinate system
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Affine(dj.Part):
definition = """ # affine matrix learned via gradient ascent
-> master
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class NonRigid(dj.Part):
definition = """ # affine plus deformation field learned via gradient descent
-> master
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
landmarks : longblob # (um) x, y position of each landmark (num_landmarks x 2) assuming center of field is at (0, 0)
deformations : longblob # (um) x, y, z deformations per landmark (num_landmarks x 3)
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Params(dj.Part):
definition = """ # document some parameters used for the registration
-> master
---
rigid_zrange : int # microns above and below experimenter's estimate (in z) to search for rigid registration
lr_linear : float # learning rate for the linear part of the affine matrix
lr_translation : float # learning rate for the translation vector
affine_iters : int # number of iterations to learn the affine registration
random_seed : int # seed used to initialize landmark deformations
landmark_gap : int # number of microns between landmarks
rbf_radius : int # critical radius for the gaussian radial basis function
lr_deformations : float # learning rate for the deformation values
wd_deformations : float # regularization term to control size of the deformations
smoothness_factor : float # regularization term to control curvature of warping field
nonrigid_iters : int # number of iterations to optimize for the non-rigid parameters
"""
def make(self, key):
from .utils import registration
from .utils import enhancement
# Set params
rigid_zrange = 80 # microns to search above and below estimated z for rigid registration
lr_linear = 0.001 # learning rate / step size for the linear part of the affine matrix
lr_translation = 1 # learning rate / step size for the translation vector
affine_iters = 200 # number of optimization iterations to learn the affine parameters
random_seed = 1234 # seed for torch random number generator (used to initialize deformations)
landmark_gap = 100 # spacing for the landmarks
rbf_radius = 150 # critical radius for the gaussian rbf
lr_deformations = 0.1 # learning rate / step size for deformation values
wd_deformations = 1e-4 # weight decay for deformations; controls their size
smoothness_factor = 0.01 # factor to keep the deformation field smooth
nonrigid_iters = 200 # number of optimization iterations for the nonrigid parameters
# Get enhanced stack
stack_key = {'animal_id': key['animal_id'], 'session': key['stack_session'],
'stack_idx': key['stack_idx'], 'volume_id': key['volume_id'],
'channel': key['stack_channel']}
original_stack = (PreprocessedStack & stack_key).fetch1('resized')
stack = (PreprocessedStack & stack_key).fetch1('sharpened')
# Get field
field_key = {'animal_id': key['animal_id'], 'session': key['scan_session'],
'scan_idx': key['scan_idx'], 'field': key['field'],
'channel': key['scan_channel']}
pipe = (reso if reso.ScanInfo & field_key else meso if meso.ScanInfo & field_key
else None)
original_field = (pipe.SummaryImages.Average & field_key).fetch1(
'average_image').astype(np.float32)
# Enhance field
field_dims = ((reso.ScanInfo if pipe == reso else meso.ScanInfo.Field) &
field_key).fetch1('um_height', 'um_width')
original_field = registration.resize(original_field, field_dims, desired_res=1)
field = enhancement.sharpen_2pimage(enhancement.lcn(original_field, (15, 15)), 1)
# Drop some edges to avoid artifacts
field = field[15:-15, 15:-15]
stack = stack[5:-5, 15:-15, 15:-15]
# RIGID REGISTRATION
from skimage import feature
# Get initial estimate of field depth from experimenters
field_z = (pipe.ScanInfo.Field & field_key).fetch1('z')
stack_z = (CorrectedStack & stack_key).fetch1('z')
z_limits = stack_z - stack.shape[0] / 2, stack_z + stack.shape[0] / 2
if field_z < z_limits[0] or field_z > z_limits[1]:
print('Warning: Estimated depth ({}) outside stack range ({}-{}).'.format(
field_z, *z_limits))
# Run registration with no rotations
px_z = field_z - stack_z + stack.shape[0] / 2 - 0.5
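# Convert the estimated field depth (um, motor coordinates) into an approximate slice index of the
# enhanced 1 um/px stack (origin at the top slice, -0.5 for pixel centers)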
mini_stack = stack[max(0, int(round(px_z - rigid_zrange))): int(round(
px_z + rigid_zrange))]
corrs = np.stack([feature.match_template(s, field, pad_input=True) for s in
mini_stack])
smooth_corrs = ndimage.gaussian_filter(corrs, 0.7)
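# Lightly smooth the correlation volume so the best match is a consistent peak rather than a single noisy voxel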
# Get results
min_z = max(0, int(round(px_z - rigid_zrange)))
min_y = int(round(0.05 * stack.shape[1]))
min_x = int(round(0.05 * stack.shape[2]))
mini_corrs = smooth_corrs[:, min_y:-min_y, min_x:-min_x]
rig_z, rig_y, rig_x = np.unravel_index(np.argmax(mini_corrs), mini_corrs.shape)
# Rewrite coordinates with respect to original z
rig_z = (min_z + rig_z + 0.5) - stack.shape[0] / 2
rig_y = (min_y + rig_y + 0.5) - stack.shape[1] / 2
rig_x = (min_x + rig_x + 0.5) - stack.shape[2] / 2
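# rig_z/rig_y/rig_x are now offsets (um) from the stack center; the stack's motor coordinates are
# added back when the results are inserted below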
del (field_z, stack_z, z_limits, px_z, mini_stack, corrs, smooth_corrs, min_z,
min_y, min_x, mini_corrs)
# AFFINE REGISTRATION
import torch
from torch import optim
# Create field grid (height x width x 2)
grid = registration.create_grid(field.shape)
# Create torch tensors
stack_ = torch.as_tensor(stack, dtype=torch.float32)
field_ = torch.as_tensor(field, dtype=torch.float32)
grid_ = torch.as_tensor(grid, dtype=torch.float32)
# Define parameters and optimizer
linear = torch.nn.Parameter(torch.eye(3)[:, :2]) # first two columns of rotation matrix
translation = torch.nn.Parameter(torch.tensor([rig_x, rig_y, rig_z])) # translation vector
affine_optimizer = optim.Adam([{'params': linear, 'lr': lr_linear},
{'params': translation, 'lr': lr_translation}])
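# 'linear' (3 x 2) maps field (x, y) into stack (x, y, z); 'translation' places the field center in
# the stack and is initialized at the rigid solution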
# Optimize
for i in range(affine_iters):
# Zero gradients
affine_optimizer.zero_grad()
# Compute gradients
pred_grid = registration.affine_product(grid_, linear, translation) # w x h x 3
pred_field = registration.sample_grid(stack_, pred_grid)
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
print('Corr at iteration {}: {:5.4f}'.format(i, -corr_loss))
corr_loss.backward()
# Update
affine_optimizer.step()
# Save them (originals will be modified during non-rigid registration)
affine_linear = linear.detach().clone()
affine_translation = translation.detach().clone()
# NON-RIGID REGISTRATION
# Inspired by the demons algorithm (Thirion, 1998)
torch.manual_seed(random_seed) # we use random initialization below
# Create landmarks (and their corresponding deformations)
first_y = int(round((field.shape[0] % landmark_gap) / 2))
first_x = int(round((field.shape[1] % landmark_gap) / 2))
landmarks = grid_[first_x::landmark_gap, first_y::landmark_gap].contiguous().view(
-1, 2) # num_landmarks x 2
# Compute rbf scores between landmarks and grid coordinates and between landmarks
grid_distances = torch.norm(grid_.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2) # w x h x num_landmarks
landmark_distances = torch.norm(landmarks.unsqueeze(-2) - landmarks, dim=-1)
landmark_scores = torch.exp(-(landmark_distances * (1 / 200)) ** 2) # num_landmarks x num_landmarks
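# grid_scores spread each landmark's deformation over the field via a gaussian RBF of radius rbf_radius;
# landmark_scores weight the pairwise smoothness term by landmark proximity (hard-coded 200 um length scale)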
# Define parameters and optimizer
deformations = torch.nn.Parameter(torch.randn((landmarks.shape[0], 3)) / 10) # N(0, 0.1)
nonrigid_optimizer = optim.Adam([deformations], lr=lr_deformations,
weight_decay=wd_deformations)
# Optimize
for i in range(nonrigid_iters):
# Zero gradients
affine_optimizer.zero_grad() # we reuse affine_optimizer so the affine matrix changes slowly
nonrigid_optimizer.zero_grad()
# Compute grid with radial basis
affine_grid = registration.affine_product(grid_, linear, translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
pred_field = registration.sample_grid(stack_, pred_grid)
# Compute loss
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
# Compute cosine similarity between landmark deformations (and weight them by distance)
norm_deformations = deformations / torch.norm(deformations, dim=-1,
keepdim=True)
cosine_similarity = torch.mm(norm_deformations, norm_deformations.t())
reg_term = -((cosine_similarity * landmark_scores).sum() /
landmark_scores.sum())
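# reg_term rewards nearby landmarks for deforming in similar directions; it is scaled by
# smoothness_factor and added to the correlation loss below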
# Compute gradients
loss = corr_loss + smoothness_factor * reg_term
print('Corr/loss at iteration {}: {:5.4f}/{:5.4f}'.format(i, -corr_loss,
loss))
loss.backward()
# Update
affine_optimizer.step()
nonrigid_optimizer.step()
# Save final results
nonrigid_linear = linear.detach().clone()
nonrigid_translation = translation.detach().clone()
nonrigid_landmarks = landmarks.clone()
nonrigid_deformations = deformations.detach().clone()
# COMPUTE SCORES (USING THE ENHANCED AND CROPPED VERSION OF THE FIELD)
# Rigid
pred_grid = registration.affine_product(grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
pred_field = registration.sample_grid(stack_, pred_grid).numpy()
rig_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Affine
pred_grid = registration.affine_product(grid_, affine_linear, affine_translation)
pred_field = registration.sample_grid(stack_, pred_grid).numpy()
affine_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Non-rigid
affine_grid = registration.affine_product(grid_, nonrigid_linear,
nonrigid_translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, nonrigid_deformations))
pred_grid = affine_grid + warping_field
pred_field = registration.sample_grid(stack_, pred_grid).numpy()
nonrigid_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
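# All three scores are Pearson correlations between the enhanced (cropped) field and the field
# sampled from the enhanced stack at the registered position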
# FIND FIELDS IN STACK
# Create grid of original size (h x w x 2)
original_grid = registration.create_grid(original_field.shape)
# Create torch tensors
original_stack_ = torch.as_tensor(original_stack, dtype=torch.float32)
original_grid_ = torch.as_tensor(original_grid, dtype=torch.float32)
# Rigid
pred_grid = registration.affine_product(original_grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
rig_field = registration.sample_grid(original_stack_, pred_grid).numpy()
# Affine
pred_grid = registration.affine_product(original_grid_, affine_linear,
affine_translation)
affine_field = registration.sample_grid(original_stack_, pred_grid).numpy()
# Non-rigid
affine_grid = registration.affine_product(original_grid_, nonrigid_linear,
nonrigid_translation)
original_grid_distances = torch.norm(original_grid_.unsqueeze(-2) -
nonrigid_landmarks, dim=-1)
original_grid_scores = torch.exp(-(original_grid_distances * (1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (original_grid_scores,
nonrigid_deformations))
pred_grid = affine_grid + warping_field
nonrigid_field = registration.sample_grid(original_stack_, pred_grid).numpy()
# Insert
stack_z, stack_y, stack_x = (CorrectedStack & stack_key).fetch1('z', 'y', 'x')
self.insert1(key)
self.Params.insert1({**key, 'rigid_zrange': rigid_zrange, 'lr_linear': lr_linear,
'lr_translation': lr_translation,
'affine_iters': affine_iters, 'random_seed': random_seed,
'landmark_gap': landmark_gap, 'rbf_radius': rbf_radius,
'lr_deformations': lr_deformations,
'wd_deformations': wd_deformations,
'smoothness_factor': smoothness_factor,
'nonrigid_iters': nonrigid_iters})
self.Rigid.insert1({**key, 'reg_x': stack_x + rig_x, 'reg_y': stack_y + rig_y,
'reg_z': stack_z + rig_z, 'score': rig_score,
'reg_field': rig_field})
self.Affine.insert1({**key, 'a11': affine_linear[0, 0].item(),
'a21': affine_linear[1, 0].item(),
'a31': affine_linear[2, 0].item(),
'a12': affine_linear[0, 1].item(),
'a22': affine_linear[1, 1].item(),
'a32': affine_linear[2, 1].item(),
'reg_x': stack_x + affine_translation[0].item(),
'reg_y': stack_y + affine_translation[1].item(),
'reg_z': stack_z + affine_translation[2].item(),
'score': affine_score, 'reg_field': affine_field})
self.NonRigid.insert1({**key, 'a11': nonrigid_linear[0, 0].item(),
'a21': nonrigid_linear[1, 0].item(),
'a31': nonrigid_linear[2, 0].item(),
'a12': nonrigid_linear[0, 1].item(),
'a22': nonrigid_linear[1, 1].item(),
'a32': nonrigid_linear[2, 1].item(),
'reg_x': stack_x + nonrigid_translation[0].item(),
'reg_y': stack_y + nonrigid_translation[1].item(),
'reg_z': stack_z + nonrigid_translation[2].item(),
'landmarks': nonrigid_landmarks.numpy(),
'deformations': nonrigid_deformations.numpy(),
'score': nonrigid_score, 'reg_field': nonrigid_field})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
# No notifications
pass
def get_grid(self, type='affine', desired_res=1):
""" Get registered grid for this registration. """
import torch
from .utils import registration
# Get field
field_key = self.proj(session='scan_session')
field_dims = (reso.ScanInfo & field_key or meso.ScanInfo.Field &
field_key).fetch1('um_height', 'um_width')
# Create grid at desired resolution
grid = registration.create_grid(field_dims, desired_res=desired_res) # h x w x 2
grid = torch.as_tensor(grid, dtype=torch.float32)
# Apply required transform
if type == 'rigid':
params = (Registration.Rigid & self).fetch1('reg_x', 'reg_y', 'reg_z')
delta_x, delta_y, delta_z = params
linear = torch.eye(3)[:, :2]
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'affine':
params = (Registration.Affine & self).fetch1('a11', 'a21', 'a31', 'a12',
'a22', 'a32', 'reg_x', 'reg_y',
'reg_z')
a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'nonrigid':
params = (Registration.NonRigid & self).fetch1('a11', 'a21', 'a31', 'a12',
'a22', 'a32', 'reg_x', 'reg_y',
'reg_z', 'landmarks',
'deformations')
rbf_radius = (Registration.Params & self).fetch1('rbf_radius')
a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z, landmarks, deformations = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
landmarks = torch.from_numpy(landmarks)
deformations = torch.from_numpy(deformations)
affine_grid = registration.affine_product(grid, linear, translation)
grid_distances = torch.norm(grid.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
else:
raise PipelineException('Unrecognized registration.')
return pred_grid.numpy()
def plot_grids(self, desired_res=5):
""" Plot the grids for this different registrations as 3-d surfaces."""
# Get grids at desired resoultion
rig_grid = self.get_grid('rigid', desired_res)
affine_grid = self.get_grid('affine', desired_res)
nonrigid_grid = self.get_grid('nonrigid', desired_res)
# Plot surfaces
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
fig = plt.figure(figsize=plt.figaspect(0.5) * 1.5)
ax = fig.gca(projection='3d')
ax.plot_surface(rig_grid[..., 0], rig_grid[..., 1], rig_grid[..., 2], alpha=0.5)
ax.plot_surface(affine_grid[..., 0], affine_grid[..., 1], affine_grid[..., 2],
alpha=0.5)
ax.plot_surface(nonrigid_grid[..., 0], nonrigid_grid[..., 1],
nonrigid_grid[..., 2], alpha=0.5)
ax.set_aspect('equal')
ax.invert_zaxis()
return fig
@schema
class FieldSegmentation(dj.Computed):
definition = """ # structural segmentation of a 2-d field (using the affine registration)
-> Segmentation.proj(stack_session='session', stacksegm_channel='channel')
-> Registration
---
segm_field : longblob # field (height x width) of cell ids at 1 um/px
"""
class StackUnit(dj.Part):
definition = """ # single unit from the stack that appears in the field
-> master
sunit_id : int # id in the stack segmentation
---
depth : int # (um) size in z
height : int # (um) size in y
width : int # (um) size in x
volume : float # (um) volume of the 3-d unit
area : float # (um) area of the 2-d mask
sunit_z : float # (um) centroid for the 3d unit in the motor coordinate system
sunit_y : float # (um) centroid for the 3d unit in the motor coordinate system
sunit_x : float # (um) centroid for the 3d unit in the motor coordinate system
mask_z : float # (um) centroid for the 2d mask in the motor coordinate system
mask_y : float # (um) centroid for the 2d mask in the motor coordinate system
mask_x : float # (um) centroid for the 2d mask in the motor coordinate system
distance : float # (um) euclidean distance between centroid of 2-d mask and 3-d unit
"""
def _make_tuples(self, key):
from skimage import measure
# Get structural segmentation
stack_key = {'animal_id': key['animal_id'], 'session': key['stack_session'],
'stack_idx': key['stack_idx'], 'volume_id': key['volume_id'],
'channel': key['stacksegm_channel']}
instance = (Segmentation & stack_key).fetch1('segmentation')
# Get segmented field
grid = (Registration & key).get_grid(type='affine', desired_res=1)
stack_center = np.array((CorrectedStack & stack_key).fetch1('z', 'y', 'x'))
px_grid = (grid[..., ::-1] - stack_center - 0.5 + np.array(instance.shape) / 2)
segmented_field = ndimage.map_coordinates(instance, np.moveaxis(px_grid, -1, 0),
order=0) # nearest neighbor sampling
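# grid holds (x, y, z) motor coordinates at 1 um/px; flipping to (z, y, x) and re-centering on the
# stack converts them to voxel indices, and order=0 keeps the integer cell ids from being interpolated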
# Insert in FieldSegmentation
self.insert1({**key, 'segm_field': segmented_field})
# Insert each StackUnit
instance_props = measure.regionprops(instance)
instance_labels = np.array([p.label for p in instance_props])
for prop in measure.regionprops(segmented_field):
sunit_id = prop.label
instance_prop = instance_props[np.argmax(instance_labels == sunit_id)]
depth = (instance_prop.bbox[3] - instance_prop.bbox[0])
height = (instance_prop.bbox[4] - instance_prop.bbox[1])
width = (instance_prop.bbox[5] - instance_prop.bbox[2])
volume = instance_prop.area
sunit_z, sunit_y, sunit_x = (stack_center + np.array(instance_prop.centroid) -
np.array(instance.shape) / 2 + 0.5)
binary_sunit = segmented_field == sunit_id
area = np.count_nonzero(binary_sunit)
px_y, px_x = ndimage.measurements.center_of_mass(binary_sunit)
px_coords = np.array([[px_y], [px_x]])
mask_x, mask_y, mask_z = [ndimage.map_coordinates(grid[..., i], px_coords,
order=1)[0] for i in
range(3)]
distance = np.sqrt((sunit_z - mask_z) ** 2 + (sunit_y - mask_y) ** 2 +
(sunit_x - mask_x) ** 2)
# Insert in StackUnit
self.StackUnit.insert1({**key, 'sunit_id': sunit_id, 'depth': depth,
'height': height, 'width': width, 'volume': volume,
'area': area, 'sunit_z': sunit_z, 'sunit_y': sunit_y,
'sunit_x': sunit_x, 'mask_z': mask_z,
'mask_y': mask_y, 'mask_x': mask_x,
'distance': distance})
@schema
class RegistrationOverTime(dj.Computed):
definition = """ # register a field at different timepoints of recording
-> PreprocessedStack.proj(stack_session='session', stack_channel='channel')
-> RegistrationTask
"""
@property
def key_source(self):
stacks = PreprocessedStack.proj(stack_session='session', stack_channel='channel')
return stacks * RegistrationTask & {'registration_method': 5}
class Chunk(dj.Part):
definition = """ # single registered chunk
-> master
frame_num : int # frame number of the frame in the middle of this chunk
---
initial_frame : int # initial frame used in this chunk (1-based)
final_frame : int # final frame used in this chunk (1-based)
avg_chunk : longblob # average field used for registration
"""
def get_grid(self, type='nonrigid', desired_res=1):
# TODO: Taken verbatim from Registration (minor changes for formatting), refactor
""" Get registered grid for this registration. """
import torch
from .utils import registration
# Get field
field_key = self.proj(session='scan_session')
field_dims = (reso.ScanInfo & field_key or meso.ScanInfo.Field &
field_key).fetch1('um_height', 'um_width')
# Create grid at desired resolution
grid = registration.create_grid(field_dims, desired_res=desired_res) # h x w x 2
grid = torch.as_tensor(grid, dtype=torch.float32)
# Apply required transform
if type == 'rigid':
params = (RegistrationOverTime.Rigid & self).fetch1('reg_x', 'reg_y',
'reg_z')
delta_x, delta_y, delta_z = params
linear = torch.eye(3)[:, :2]
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'affine':
params = (RegistrationOverTime.Affine & self).fetch1('a11', 'a21', 'a31',
'a12', 'a22', 'a32',
'reg_x', 'reg_y',
'reg_z')
a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'nonrigid':
params = (RegistrationOverTime.NonRigid & self).fetch1('a11', 'a21',
'a31', 'a12',
'a22', 'a32',
'reg_x', 'reg_y',
'reg_z',
'landmarks',
'deformations')
rbf_radius = (RegistrationOverTime.Params & self).fetch1('rbf_radius')
(a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z, landmarks,
deformations) = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
landmarks = torch.from_numpy(landmarks)
deformations = torch.from_numpy(deformations)
affine_grid = registration.affine_product(grid, linear, translation)
grid_distances = torch.norm(grid.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
else:
raise PipelineException('Unrecognized registration.')
return pred_grid.numpy()
class Rigid(dj.Part):
definition = """ # rigid registration of a single chunk
-> RegistrationOverTime.Chunk
---
reg_x : float # (um) center of field in motor coordinate system
reg_y : float # (um) center of field in motor coordinate system
reg_z : float # (um) center of field in motor coordinate system
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Affine(dj.Part):
definition = """ # affine matrix learned via gradient ascent
-> RegistrationOverTime.Chunk
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class NonRigid(dj.Part):
definition = """ # affine plus deformation field learned via gradient descent
-> RegistrationOverTime.Chunk
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
landmarks : longblob # (um) x, y position of each landmark (num_landmarks x 2) assuming center of field is at (0, 0)
deformations : longblob # (um) x, y, z deformations per landmark (num_landmarks x 3)
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Params(dj.Part):
definition = """ # document some parameters used for the registration
-> master
---
rigid_zrange : int # microns above and below experimenter's estimate (in z) to search for rigid registration
lr_linear : float # learning rate for the linear part of the affine matrix
lr_translation : float # learning rate for the translation vector
affine_iters : int # number of iterations to learn the affine registration
random_seed : int # seed used to initialize landmark deformations
landmark_gap : int # number of microns between landmarks
rbf_radius : int # critical radius for the gaussian radial basis function
lr_deformations : float # learning rate for the deformation values
wd_deformations : float # regularization term to control size of the deformations
smoothness_factor : float # regularization term to control curvature of warping field
nonrigid_iters : int # number of iterations to optimize for the non-rigid parameters
"""
def make(self, key):
from .utils import registration
from .utils import enhancement
# Set params
rigid_zrange = 80 # microns to search above and below estimated z for rigid registration
lr_linear = 0.001 # learning rate / step size for the linear part of the affine matrix
lr_translation = 1 # learning rate / step size for the translation vector
affine_iters = 200 # number of optimization iterations to learn the affine parameters
random_seed = 1234 # seed for torch random number generator (used to initialize deformations)
landmark_gap = 100 # spacing for the landmarks
rbf_radius = 150 # critical radius for the gaussian rbf
lr_deformations = 0.1 # learning rate / step size for deformation values
wd_deformations = 1e-4 # weight decay for deformations; controls their size
smoothness_factor = 0.01 # factor to keep the deformation field smooth
nonrigid_iters = 200 # number of optimization iterations for the nonrigid parameters
# Get enhanced stack
stack_key = {'animal_id': key['animal_id'], 'session': key['stack_session'],
'stack_idx': key['stack_idx'], 'volume_id': key['volume_id'],
'channel': key['stack_channel']}
original_stack = (PreprocessedStack & stack_key).fetch1('resized')
stack = (PreprocessedStack & stack_key).fetch1('sharpened')
stack = stack[5:-5, 15:-15, 15:-15] # drop some edges
# Get corrected scan
field_key = {'animal_id': key['animal_id'], 'session': key['scan_session'],
'scan_idx': key['scan_idx'], 'field': key['field'],
'channel': key['scan_channel']}
pipe = (reso if reso.ScanInfo & field_key else meso if meso.ScanInfo & field_key
else None)
scan = RegistrationOverTime._get_corrected_scan(field_key)
# Get initial estimate of field depth from experimenters
field_z = (pipe.ScanInfo.Field & field_key).fetch1('z')
stack_z = (CorrectedStack & stack_key).fetch1('z')
z_limits = stack_z - stack.shape[0] / 2, stack_z + stack.shape[0] / 2
if field_z < z_limits[0] or field_z > z_limits[1]:
print('Warning: Estimated depth ({}) outside stack range ({}-{}).'.format(
field_z, *z_limits))
# Compute best chunk size: each lasts the same (~15 minutes)
fps = (pipe.ScanInfo & field_key).fetch1('fps')
num_frames = scan.shape[-1]
overlap = int(round(3 * 60 * fps)) # ~ 3 minutes
num_chunks = int(np.ceil((num_frames - overlap) / (15 * 60 * fps - overlap)))
chunk_size = int(np.floor((num_frames - overlap) / num_chunks + overlap)) # *
# * distributes frames in the last (incomplete) chunk to the other chunks
# Insert in RegistrationOverTime and Params (once per field)
self.insert1(key)
self.Params.insert1(
{**key, 'rigid_zrange': rigid_zrange, 'lr_linear': lr_linear,
'lr_translation': lr_translation, 'affine_iters': affine_iters,
'random_seed': random_seed, 'landmark_gap': landmark_gap,
'rbf_radius': rbf_radius, 'lr_deformations': lr_deformations,
'wd_deformations': wd_deformations, 'smoothness_factor': smoothness_factor,
'nonrigid_iters': nonrigid_iters})
# Iterate over chunks
for initial_frame in range(0, num_frames - chunk_size, chunk_size - overlap):
# Get next chunk
final_frame = initial_frame + chunk_size
chunk = scan[..., initial_frame: final_frame]
# Enhance field
field_dims = ((reso.ScanInfo if pipe == reso else meso.ScanInfo.Field) &
field_key).fetch1('um_height', 'um_width')
original_field = registration.resize(chunk.mean(-1), field_dims,
desired_res=1)
field = enhancement.sharpen_2pimage(enhancement.lcn(original_field, 15), 1)
field = field[15:-15, 15:-15] # drop some edges
# TODO: From here until Insert is taken verbatim from Registration, refactor
# RIGID REGISTRATION
from skimage import feature
# Run registration with no rotations
px_z = field_z - stack_z + stack.shape[0] / 2 - 0.5
mini_stack = stack[max(0, int(round(px_z - rigid_zrange))): int(round(
px_z + rigid_zrange))]
corrs = np.stack([feature.match_template(s, field, pad_input=True) for s in
mini_stack])
smooth_corrs = ndimage.gaussian_filter(corrs, 0.7)
# Get results
min_z = max(0, int(round(px_z - rigid_zrange)))
min_y = int(round(0.05 * stack.shape[1]))
min_x = int(round(0.05 * stack.shape[2]))
mini_corrs = smooth_corrs[:, min_y:-min_y, min_x:-min_x]
rig_z, rig_y, rig_x = np.unravel_index(np.argmax(mini_corrs),
mini_corrs.shape)
# Rewrite coordinates with respect to original z
rig_z = (min_z + rig_z + 0.5) - stack.shape[0] / 2
rig_y = (min_y + rig_y + 0.5) - stack.shape[1] / 2
rig_x = (min_x + rig_x + 0.5) - stack.shape[2] / 2
del px_z, mini_stack, corrs, smooth_corrs, min_z, min_y, min_x, mini_corrs
# AFFINE REGISTRATION
import torch
from torch import optim
# Create field grid (height x width x 2)
grid = registration.create_grid(field.shape)
# Create torch tensors
stack_ = torch.as_tensor(stack, dtype=torch.float32)
field_ = torch.as_tensor(field, dtype=torch.float32)
grid_ = torch.as_tensor(grid, dtype=torch.float32)
# Define parameters and optimizer
linear = torch.nn.Parameter(torch.eye(3)[:, :2]) # first two columns of rotation matrix
translation = torch.nn.Parameter(torch.tensor([rig_x, rig_y, rig_z])) # translation vector
affine_optimizer = optim.Adam([{'params': linear, 'lr': lr_linear},
{'params': translation, 'lr': lr_translation}])
# Optimize
for i in range(affine_iters):
# Zero gradients
affine_optimizer.zero_grad()
# Compute gradients
pred_grid = registration.affine_product(grid_, linear, translation) # w x h x 3
pred_field = registration.sample_grid(stack_, pred_grid)
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
print('Corr at iteration {}: {:5.4f}'.format(i, -corr_loss))
corr_loss.backward()
# Update
affine_optimizer.step()
# Save them (originals will be modified during non-rigid registration)
affine_linear = linear.detach().clone()
affine_translation = translation.detach().clone()
# NON-RIGID REGISTRATION
# Inspired by the demons algorithm (Thirion, 1998)
torch.manual_seed(random_seed) # we use random initialization below
# Create landmarks (and their corresponding deformations)
first_y = int(round((field.shape[0] % landmark_gap) / 2))
first_x = int(round((field.shape[1] % landmark_gap) / 2))
landmarks = grid_[first_x::landmark_gap,
first_y::landmark_gap].contiguous().view(-1, 2) # num_landmarks x 2
# Compute rbf scores between landmarks and grid coordinates and between landmarks
grid_distances = torch.norm(grid_.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2) # w x h x num_landmarks
landmark_distances = torch.norm(landmarks.unsqueeze(-2) - landmarks, dim=-1)
landmark_scores = torch.exp(-(landmark_distances * (1 / 200)) ** 2) # num_landmarks x num_landmarks
# Define parameters and optimizer
deformations = torch.nn.Parameter(torch.randn((landmarks.shape[0], 3)) / 10) # N(0, 0.1)
nonrigid_optimizer = optim.Adam([deformations], lr=lr_deformations,
weight_decay=wd_deformations)
# Optimize
for i in range(nonrigid_iters):
# Zero gradients
affine_optimizer.zero_grad() # we reuse affine_optimizer so the affine matrix changes slowly
nonrigid_optimizer.zero_grad()
# Compute grid with radial basis
affine_grid = registration.affine_product(grid_, linear, translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
pred_field = registration.sample_grid(stack_, pred_grid)
# Compute loss
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
# Compute cosine similarity between landmark deformations (and weight them by distance)
norm_deformations = deformations / torch.norm(deformations, dim=-1,
keepdim=True)
cosine_similarity = torch.mm(norm_deformations, norm_deformations.t())
reg_term = -((cosine_similarity * landmark_scores).sum() /
landmark_scores.sum())
# Compute gradients
loss = corr_loss + smoothness_factor * reg_term
print('Corr/loss at iteration {}: {:5.4f}/{:5.4f}'.format(i, -corr_loss,
loss))
loss.backward()
# Update
affine_optimizer.step()
nonrigid_optimizer.step()
# Save final results
nonrigid_linear = linear.detach().clone()
nonrigid_translation = translation.detach().clone()
nonrigid_landmarks = landmarks.clone()
nonrigid_deformations = deformations.detach().clone()
# COMPUTE SCORES (USING THE ENHANCED AND CROPPED VERSION OF THE FIELD)
# Rigid
pred_grid = registration.affine_product(grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
pred_field = registration.sample_grid(stack_, pred_grid).numpy()
rig_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Affine
pred_grid = registration.affine_product(grid_, affine_linear,
affine_translation)
pred_field = registration.sample_grid(stack_, pred_grid).numpy()
affine_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Non-rigid
affine_grid = registration.affine_product(grid_, nonrigid_linear,
nonrigid_translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, nonrigid_deformations))
pred_grid = affine_grid + warping_field
pred_field = registration.sample_grid(stack_, pred_grid).numpy()
nonrigid_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# FIND FIELDS IN STACK
# Create grid of original size (h x w x 2)
original_grid = registration.create_grid(original_field.shape)
# Create torch tensors
original_stack_ = torch.as_tensor(original_stack, dtype=torch.float32)
original_grid_ = torch.as_tensor(original_grid, dtype=torch.float32)
# Rigid
pred_grid = registration.affine_product(original_grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
rig_field = registration.sample_grid(original_stack_, pred_grid).numpy()
# Affine
pred_grid = registration.affine_product(original_grid_, affine_linear,
affine_translation)
affine_field = registration.sample_grid(original_stack_, pred_grid).numpy()
# Non-rigid
affine_grid = registration.affine_product(original_grid_, nonrigid_linear,
nonrigid_translation)
original_grid_distances = torch.norm(original_grid_.unsqueeze(-2) -
nonrigid_landmarks, dim=-1)
original_grid_scores = torch.exp(-(original_grid_distances *
(1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (original_grid_scores,
nonrigid_deformations))
pred_grid = affine_grid + warping_field
nonrigid_field = registration.sample_grid(original_stack_, pred_grid).numpy()
# Insert chunk
stack_z, stack_y, stack_x = (CorrectedStack & stack_key).fetch1('z', 'y', 'x')
frame_num = int(round((initial_frame + final_frame) / 2))
self.Chunk.insert1({**key, 'frame_num': frame_num + 1,
'initial_frame': initial_frame + 1,
'final_frame': final_frame, 'avg_chunk': original_field})
self.Rigid.insert1({**key, 'frame_num': frame_num + 1,
'reg_x': stack_x + rig_x, 'reg_y': stack_y + rig_y,
'reg_z': stack_z + rig_z, 'score': rig_score,
'reg_field': rig_field})
self.Affine.insert1({**key, 'frame_num': frame_num + 1,
'a11': affine_linear[0, 0].item(),
'a21': affine_linear[1, 0].item(),
'a31': affine_linear[2, 0].item(),
'a12': affine_linear[0, 1].item(),
'a22': affine_linear[1, 1].item(),
'a32': affine_linear[2, 1].item(),
'reg_x': stack_x + affine_translation[0].item(),
'reg_y': stack_y + affine_translation[1].item(),
'reg_z': stack_z + affine_translation[2].item(),
'score': affine_score,
'reg_field': affine_field})
self.NonRigid.insert1({**key, 'frame_num': frame_num + 1,
'a11': nonrigid_linear[0, 0].item(),
'a21': nonrigid_linear[1, 0].item(),
'a31': nonrigid_linear[2, 0].item(),
'a12': nonrigid_linear[0, 1].item(),
'a22': nonrigid_linear[1, 1].item(),
'a32': nonrigid_linear[2, 1].item(),
'reg_x': stack_x + nonrigid_translation[0].item(),
'reg_y': stack_y + nonrigid_translation[1].item(),
'reg_z': stack_z + nonrigid_translation[2].item(),
'landmarks': nonrigid_landmarks.numpy(),
'deformations': nonrigid_deformations.numpy(),
'score': nonrigid_score, 'reg_field': nonrigid_field})
# self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
frame_num, zs, scores = (self.Affine & key).fetch('frame_num', 'reg_z', 'score')
plt.plot(frame_num, -zs, zorder=1)
plt.scatter(frame_num, -zs, marker='*', s=scores * 70, zorder=2, color='r')
plt.title('Registration over time (star size represents confidence)')
plt.ylabel('z (surface at 0)')
plt.xlabel('Frames')
img_filename = '/tmp/{}.png'.format(key_hash(key))
plt.savefig(img_filename)
plt.close()
msg = ('registration over time of {animal_id}-{scan_session}-{scan_idx} field '
'{field} to {animal_id}-{stack_session}-{stack_idx}')
msg = msg.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key &
{'session': key['stack_session']})
slack_user.notify(file=img_filename, file_title=msg)
def _get_corrected_scan(key):
# Read scan
scan_filename = (experiment.Scan & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Get some params
pipe = reso if (reso.ScanInfo() & key) else meso
# Map: Correct scan in parallel
f = performance.parallel_correct_scan # function to map
raster_phase = (pipe.RasterCorrection & key).fetch1('raster_phase')
fill_fraction = (pipe.ScanInfo & key).fetch1('fill_fraction')
y_shifts, x_shifts = (pipe.MotionCorrection & key).fetch1('y_shifts', 'x_shifts')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'y_shifts': y_shifts, 'x_shifts': x_shifts}
results = performance.map_frames(f, scan, field_id=key['field'] - 1,
channel=key['channel'] - 1, kwargs=kwargs)
# Reduce: Make a single array (height x width x num_frames)
height, width, _ = results[0][1].shape
corrected_scan = np.zeros([height, width, scan.num_frames], dtype=np.float32)
for frames, chunk in results:
corrected_scan[..., frames] = chunk
return corrected_scan
def session_plot(self):
""" Create a registration plot for the session"""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Check that plot is restricted to a single stack and a single session
regot_key = self.fetch('KEY', limit=1)[0]
stack_key = {n: regot_key[n] for n in ['animal_id', 'stack_session', 'stack_idx',
'volume_id']}
session_key = {n: regot_key[n] for n in ['animal_id', 'scan_session']}
if len(self & stack_key) != len(self):
raise PipelineException('Plot can only be generated for one stack at a time')
if len(self & session_key) != len(self):
raise PipelineException('Plot can only be generated for one session at a '
'time')
# Get field times and depths
ts = []
zs = []
session_ts = (experiment.Session & regot_key &
{'session': regot_key['scan_session']}).fetch1('session_ts')
for key in self.fetch('KEY'):
field_key = {'animal_id': key['animal_id'], 'session': key['scan_session'],
'scan_idx': key['scan_idx'], 'field': key['field']}
scan_ts = (experiment.Scan & field_key).fetch1('scan_ts')
fps = (reso.ScanInfo & field_key or meso.ScanInfo & field_key).fetch1('fps')
frame_nums, field_zs = (RegistrationOverTime.Affine & key).fetch('frame_num',
'reg_z')
field_ts = (scan_ts - session_ts).seconds + frame_nums / fps # in seconds
ts.append(field_ts)
zs.append(field_zs)
# Plot
fig = plt.figure(figsize=(20, 8))
for ts_, zs_ in zip(ts, zs):
plt.plot(ts_ / 3600, zs_)
plt.title('Registered zs for {animal_id}-{scan_session} into {animal_id}-'
'{stack_session}-{stack_idx} starting at {t}'.format(t=session_ts,
**regot_key))
plt.ylabel('Registered zs')
plt.xlabel('Hours')
# Plot formatting
plt.gca().invert_yaxis()
plt.gca().yaxis.set_major_locator(ticker.MultipleLocator(10))
plt.grid(linestyle='--', alpha=0.8)
return fig
@schema
class Drift(dj.Computed):
definition = """ # assuming a linear drift, compute the rate of drift (of the affine registration)
-> RegistrationOverTime
---
z_slope : float # (um/hour) drift of the center of the field
y_slope : float # (um/hour) drift of the center of the field
x_slope : float # (um/hour) drift of the center of the field
z_rmse : float # (um) root mean squared error of the fit
y_rmse : float # (um) root mean squared error of the fit
x_rmse : float # (um) root mean squared error of the fit
"""
@property
def key_source(self):
return RegistrationOverTime.aggr(RegistrationOverTime.Chunk.proj(),
nchunks='COUNT(*)') & 'nchunks > 1'
def _make_tuples(self, key):
from sklearn import linear_model
# Get drifts per axis
frame_nums, zs, ys, xs = (RegistrationOverTime.Affine & key).fetch('frame_num',
'reg_z', 'reg_y', 'reg_x')
# Get scan fps
field_key = {**key, 'session': key['scan_session']}
fps = (reso.ScanInfo() & field_key or meso.ScanInfo() & field_key).fetch1('fps')
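# reg_z/reg_y/reg_x are the per-chunk registered positions (um) indexed by
# frame number; the fitted slope below is in um/frame and is converted to
# um/hour by multiplying with fps (frames/s) and 3600 (s/hour).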
# Fit a line through the values (robust regression)
slopes = []
rmses = []
X = frame_nums.reshape(-1, 1)
for y in [zs, ys, xs]:
model = linear_model.TheilSenRegressor()
model.fit(X, y)
slopes.append(model.coef_[0] * fps * 3600)
rmses.append(np.sqrt(np.mean((y - model.predict(X)) ** 2)))
self.insert1({**key, 'z_slope': slopes[0], 'y_slope': slopes[1],
'x_slope': slopes[2], 'z_rmse': rmses[0], 'y_rmse': rmses[1],
'x_rmse': rmses[2]})
def session_plot(self):
""" Create boxplots for the session (one per scan)."""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Check that plot is restricted to a single stack and a single session
regot_key = self.fetch('KEY', limit=1)[0]
stack_key = {n: regot_key[n] for n in ['animal_id', 'stack_session', 'stack_idx',
'volume_id']}
session_key = {n: regot_key[n] for n in ['animal_id', 'scan_session']}
if len(self & stack_key) != len(self):
raise PipelineException('Plot can only be generated for one stack at a time')
if len(self & session_key) != len(self):
raise PipelineException('Plot can only be generated for one session at a '
'time')
# Get field times and depths
z_slopes = []
scan_idxs = np.unique(self.fetch('scan_idx'))
for scan_idx in scan_idxs:
scan_slopes = (self & {**session_key, 'scan_idx': scan_idx}).fetch('z_slope')
z_slopes.append(scan_slopes)
# Plot
fig = plt.figure(figsize=(7, 4))
plt.boxplot(z_slopes)
plt.title('Z drift for {animal_id}-{scan_session} into {animal_id}-'
'{stack_session}-{stack_idx}'.format(**regot_key))
plt.ylabel('Z drift (um/hour)')
plt.xlabel('Scans')
plt.xticks(range(1, len(scan_idxs) + 1), scan_idxs)
# Plot formatting
plt.gca().invert_yaxis()
plt.gca().yaxis.set_major_locator(ticker.MultipleLocator(5))
plt.grid(linestyle='--', alpha=0.8)
return fig
@schema
class StackSet(dj.Computed):
definition = """ # match segmented masks by proximity in the stack
-> CorrectedStack.proj(stack_session='session') # animal_id, stack_session, stack_idx, volume_id
-> shared.RegistrationMethod
-> shared.SegmentationMethod
---
min_distance :tinyint # distance used as threshold to accept two masks as the same
max_height :tinyint # maximum allowed height of a joint mask
"""
@property
def key_source(self):
return (CorrectedStack.proj(stack_session='session') *
shared.RegistrationMethod.proj() * shared.SegmentationMethod.proj() &
Registration & {'segmentation_method': 6})
class Unit(dj.Part):
definition = """ # a unit in the stack
-> master
munit_id :int # unique id in the stack
---
munit_x :float # (um) position of centroid in motor coordinate system
munit_y :float # (um) position of centroid in motor coordinate system
munit_z :float # (um) position of centroid in motor coordinate system
"""
class Match(dj.Part):
definition = """ # Scan unit to stack unit match (n:1 relation)
-> master
-> experiment.Scan.proj(scan_session='session') # animal_id, scan_session, scan_idx
unit_id :int # unit id from ScanSet.Unit
---
-> StackSet.Unit
"""
class MatchedUnit():
""" Coordinates for a set of masks that form a single cell."""
def __init__(self, key, x, y, z, plane_id):
self.keys = [key]
self.xs = [x]
self.ys = [y]
self.zs = [z]
self.plane_ids = [plane_id]
self.centroid = [x, y, z]
def join_with(self, other):
self.keys += other.keys
self.xs += other.xs
self.ys += other.ys
self.zs += other.zs
self.plane_ids += other.plane_ids
self.centroid = [np.mean(self.xs), np.mean(self.ys), np.mean(self.zs)]
def __lt__(self, other):
""" Used for sorting. """
return True
def make(self, key):
from scipy.spatial import distance
import bisect
# Set some params
min_distance = 10
max_height = 20
# Create list of units
units = [] # stands for matched units
for field in Registration & key:
# Edge case: when two channels are registered, we don't know which to use
if len(Registration.proj(ignore='scan_channel') & field) > 1:
msg = ('More than one channel was registered for {animal_id}-'
'{scan_session}-{scan_idx} field {field}'.format(**field))
raise PipelineException(msg)
# Get registered grid
field_key = {'animal_id': field['animal_id'],
'session': field['scan_session'], 'scan_idx': field['scan_idx'],
'field': field['field']}
pipe = reso if reso.ScanInfo & field_key else meso
um_per_px = ((reso.ScanInfo if pipe == reso else meso.ScanInfo.Field) &
field_key).microns_per_pixel
grid = (Registration & field).get_grid(type='affine', desired_res=um_per_px)
# Create cell objects
for channel_key in (pipe.ScanSet & field_key &
{'segmentation_method': key['segmentation_method']}): # *
somas = pipe.MaskClassification.Type & {'type': 'soma'}
field_somas = pipe.ScanSet.Unit & channel_key & somas
unit_keys, xs, ys = (pipe.ScanSet.UnitInfo & field_somas).fetch('KEY',
'px_x', 'px_y')
px_coords = np.stack([ys, xs])
xs, ys, zs = [ndimage.map_coordinates(grid[..., i], px_coords, order=1)
for i in range(3)]
units += [StackSet.MatchedUnit(*args, key_hash(channel_key)) for args in
zip(unit_keys, xs, ys, zs)]
# * Separating masks per channel allows masks in diff channels to be matched
print(len(units), 'initial units')
def find_close_units(centroid, centroids, min_distance):
""" Finds centroids that are closer than min_distance to centroid. """
dists = distance.cdist(np.expand_dims(centroid, 0), centroids)
indices = np.flatnonzero(dists < min_distance)
return indices, dists[0, indices]
def is_valid(unit1, unit2, max_height):
""" Checks that units belong to different fields and that the resulting unit
would not be bigger than 20 microns."""
different_fields = len(set(unit1.plane_ids) & set(unit2.plane_ids)) == 0
acceptable_height = (max(unit1.zs + unit2.zs) - min(
unit1.zs + unit2.zs)) < max_height
return different_fields and acceptable_height
# Create distance matrix
# For memory efficiency we use an adjacency list with only the units at less than 10 microns
centroids = np.stack([u.centroid for u in units])
distance_list = [] # list of triples (distance, unit1, unit2)
for i in range(len(units)):
indices, distances = find_close_units(centroids[i], centroids[i + 1:],
min_distance)
for dist, j in zip(distances, i + 1 + indices):
if is_valid(units[i], units[j], max_height):
bisect.insort(distance_list, (dist, units[i], units[j]))
print(len(distance_list), 'possible pairings')
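# Greedy agglomeration: repeatedly pop the closest valid pair, merge the two
# units into one (averaging their centroid), discard stale pairs that involve
# the merged units, and insert freshly computed distances from the new
# centroid, until no valid pair closer than min_distance remains.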
# Join units
while (len(distance_list) > 0):
# Get next pair of units
d, unit1, unit2 = distance_list.pop(0)
# Remove them from lists
units.remove(unit1)
units.remove(unit2)
f = lambda x: (unit1 not in x[1:]) and (unit2 not in x[1:])
distance_list = list(filter(f, distance_list))
# Join them
unit1.join_with(unit2)
# Recalculate distances
centroids = [u.centroid for u in units]
indices, distances = find_close_units(unit1.centroid, centroids, min_distance)
for dist, j in zip(distances, indices):
if is_valid(unit1, units[j], max_height):
bisect.insort(distance_list, (dist, unit1, units[j]))
# Insert new unit
units.append(unit1)
print(len(units), 'number of final masks')
# Insert
self.insert1({**key, 'min_distance': min_distance, 'max_height': max_height})
for munit_id, munit in zip(itertools.count(start=1), units):
new_unit = {**key, 'munit_id': munit_id, 'munit_x': munit.centroid[0],
'munit_y': munit.centroid[1], 'munit_z': munit.centroid[2]}
self.Unit().insert1(new_unit)
for subunit_key in munit.keys:
new_match = {**key, 'munit_id': munit_id, **subunit_key,
'scan_session': subunit_key['session']}
self.Match().insert1(new_match, ignore_extra_fields=True)
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
fig = (StackSet() & key).plot_centroids3d()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename)
plt.close(fig)
msg = ('StackSet for {animal_id}-{stack_session}-{stack_idx}: {num_units} final '
'units').format(**key, num_units=len(self.Unit & key))
slack_user = notify.SlackUser & (experiment.Session & key &
{'session': key['stack_session']})
slack_user.notify(file=img_filename, file_title=msg)
def plot_centroids3d(self):
""" Plots the centroids of all units in the motor coordinate system (in microns)
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from mpl_toolkits.mplot3d import Axes3D
# Get centroids
xs, ys, zs = (StackSet.Unit & self).fetch('munit_x', 'munit_y', 'munit_z')
# Plot
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
ax.scatter(xs, ys, zs, alpha=0.5)
ax.invert_zaxis()
ax.set_xlabel('x (um)')
ax.set_ylabel('y (um)')
ax.set_zlabel('z (um)')
return fig
@schema
class Area(dj.Computed):
definition = """ # transform area masks from annotated retinotopic maps into stack space
-> PreprocessedStack.proj(stack_session='session',stack_channel='channel')
-> experiment.Scan.proj(scan_session='session')
-> shared.Channel.proj(scan_channel='channel')
-> shared.RegistrationMethod
-> shared.AreaMaskMethod
ret_idx : smallint # retinotopy map index for each animal
ret_hash : varchar(32) # single attribute representation of the key (used to avoid going over 16 attributes in the key)
---
"""
class Mask(dj.Part):
definition = """ # mask per area indicating membership
-> master
-> anatomy.Area
---
mask : blob # 2D mask of pixel area membership
"""
@property
def key_source(self):
# anatomy code outputs masks per field for aim 2pScan and per concatenated plane for aim widefield
map_rel = (anatomy.AreaMask.proj('ret_idx', scan_session='session') &
(experiment.Scan & 'aim="2pScan"').proj(scan_session='session'))
stack_rel = Registration & 'registration_method = 5'
heading = list(set(list(map_rel.heading.attributes) + list(stack_rel.heading.attributes)))
heading.remove('field')
heading.remove('brain_area')
key_source = dj.U(*heading, 'mask_method') & (map_rel * stack_rel * shared.AreaMaskMethod)
return key_source
def make(self, key):
from scipy.interpolate import griddata
import cv2
# same as key source but retains brain area attribute
key['ret_hash'] = key_hash(key)
map_rel = (anatomy.AreaMask.proj('ret_idx', scan_session='session') &
           (experiment.Scan & 'aim="2pScan"').proj(scan_session='session'))
stack_rel = Registration & 'registration_method = 5'
heading = list(set(list(map_rel.heading.attributes) + list(stack_rel.heading.attributes)))
heading.remove('field')
area_keys = (dj.U(*heading, 'mask_method') & (map_rel * stack_rel * shared.AreaMaskMethod) & key).fetch('KEY')
fetch_str = ['x', 'y', 'um_width', 'um_height', 'px_width', 'px_height']
stack_rel = CorrectedStack.proj(*fetch_str, stack_session='session') & key
cent_x, cent_y, um_w, um_h, px_w, px_h = stack_rel.fetch1(*fetch_str)
# subtract edges so that all coordinates are relative to the field
stack_edges = np.array((cent_x - um_w / 2, cent_y - um_h / 2))
stack_px_dims = np.array((px_w, px_h))
stack_um_dims = np.array((um_w, um_h))
# 0.5 displacement returns the center of each pixel
stack_px_grid = np.meshgrid(*[np.arange(d) + 0.5 for d in stack_px_dims])
# for each area, transfer mask from all fields into the stack
area_masks = []
for area_key in area_keys:
mask_rel = anatomy.AreaMask & area_key
field_keys, masks = mask_rel.fetch('KEY', 'mask')
stack_masks = []
for field_key, field_mask in zip(field_keys, masks):
field_res = (meso.ScanInfo.Field & field_key).microns_per_pixel
grid_key = {**key, 'field': field_key['field']}
# fetch transformation grid using built in function
field2stack_um = (Registration & grid_key).get_grid(type='affine', desired_res=field_res)
field2stack_um = (field2stack_um[..., :2]).transpose([2, 0, 1])
# convert transformation grid into stack pixel space
field2stack_px = [(grid - edge) * px_per_um for grid, edge, px_per_um
in zip(field2stack_um, stack_edges, stack_px_dims / stack_um_dims)]
grid_locs = np.array([f2s.ravel() for f2s in field2stack_px]).T
grid_vals = field_mask.ravel()
grid_query = np.array([stack_grid.ravel() for stack_grid in stack_px_grid]).T
# griddata because scipy.interpolate.interp2d wasn't working for some reason
# linear because nearest neighbor doesn't handle nans at the edge of the image
stack_mask = griddata(grid_locs, grid_vals, grid_query, method='linear')
stack_mask = np.round(np.reshape(stack_mask, (px_h, px_w)))
stack_masks.append(stack_mask)
# flatten all masks for area
stack_masks = np.array(stack_masks)
stack_masks[np.isnan(stack_masks)] = 0
area_mask = np.max(stack_masks, axis=0)
# close gaps in mask with 100 um kernel
kernel_width = 100
kernel = np.ones(np.round(kernel_width * (stack_px_dims / stack_um_dims)).astype(int))
area_mask = cv2.morphologyEx(area_mask, cv2.MORPH_CLOSE, kernel)
area_masks.append(area_mask)
# locate areas where masks overlap and set to nan
overlap_locs = np.sum(area_masks, axis=0) > 1
# create reference map of non-overlapping area masks
mod_masks = np.stack(area_masks.copy())
mod_masks[:, overlap_locs] = np.nan
ref_mask = np.max([mm * (i + 1) for i, mm in enumerate(mod_masks)], axis=0)
# interpolate overlap pixels into reference mask
non_nan_idx = np.invert(np.isnan(ref_mask))
grid_locs = np.array([stack_grid[non_nan_idx].ravel() for stack_grid in stack_px_grid]).T
grid_vals = ref_mask[non_nan_idx].ravel()
grid_query = np.array([stack_grid[overlap_locs] for stack_grid in stack_px_grid]).T
mask_assignments = griddata(grid_locs, grid_vals, grid_query, method='nearest')
for loc, assignment in zip((np.array(grid_query) - 0.5).astype(int), mask_assignments):
mod_masks[:, loc[1], loc[0]] = 0
mod_masks[int(assignment - 1)][loc[1]][loc[0]] = 1
area_keys = [{**area_key,**key,'mask': mod_mask} for area_key, mod_mask in zip(area_keys, mod_masks)]
self.insert1(key)
self.Mask.insert(area_keys)
| lgpl-3.0 |
LiaoPan/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
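# Sanity check with the first case below (n_features=11,
# n_features_to_select=3, step=2):
#   formula1 = 1 + (11 + 2 - 3 - 1) // 2 = 1 + 4 = 5
#   formula2 = 1 + ceil((11 - 3) / 2)   = 1 + 4 = 5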
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/scipy/signal/filter_design.py | 7 | 139256 | """Filter design.
"""
from __future__ import division, print_function, absolute_import
import math
import operator
import warnings
import numpy
import numpy as np
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array,
mintypecode)
from numpy.polynomial.polynomial import polyval as npp_polyval
from scipy import special, optimize, fftpack
from scipy.special import comb, factorial
from scipy._lib._numpy_compat import polyvalfromroots
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
'sosfreqz', 'iirnotch', 'iirpeak']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
def findfreqs(num, den, N, kind='ba'):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system, where the coefficients
are ordered from highest to lowest degree. Or, the roots of the
transfer function numerator and denominator (i.e. zeroes and poles).
N : int
The length of the array to be computed.
kind : str {'ba', 'zp'}, optional
Specifies whether the numerator and denominator are specified by their
polynomial coefficients ('ba'), or their roots ('zp').
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
if kind == 'ba':
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
elif kind == 'zp':
ep = atleast_1d(den) + 0j
tz = atleast_1d(num) + 0j
else:
raise ValueError("input must be one of {'ba', 'zp'}")
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
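# `integ` marks (near-)zero roots (pure integrators/differentiators) so the
# log10 calls below operate on a magnitude of ~1 instead of 0; hfreq and
# lfreq then bound a log-spaced band around the pole/zero magnitudes.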
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
        H(w) = (b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]) /
               (a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N])
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqs_zpk(z, p, k, worN=None):
"""
Compute frequency response of analog filter.
Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its
frequency response::
        H(w) = k * ((jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1])) /
                   ((jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1]))
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqz : Compute the frequency response of a digital filter in TF form
freqz_zpk : Compute the frequency response of a digital filter in ZPK form
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.signal import freqs_zpk, iirfilter
>>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1',
... output='zpk')
>>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
k = np.asarray(k)
if k.size > 1:
raise ValueError('k must be a single scalar gain')
if worN is None:
w = findfreqs(z, p, 200, kind='zp')
elif isinstance(worN, int):
N = worN
w = findfreqs(z, p, N, kind='zp')
else:
w = worN
w = atleast_1d(w)
s = 1j * w
num = polyvalfromroots(s, z)
den = polyvalfromroots(s, p)
h = k * num/den
return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
        H(e^jw) = B(e^jw) / A(e^jw)
                = (b[0] + b[1]*e^-jw + ... + b[M]*e^-jwM) /
                  (a[0] + a[1]*e^-jw + ... + a[N]*e^-jwN)
Parameters
----------
b : array_like
Numerator of a linear filter. If `b` has dimension greater than 1,
it is assumed that the coefficients are stored in the first dimension,
and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
array must be compatible for broadcasting.
a : array_like
Denominator of a linear filter. If `b` has dimension greater than 1,
it is assumed that the coefficients are stored in the first dimension,
and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
array must be compatible for broadcasting.
worN : {None, int, array_like}, optional
If None (default), then compute at 512 equally spaced frequencies.
If a single integer, then compute at that many frequencies. This is
a convenient alternative to::
np.linspace(0, 2*pi if whole else pi, N, endpoint=False)
Using a number that is fast for FFT computations can result in
faster computations (see Notes).
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz_zpk
sosfreqz
Notes
-----
Using Matplotlib's :func:`matplotlib.pyplot.plot` function as the callable
for `plot` produces unexpected results, as this plots the real part of the
complex transfer function, not the magnitude.
Try ``lambda w, h: plot(w, np.abs(h))``.
A direct computation via (R)FFT is used to compute the frequency response
when the following conditions are met:
1. An integer value is given for `worN`.
2. `worN` is fast to compute via FFT (i.e.,
`next_fast_len(worN) <scipy.fftpack.next_fast_len>` equals `worN`).
3. The denominator coefficients are a single value (``a.shape[0] == 1``).
4. `worN` is at least as long as the numerator coefficients
(``worN >= b.shape[0]``).
5. If ``b.ndim > 1``, then ``b.shape[-1] == 1``.
For long FIR filters, the FFT approach can have lower error and be much
faster than the equivalent direct polynomial calculation.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
Broadcasting Examples
Suppose we have two FIR filters whose coefficients are stored in the
rows of an array with shape (2, 25). For this demonstration we'll
use random data:
>>> np.random.seed(42)
>>> b = np.random.rand(2, 25)
To compute the frequency response for these two filters with one call
to `freqz`, we must pass in ``b.T``, because `freqz` expects the first
axis to hold the coefficients. We must then extend the shape with a
trivial dimension of length 1 to allow broadcasting with the array
of frequencies. That is, we pass in ``b.T[..., np.newaxis]``, which has
shape (25, 2, 1):
>>> w, h = signal.freqz(b.T[..., np.newaxis], worN=1024)
>>> w.shape
(1024,)
>>> h.shape
(2, 1024)
Now suppose we have two transfer functions, with the same numerator
coefficients ``b = [0.5, 0.5]``. The coefficients for the two denominators
are stored in the first dimension of the two-dimensional array `a`::
        a = [[1, 1], [-0.25, -0.5]]
>>> b = np.array([0.5, 0.5])
>>> a = np.array([[1, 1], [-0.25, -0.5]])
Only `a` is more than one-dimensional. To make it compatible for
broadcasting with the frequencies, we extend it with a trivial dimension
in the call to `freqz`:
>>> w, h = signal.freqz(b, a[..., np.newaxis], worN=1024)
>>> w.shape
(1024,)
>>> h.shape
(2, 1024)
"""
b = atleast_1d(b)
a = atleast_1d(a)
if worN is None:
worN = 512
h = None
try:
worN = operator.index(worN)
except TypeError: # not int-like
w = atleast_1d(worN)
else:
if worN < 0:
raise ValueError('worN must be nonnegative, got %s' % (worN,))
lastpoint = 2 * pi if whole else pi
w = np.linspace(0, lastpoint, worN, endpoint=False)
if (a.size == 1 and worN >= b.shape[0] and
fftpack.next_fast_len(worN) == worN and
(b.ndim == 1 or (b.shape[-1] == 1))):
# if worN is fast, 2 * worN will be fast, too, so no need to check
n_fft = worN if whole else worN * 2
if np.isrealobj(b) and np.isrealobj(a):
fft_func = np.fft.rfft
else:
fft_func = fftpack.fft
h = fft_func(b, n=n_fft, axis=0)[:worN]
h /= a
if fft_func is np.fft.rfft and whole:
# exclude DC and maybe Nyquist (no need to use axis_reverse
# here because we can build reversal with the truncation)
stop = -1 if n_fft % 2 == 1 else -2
h_flip = slice(stop, 0, -1)
h = np.concatenate((h, h[h_flip].conj()))
if b.ndim > 1:
# Last axis of h has length 1, so drop it.
h = h[..., 0]
# Rotate the first axis of h to the end.
h = np.rollaxis(h, 0, h.ndim)
del worN
if h is None: # still need to compute using freqs w
zm1 = exp(-1j * w)
h = (npp_polyval(zm1, b, tensor=False) /
npp_polyval(zm1, a, tensor=False))
if plot is not None:
plot(w, h)
return w, h
def freqz_zpk(z, p, k, worN=None, whole=False):
r"""
Compute the frequency response of a digital filter in ZPK form.
Given the Zeros, Poles and Gain of a digital filter, compute its frequency
response::
:math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])`
where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are
the `poles`.
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If single integer (default 512, same as None), then compute at `worN`
frequencies equally spaced around the unit circle. If an array_like,
compute the response at the frequencies given (in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqs_zpk : Compute the frequency response of an analog filter in ZPK form
freqz : Compute the frequency response of a digital filter in TF form
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy import signal
>>> z, p, k = signal.butter(4, 0.2, output='zpk')
>>> w, h = signal.freqz_zpk(z, p, k)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
z, p = map(atleast_1d, (z, p))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
w = numpy.linspace(0, lastpoint, 512, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(1j * w)
h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
        D(w) = - d/dw arg H(e^jw)
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
When such a case arises the warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
.. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
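# As in the reference above: the group delay of B(z)/A(z) equals the group
# delay of C(z) = B(z) * reversed(A)(z) minus (len(a) - 1), and for
# C(z) = sum_k c[k] * z**-k the group delay is
# Re( sum_k k*c[k]*z**-k / sum_k c[k]*z**-k ), which is what the cr / c
# polynomial evaluations below compute.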
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in SOS format.
Given `sos`, an array with shape (n, 6) of second order sections of
a digital filter, compute the frequency response of the system function::
        H(z) = (B0(z)/A0(z)) * (B1(z)/A1(z)) * ... * (B{n-1}(z)/A{n-1}(z))
for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
denominator of the transfer function of the k-th second order section.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
Using a number that is fast for FFT computations can result in
faster computations (see Notes of `freqz`).
If an array_like, compute the response at the frequencies given (in
radians/sample; must be 1D).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz, sosfilt
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
Design a 15th-order bandpass filter in SOS format.
>>> from scipy import signal
>>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='sos')
Compute the frequency response at 1500 points from DC to Nyquist.
>>> w, h = signal.sosfreqz(sos, worN=1500)
Plot the response.
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.ylim(-75, 5)
>>> plt.grid(True)
>>> plt.yticks([0, -20, -40, -60])
>>> plt.ylabel('Gain [dB]')
>>> plt.title('Frequency Response')
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.grid(True)
>>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
... [r'$-\\pi$', r'$-\\pi/2$', '0', r'$\\pi/2$', r'$\\pi$'])
>>> plt.ylabel('Phase [rad]')
>>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
>>> plt.show()
If the same filter is implemented as a single transfer function,
numerical error corrupts the frequency response:
>>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='ba')
>>> w, h = signal.freqz(b, a, worN=1500)
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
if n_sections == 0:
raise ValueError('Cannot compute frequencies with no sections')
h = 1.
for row in sos:
w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole)
h *= rowh
return w, h
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a = [a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
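Examples
--------
A small hand-worked example: real zeros at +/-1, a conjugate pole pair at
0.5 +/- 0.5j and gain 2 expand to ``2*s**2 - 2`` over ``s**2 - s + 0.5``:
>>> b, a = zpk2tf([-1., 1.], [0.5 + 0.5j, 0.5 - 0.5j], 2)
>>> print(np.allclose(b, [2, 0, -2]), np.allclose(a, [1, -1, 0.5]))
True True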
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not reduce numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
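Examples
--------
For instance, the first-order system ``1 / (z + 0.5)`` should map to a
single section, with the extra pole and zero placed at the origin:
>>> sos = tf2sos([1.], [1., 0.5])
>>> print(np.allclose(sos, [[1, 0, 0, 1, 0.5, 0]]))
True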
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
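Examples
--------
A minimal illustrative example, multiplying two sections back together:
>>> sos = [[1., 1., 0., 1., 0.5, 0.], [1., 0., 1., 1., 0., 0.25]]
>>> b, a = sos2tf(sos)
>>> print(np.allclose(b, [1, 1, 1, 1, 0]))
True
>>> print(np.allclose(a, [1, 0.5, 0.25, 0.125, 0]))
True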
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
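Examples
--------
A single section with zeros at +/-1j and poles at +/-0.5j:
>>> z, p, k = sos2zpk([[1., 0., 1., 1., 0., 0.25]])
>>> print(np.allclose(np.sort_complex(z), [-1j, 1j]))
True
>>> print(np.allclose(np.sort_complex(p), [-0.5j, 0.5j]))
True
>>> print(np.allclose(k, 1))
True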
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
arrays with zeros so that all numerators have the same length. Such
alignment is necessary for functions like 'tf2ss', which needs the
alignment when dealing with SIMO transfer functions.
Parameters
----------
nums: array_like
Numerator or list of numerators. Not necessarily with same length.
Returns
-------
nums: array
The numerator. If `nums` input was a list of numerators then a 2d
array with padded zeros for shorter numerators is returned. Otherwise
returns ``np.asarray(nums)``.
"""
try:
# The statement can throw a ValueError if one
# of the numerators is a scalar and another
# is array-like, e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
def normalize(b, a):
"""Normalize numerator/denominator of a continuous-time transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
Parameters
----------
b: array_like
Numerator of the transfer function. Can be a 2d array to normalize
multiple transfer functions.
a: array_like
Denominator of the transfer function. At most 1d.
Returns
-------
num: array
The numerator of the normalized transfer function. At least a 1d
array. A 2d-array if the input `num` is a 2d array.
den: 1d-array
The denominator of the normalized transfer function.
Notes
-----
Coefficients for both the numerator and denominator should be specified in
descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``).
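Examples
--------
For example, dividing through by the leading denominator coefficient:
>>> num, den = normalize([2., 4., 6.], [2., 0., 4.])
>>> print(np.allclose(num, [1, 2, 3]), np.allclose(den, [1, 0, 2]))
True True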
"""
num, den = b, a
den = np.atleast_1d(den)
num = np.atleast_2d(_align_nums(num))
if den.ndim != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if num.ndim > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if np.all(den == 0):
raise ValueError("Denominator must have at least on nonzero element.")
# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')
# Normalize transfer function
num, den = num / den[0], den / den[0]
# Count numerator columns that are all zero
leading_zeros = 0
for col in num.T:
if np.allclose(col, 0, atol=1e-14):
leading_zeros += 1
else:
break
# Trim leading zeros of numerator
if leading_zeros > 0:
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
# Make sure at least one column remains
if leading_zeros == num.shape[1]:
leading_zeros -= 1
num = num[:, leading_zeros:]
# Squeeze first dimension if singular
if num.shape[0] == 1:
num = num[0, :]
return num, den
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
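Examples
--------
Scaling the prototype ``1 / (s + 1)`` to a cutoff of 2 rad/s should give
``2 / (s + 2)``:
>>> b, a = lp2lp([1.], [1., 1.], wo=2)
>>> print(np.allclose(b, [2]), np.allclose(a, [1, 2]))
True True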
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
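Examples
--------
With the default ``wo=1``, the prototype ``1 / (s + 1)`` should become the
highpass ``s / (s + 1)``:
>>> b, a = lp2hp([1.], [1., 1.])
>>> print(np.allclose(b, [1, 0]), np.allclose(a, [1, 1]))
True True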
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
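Examples
--------
Applying ``s -> (s**2 + wo**2) / (bw * s)`` to the prototype ``1 / (s + 1)``
with ``wo=1`` and ``bw=1`` should give ``s / (s**2 + s + 1)``:
>>> b, a = lp2bp([1.], [1., 1.], wo=1, bw=1)
>>> print(np.allclose(b, [1, 0]), np.allclose(a, [1, 1, 1]))
True True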
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
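Examples
--------
Applying ``s -> bw * s / (s**2 + wo**2)`` to the prototype ``1 / (s + 1)``
with ``wo=1`` and ``bw=1`` should give ``(s**2 + 1) / (s**2 + s + 1)``:
>>> b, a = lp2bs([1.], [1., 1.], wo=1, bw=1)
>>> print(np.allclose(b, [1, 0, 1]), np.allclose(a, [1, 1, 1]))
True True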
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``.
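Examples
--------
Mapping the analog prototype ``1 / (s + 1)`` with ``fs=1`` (so
``s -> 2*(z - 1)/(z + 1)``) should give ``(z + 1) / (3*z - 1)``, returned
here normalized so that ``a[0] == 1``:
>>> b, a = bilinear([1.], [1., 1.], fs=1)
>>> print(np.allclose(b, [1/3., 1/3.]), np.allclose(a, [1, -1/3.]))
True True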
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
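Examples
--------
A minimal usage sketch: a digital lowpass meeting a 1 dB passband /
40 dB stopband specification (elliptic by default). The returned
numerator and denominator have the same length:
>>> b, a = iirdesign(wp=0.2, ws=0.3, gpass=1, gstop=40)
>>> print(len(b) == len(a))
True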
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn <= 0) or numpy.any(Wn >= 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 < Wn < 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
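Examples
--------
Mapping a single analog pole at ``s = -1`` with ``fs=0.5`` (so
``s -> (z - 1)/(z + 1)``) should place the pole at ``z = 0``, a zero at
``z = -1`` (Nyquist) and give gain 1/2:
>>> z, p, k = _zpkbilinear([], [-1.], 1, fs=0.5)
>>> print(np.allclose(z, [-1]), np.allclose(p, [0]), np.allclose(k, 0.5))
True True True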
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2.0*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
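Examples
--------
Moving the prototype pole at ``s = -1`` out to a cutoff of 2 rad/s should
scale both the pole and the gain by 2:
>>> z, p, k = _zpklp2lp([], [-1.], 1, wo=2)
>>> print(np.allclose(p, [-2]), np.allclose(k, 2))
True True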
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
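Examples
--------
Inverting the prototype pole at ``s = -1`` about ``wo=2`` should give
``s / (s + 2)``: a zero at the origin, a pole at -2 and unchanged gain:
>>> z, p, k = _zpklp2hp([], [-1.], 1, wo=2)
>>> print(np.allclose(z, [0]), np.allclose(p, [-2]), np.allclose(k, 1))
True True True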
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth-order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
ellipord, ellipap
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'):
"""
Bessel/Thomson digital and analog filter design.
Design an Nth-order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies (defined
by the `norm` parameter).
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned. (See Notes.)
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
norm : {'phase', 'delay', 'mag'}, optional
Critical frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
both low-pass and high-pass filters, so this is the
"phase-matched" case.
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency `Wn`.
.. versionadded:: 0.18.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response. [1]_
The Bessel is inherently an analog filter. This function generates digital
Bessel filters using the bilinear transform, which does not preserve the
phase response of the analog filter. As such, it is only approximately
correct at frequencies below about fs/4. To get maximally-flat group
delay at higher frequencies, the analog Bessel filter must be transformed
using phase-preserving techniques.
See `besselap` for implementation details and references.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the phase-normalized frequency response, showing the relationship
to the Butterworth's cutoff frequency (green):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter magnitude response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
and the phase midpoint:
>>> plt.figure()
>>> plt.semilogx(w, np.unwrap(np.angle(h)))
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-np.pi, color='red') # phase midpoint
>>> plt.title('Bessel filter phase response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Phase [radians]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
>>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.axhline(-3, color='red') # -3 dB magnitude
>>> plt.axvline(10, color='green') # cutoff frequency
>>> plt.title('Magnitude-normalized Bessel filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the delay-normalized filter, showing the maximally-flat group delay
at 0.1 seconds:
>>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
>>> w, h = signal.freqs(b, a)
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.axhline(0.1, color='red') # 0.1 seconds group delay
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
References
----------
.. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel_'+norm)
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
gpass : float
Amount of ripple in the passband in dB.
gstop : float
Amount of attenuation in stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with passband within 3 dB from 20 to
50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
# Find the Butterworth natural frequency WN (or the "3dB" frequency)
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop
>>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # pass
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to
0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop
>>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # pass
>>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # stop
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
See Also
--------
butter : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
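# Hedged check of the prototype above (values rounded by hand, not doctested):
# for N = 3 the poles lie on the unit circle in the left half-plane at
# 120-degree spacing, with one exactly real pole at -1:
#
#     >>> z, p, k = buttap(3)
#     >>> p   # approximately [-0.5+0.866j, -1.+0.j, -0.5-0.866j]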
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
cheby1 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
def cheb2ap(N, rs):
"""
    Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
See Also
--------
cheby2 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m, 1 - m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth-order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
ellip : Filter design function using this prototype
References
----------
.. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
Chapters 5 and 12.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
This can more efficiently calculate ratios of factorials, since:
n!/m! == falling_factorial(n, n-m)
where n >= m
skipping the factors that cancel out
the usual factorial n! == ff(n, n)
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
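# Hedged illustration of the helper above (`factorial` here is the
# scipy.special function already used by `_bessel_poly` below): the falling
# factorial gives ratios of factorials without forming either full factorial:
#
#     >>> _falling_factorial(6, 3)                       # 6 * 5 * 4
#     120
#     >>> factorial(6, exact=True) // factorial(3, exact=True)
#     120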
def _bessel_poly(n, reverse=False):
"""
Return the coefficients of Bessel polynomial of degree `n`
If `reverse` is true, a reverse Bessel polynomial is output.
Output is a list of coefficients:
[1] = 1
[1, 1] = 1*s + 1
[1, 3, 3] = 1*s^2 + 3*s + 3
[1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15
[1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
etc.
Output is a Python list of arbitrary precision long ints, so n is only
limited by your hardware's memory.
Sequence is http://oeis.org/A001498 , and output can be confirmed to
match http://oeis.org/A001498/b001498.txt :
>>> i = 0
>>> for n in range(51):
... for x in _bessel_poly(n, reverse=True):
... print(i, x)
... i += 1
"""
if abs(int(n)) != n:
raise ValueError("Polynomial order must be a nonnegative integer")
else:
n = int(n) # np.int32 doesn't work, for instance
out = []
for k in range(n + 1):
num = _falling_factorial(2*n - k, n)
den = 2**(n - k) * factorial(k, exact=True)
out.append(num // den)
if reverse:
return out[::-1]
else:
return out
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise Exception('Zeros failed to converge')
return x
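# Hedged toy example of the private root finder above, using a cubic with
# known roots 1, 2 and 3; the starting guesses only need to be distinct and
# roughly in the right region for the iteration to converge:
#
#     >>> import numpy as np
#     >>> f = lambda x: (x - 1) * (x - 2) * (x - 3)
#     >>> fp = lambda x: 3 * x**2 - 12 * x + 11
#     >>> roots = _aberth(f, fp, np.array([0.5 + 0.5j, 2.2 - 0.3j, 3.4 + 0.1j]))
#     >>> np.allclose(np.sort(roots.real), [1, 2, 3])    # imaginary parts ~ 0
#     True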
def _bessel_zeros(N):
"""
Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
modified Bessel function of the second kind
"""
if N == 0:
return asarray([])
# Generate starting points
x0 = _campos_zeros(N)
# Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary
# Bessel polynomial y_N(x)
def f(x):
return special.kve(N+0.5, 1/x)
# First derivative of above
def fp(x):
return (special.kve(N-0.5, 1/x)/(2*x**2) -
special.kve(N+0.5, 1/x)/(x**2) +
special.kve(N+1.5, 1/x)/(2*x**2))
# Starting points converge to true zeros
x = _aberth(f, fp, x0)
# Improve precision using Newton's method on each
for i in range(len(x)):
x[i] = optimize.newton(f, x[i], fp, tol=1e-15)
# Average complex conjugates to make them exactly symmetrical
x = np.mean((x, x[::-1].conj()), 0)
# Zeros should sum to -1
if abs(np.sum(x) + 1) > 1e-15:
raise RuntimeError('Generated zeros are inaccurate')
return x
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/np.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase'):
"""
Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
Parameters
----------
N : int
The order of the filter.
norm : {'phase', 'delay', 'mag'}, optional
Frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This
happens for both low-pass and high-pass filters, so this is the
"phase-matched" case. [6]_
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1 (e.g. 1 second). This is the "natural" type obtained by
solving Bessel polynomials
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency 1. This is called "frequency normalization" by
Bond. [1]_
.. versionadded:: 0.18.0
Returns
-------
z : ndarray
Zeros of the transfer function. Is always an empty array.
p : ndarray
Poles of the transfer function.
k : scalar
Gain of the transfer function. For phase-normalized, this is always 1.
See Also
--------
bessel : Filter design function using this prototype
Notes
-----
To find the pole locations, approximate starting points are generated [2]_
for the zeros of the ordinary Bessel polynomial [3]_, then the
Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
calculate more accurate zeros, and these locations are then inverted about
the unit circle.
References
----------
.. [1] C.R. Bond, "Bessel Filter Constants",
http://www.crbond.com/papers/bsf.pdf
.. [2] Campos and Calderon, "Approximate closed-form formulas for the
zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
.. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
.. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
April 1973
.. [5] Ehrlich, "A modified Newton method for polynomials", Communications
of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
:DOI:`10.1145/363067.363115`
.. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
Others", RaneNote 147, 1998, http://www.rane.com/note147.html
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
if N == 0:
p = []
k = 1
else:
# Find roots of reverse Bessel polynomial
p = 1/_bessel_zeros(N)
a_last = _falling_factorial(2*N, N) // 2**N
# Shift them to a different normalization if required
if norm in ('delay', 'mag'):
# Normalized for group delay of 1
k = a_last
if norm == 'mag':
# -3 dB magnitude point is at 1 rad/sec
norm_factor = _norm_factor(p, k)
p /= norm_factor
k = norm_factor**-N * a_last
elif norm == 'phase':
# Phase-matched (1/2 max phase shift at 1 rad/sec)
# Asymptotes are same as Butterworth filter
p *= 10**(-math.log10(a_last)/N)
k = 1
else:
raise ValueError('normalization not understood')
return asarray([]), asarray(p, dtype=complex), float(k)
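# Hedged sanity check of the 'mag' normalization above (uses `zpk2tf` and
# `freqs` from this module; the exact digits may vary in the last places):
#
#     >>> z, p, k = besselap(4, norm='mag')
#     >>> b, a = zpk2tf(z, p, k)
#     >>> w, h = freqs(b, a, worN=[1.0])
#     >>> 20 * np.log10(abs(h[0]))   # close to -3.01 dB at 1 rad/s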
def iirnotch(w0, Q):
"""
Design second-order IIR notch digital filter.
A notch filter is a band-stop filter with a narrow bandwidth
(high quality factor). It rejects a narrow frequency band and
leaves the rest of the spectrum little changed.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirpeak
Notes
-----
    .. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the 60Hz component from a
signal sampled at 200Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 60.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design notch filter
>>> b, a = signal.iirnotch(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-25, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "notch")
def iirpeak(w0, Q):
"""
Design second-order IIR peak (resonant) digital filter.
A peak filter is a band-pass filter with a narrow bandwidth
(high quality factor). It rejects components outside a narrow
frequency band.
Parameters
----------
w0 : float
Normalized frequency to be retained in a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding
to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
peak filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirnotch
Notes
-----
    .. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the frequencies other than the 300Hz
component from a signal sampled at 1000Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 300.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design peak filter
>>> b, a = signal.iirpeak(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-50, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "peak")
def _design_notch_peak_filter(w0, Q, ftype):
"""
Design notch or peak digital filter.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : str
The type of IIR filter to design:
- notch filter : ``notch``
- peak filter : ``peak``
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
"""
# Guarantee that the inputs are floats
w0 = float(w0)
Q = float(Q)
# Checks if w0 is within the range
if w0 > 1.0 or w0 < 0.0:
raise ValueError("w0 should be such that 0 < w0 < 1")
# Get bandwidth
bw = w0/Q
# Normalize inputs
bw = bw*np.pi
w0 = w0*np.pi
    # Compute -3dB attenuation
gb = 1/np.sqrt(2)
if ftype == "notch":
# Compute beta: formula 11.3.4 (p.575) from reference [1]
beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0)
elif ftype == "peak":
# Compute beta: formula 11.3.19 (p.579) from reference [1]
beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0)
else:
raise ValueError("Unknown ftype.")
# Compute gain: formula 11.3.6 (p.575) from reference [1]
gain = 1.0/(1.0+beta)
# Compute numerator b and denominator a
# formulas 11.3.7 (p.575) and 11.3.21 (p.579)
# from reference [1]
if ftype == "notch":
b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0])
else:
b = (1.0-gain)*np.array([1.0, 0.0, -1.0])
a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)])
return b, a
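# Hedged note on the shared helper above: both designs use the same -3 dB
# level (gb = 1/sqrt(2)), so for a given w0 and Q the notch and peak filters
# share their denominator and their numerators sum to it (the two designs are
# complementary):
#
#     >>> import numpy as np
#     >>> b_notch, a = _design_notch_peak_filter(0.3, 30.0, "notch")
#     >>> b_peak, a2 = _design_notch_peak_filter(0.3, 30.0, "peak")
#     >>> np.allclose(a, a2) and np.allclose(b_notch + b_peak, a)
#     True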
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'bessel_phase': [besselap],
'bessel_delay': [besselap],
'bessel_mag': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
bessel_norms = {'bessel': 'phase',
'bessel_phase': 'phase',
'bessel_delay': 'delay',
'bessel_mag': 'mag'}
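# Hedged illustration of the alias tables above: `iirfilter` resolves its
# `btype` and `ftype` arguments through `band_dict` and `filter_dict`, so
# abbreviated and spelled-out names should yield the same design:
#
#     >>> b1, a1 = iirfilter(4, 0.2, btype='lp', ftype='butterworth')
#     >>> b2, a2 = iirfilter(4, 0.2, btype='lowpass', ftype='butter')
#     >>> np.allclose(b1, b2) and np.allclose(a1, a2)
#     True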
| mit |
pianomania/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
merenlab/anvio | anvio/taxonomyops/__init__.py | 1 | 79041 | # -*- coding: utf-8
# pylint: disable=line-too-long
"""
Common functions and classes for SCG/TRNA taxonomy.
"""
import os
import sys
import gzip
import copy
import hashlib
import argparse
import numpy as np
import pandas as pd
import multiprocessing
from collections import OrderedDict, Counter
import anvio
import anvio.tables as t
import anvio.utils as utils
import anvio.hmmops as hmmops
import anvio.terminal as terminal
import anvio.constants as constants
import anvio.filesnpaths as filesnpaths
import anvio.ccollections as ccollections
from anvio.errors import ConfigError
from anvio.drivers.blast import BLAST
from anvio.drivers.diamond import Diamond
from anvio.tables.scgtaxonomy import TableForSCGTaxonomy
from anvio.tables.trnataxonomy import TableForTRNATaxonomy
from anvio.tables.miscdata import TableForLayerAdditionalData
from anvio.dbops import ContigsSuperclass, ContigsDatabase, ProfileSuperclass, ProfileDatabase
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "A. Murat Eren"
__email__ = "[email protected]"
run_quiet = terminal.Run(log_file_path=None, verbose=False)
progress_quiet = terminal.Progress(verbose=False)
pp = terminal.pretty_print
HASH = lambda d: str(hashlib.sha224(''.join([str(d[level]) for level in constants.levels_of_taxonomy]).encode('utf-8')).hexdigest()[0:8])
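# A hedged illustration of the HASH helper above (hypothetical taxonomy, not
# real GTDB output): any two taxonomy dicts that agree on every level listed
# in `constants.levels_of_taxonomy` collapse to the same 8-character digest,
# which is what later lets identical annotations be counted as votes for a
# consensus:
#
#     d = dict([(level, None) for level in constants.levels_of_taxonomy])
#     d['t_domain'] = 'Bacteria'
#     HASH(d) == HASH(dict(d))   # -> True; changing any level changes the hash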
class TerminologyHelper(object):
def __init__(self):
proper_foci = ['trnas', 'scgs']
if not hasattr(self.ctx, 'focus'):
raise ConfigError("You are lost :/ Any class initializes from shared taxonomy classes "
"must have a `self.ctx.focus` variable with either of these values: %s." % ', '.join(proper_foci))
if self.ctx.focus not in proper_foci:
raise ConfigError("Unknown focus %s :/" % (self.ctx.focus))
self.scgs_focus = self.ctx.focus == 'scgs'
self.trna_focus = self.ctx.focus == 'trnas'
C = lambda x, y: x if self.scgs_focus else y
self._DELIVERABLE = C("SCG taxonomy", "tRNA taxonomy")
self._ITEM = C("SCG", 'anticodon')
self._ITEMS = C("SCGs", 'anticodons')
self._SUPPORTING_ITEMS = C("supporting_scgs", 'supporting_anticodons')
self._TOTAL_ITEMS = C("total_scgs", 'total_anticodons')
self._SOURCE_DATA = C("single-copy core gene", "tRNA gene")
self._SETUP_PROGRAM = C("anvi-setup-scg-taxonomy", "anvi-setup-trna-taxonomy")
self._COMPUTE_COVS_FLAG = C("--compute-scg-coverages", "--compute-anticodon-coverages")
self._ITEM_FOR_METAGENOME_MODE_PARAM = C("--scg-name-for-metagenome-mode", "--anticodon-for-metagenome-mode")
self._VARIABLE_NAME_IN_TABLE = C("gene_name", "anticodon")
class AccessionIdToTaxonomy(object):
"""A base classs that populates `self.accession_to_taxonomy_dict`"""
def __init__(self):
self.accession_to_taxonomy_dict = {}
if not os.path.exists(self.accession_to_taxonomy_file_path):
return None
letter_to_level = dict([(l.split('_')[1][0], l) for l in self.levels_of_taxonomy])
self.progress.new("Reading the accession to taxonomy file")
self.progress.update('...')
with gzip.open(self.accession_to_taxonomy_file_path, 'rb') as taxonomy_file:
for line in taxonomy_file.readlines():
line = line.decode('utf-8')
if line.startswith('#'):
continue
accession, taxonomy_text = line.strip('\n').split('\t')
# taxonomy_text kinda looks like these:
#
# d__Bacteria;p__Proteobacteria;c__Gammaproteobacteria;o__Burkholderiales;f__Burkholderiaceae;g__Alcaligenes;s__Alcaligenes faecalis_C
# d__Bacteria;p__Firmicutes;c__Bacilli;o__Lactobacillales;f__Enterococcaceae;g__Enterococcus_B;s__Enterococcus_B faecalis
# d__Bacteria;p__Proteobacteria;c__Gammaproteobacteria;o__Pseudomonadales;f__Moraxellaceae;g__Acinetobacter;s__Acinetobacter sp1
# d__Bacteria;p__Firmicutes;c__Bacilli;o__Bacillales;f__Bacillaceae_G;g__Bacillus_A;s__Bacillus_A cereus_AU
# d__Bacteria;p__Firmicutes_A;c__Clostridia;o__Tissierellales;f__Helcococcaceae;g__Finegoldia;s__Finegoldia magna_H
d = {}
for letter, taxon in [e.split('__', 1) for e in taxonomy_text.split(';')]:
if letter in letter_to_level:
# NOTE: This is VERY important. Here we are basically removing subclades GTDB defines for
# simplicity. We may have to change this behavior later. So basically, Enterococcus_B will
# become Enterococcus
if '_' in taxon:
if letter != 's':
d[letter_to_level[letter]] = '_'.join(taxon.split('_')[:-1])
else:
# special treatment for species level taxonomy string.
# the genus is copied for the species level taxonomy, such as this one, 'Bacillus_A cereus', or
# the species may have a subclade, such as this one, 'Corynebacterium aurimucosum_C', so we
# need to make sure the subclades are removed from all words in the species level
# taxonomy string.
d[letter_to_level[letter]] = ' '.join(['_'.join(word.split('_')[:-1]) if '_' in word else word for word in taxon.split(' ')])
else:
d[letter_to_level[letter]] = taxon
else:
self.run.warning("Some weird letter found in '%s' :(" % taxonomy_text)
self.accession_to_taxonomy_dict[accession] = d
# let's add one more accession for all those missing accessions
self.accession_to_taxonomy_dict['unknown_accession'] = dict([(taxon, None) for taxon in self.levels_of_taxonomy])
self.progress.end()
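# A hedged, standalone sketch of the GTDB subclade-stripping rule implemented
# above (names are illustrative): suffixes such as '_B' or '_AU' are dropped so
# that, e.g., 'Enterococcus_B' and 'Enterococcus' are treated as the same genus:
#
#     taxon = 'Enterococcus_B'
#     '_'.join(taxon.split('_')[:-1])                       # -> 'Enterococcus'
#     species = 'Bacillus_A cereus_AU'
#     ' '.join(['_'.join(w.split('_')[:-1]) if '_' in w else w
#               for w in species.split(' ')])               # -> 'Bacillus cereus'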
class TaxonomyEstimatorSingle(TerminologyHelper):
def __init__(self, skip_init=False):
TerminologyHelper.__init__(self)
# update variables
# FIXME: there is a better way to do it.
self.ctx.items = self.ctx.SCGs if self.scgs_focus else self.ctx.anticodons
self.report_item_frequencies_path = self.report_scg_frequencies_path if self.scgs_focus else self.report_anticodon_frequencies_path
self.compute_item_coverages = self.compute_scg_coverages if self.scgs_focus else self.compute_anticodon_coverages
self.item_name_for_metagenome_mode = self.scg_name_for_metagenome_mode if self.scgs_focus else self.anticodon_for_metagenome_mode
self.per_item_output_file = self.per_scg_output_file if self.scgs_focus else self.per_anticodon_output_file
# these dictionaries that will be initiated later
self.contigs_db_project_name = "Unknown"
self.item_name_to_gene_caller_id_dict = {}
self.frequency_of_items_with_taxonomy = {}
self.gene_callers_id_to_item_taxonomy_dict = {}
self.split_name_to_gene_caller_ids_dict = {}
self.gene_callers_id_to_split_name_dict = {}
self.sample_names_in_profile_db = None
self.initialized = False
self.run.info('Contigs DB', self.contigs_db_path)
self.run.info('Profile DB', self.profile_db_path)
self.run.info('Metagenome mode', self.metagenome_mode)
if self.metagenome_mode:
self.run.info(f'{self._ITEM} for metagenome', self.item_name_for_metagenome_mode)
if not skip_init:
self.init()
def init(self):
self.init_items_data()
if self.report_item_frequencies_path:
with open(self.report_item_frequencies_path, 'w') as output:
for item_name, frequency in self.frequency_of_items_with_taxonomy.items():
output.write("%s\t%d\n" % (item_name, frequency))
self.run.info(f'{self._ITEM} frequencies in contigs db', self.report_item_frequencies_path, nl_before=1)
sys.exit()
if self.profile_db_path:
self.sample_names_in_profile_db = ProfileDatabase(self.profile_db_path).samples
self.initialized = True
def init_items_data(self):
"""Initialize {self._ITEM} taxonomy for the entire contigs database"""
if not self.contigs_db_path:
return None
self.progress.new('Initializing')
self.progress.update(f'{self._ITEM} taxonomy dictionary')
for item_name in self.ctx.items:
self.item_name_to_gene_caller_id_dict[item_name] = set([])
if self.scgs_focus:
anvio_taxonomy_table_name = t.scg_taxonomy_table_name
elif self.trna_focus:
anvio_taxonomy_table_name = t.trna_taxonomy_table_name
else:
anvio_taxonomy_table_name = None
contigs_db = ContigsDatabase(self.contigs_db_path, run=self.run, progress=self.progress)
self.contigs_db_project_name = contigs_db.meta['project_name']
anvio_taxonomy_table = contigs_db.db.get_table_as_dict(anvio_taxonomy_table_name)
genes_in_splits = contigs_db.db.get_some_columns_from_table(t.genes_in_splits_table_name, "split, gene_callers_id")
min_contig_length_in_contigs_db = contigs_db.db.get_max_value_in_column(t.contigs_info_table_name, "length", return_min_instead=True)
contigs_db.disconnect()
# this is important. before we begin, we need to filter out gene caller ids and splits from main dictionaries if
# they shouldn't be there. read the warning below to see the utility of this step.
if self.profile_db_path and self.compute_item_coverages:
split_names_in_profile_db = set(utils.get_all_item_names_from_the_database(self.profile_db_path))
split_names_in_contigs_db = set([tpl[0] for tpl in genes_in_splits])
splits_missing_in_profile_db = split_names_in_contigs_db.difference(split_names_in_profile_db)
min_contig_length_in_profile_db = ProfileDatabase(self.profile_db_path).meta['min_contig_length']
if len(splits_missing_in_profile_db):
self.progress.reset()
self.run.warning(f"Please note that anvi'o found {pp(len(split_names_in_contigs_db))} splits in your contigs database. "
f"But only {pp(len(split_names_in_profile_db))} of them appeared ot be in the profile database. As a "
f"result, anvi'o will now remove the {pp(len(splits_missing_in_profile_db))} splits that occur only in "
f"the contigs db from all downstream analyses here (if you didn't use the flag `{self._COMPUTE_COVS_FLAG}` "
f"this wouldn't have been necessary, but with the current settings this is really the best for everyone). "
f"Where is this difference coming from though? Well. This is often the case because the 'minimum contig "
f"length parameter' set during the `anvi-profile` step can exclude many contigs from downstream analyses "
f"(often for good reasons, too). For instance, in your case the minimum contig length goes as low as "
f"{pp(min_contig_length_in_contigs_db)} nts in your contigs database. Yet, the minimum contig length set "
f"in the profile databaes is {pp(min_contig_length_in_profile_db)} nts. Hence the difference. Anvi'o "
f"hopes that this explaines some things.")
self.progress.update("Removing %s splits missing form the profile db" % pp(len(splits_missing_in_profile_db)))
genes_in_splits = [tpl for tpl in genes_in_splits if tpl[0] not in splits_missing_in_profile_db]
# so now we know the final list of split names and gene caller ids as they are stored in the updated
# `genes_in_splits` variable. time to clean up the `anvio_taxonomy_table` dictionary as well.
final_set_of_gene_caller_ids = set([tpl[1] for tpl in genes_in_splits if tpl[0] not in splits_missing_in_profile_db])
entry_ids_to_remove = [entry for entry in anvio_taxonomy_table if anvio_taxonomy_table[entry]['gene_callers_id'] not in final_set_of_gene_caller_ids]
[anvio_taxonomy_table.pop(e) for e in entry_ids_to_remove]
# NOTE: This will modify the taxonomy strings read from the contigs database. see the
# function header for `trim_taxonomy_dict_entry` for more information.
if self.simplify_taxonomy_information:
self.progress.update(f'{self._DELIVERABLE} dicts ... trimming main')
for key in anvio_taxonomy_table:
anvio_taxonomy_table[key] = self.trim_taxonomy_dict_entry(anvio_taxonomy_table[key])
self.progress.update(f'{self._DELIVERABLE} dicts ... building g->tax')
for entry in anvio_taxonomy_table.values():
gene_callers_id = entry['gene_callers_id']
self.gene_callers_id_to_item_taxonomy_dict[gene_callers_id] = entry
self.progress.update(f'{self._DELIVERABLE} dicts ... building tax->g')
for entry in self.gene_callers_id_to_item_taxonomy_dict.values():
item_gene_name = entry[self._VARIABLE_NAME_IN_TABLE]
gene_callers_id = entry['gene_callers_id']
self.item_name_to_gene_caller_id_dict[item_gene_name].add(gene_callers_id)
self.progress.update(f'{self._DELIVERABLE} dicts ... building s->g + g->s')
for split_name, gene_callers_id in genes_in_splits:
if gene_callers_id not in self.gene_callers_id_to_item_taxonomy_dict:
continue
if split_name not in self.split_name_to_gene_caller_ids_dict:
self.split_name_to_gene_caller_ids_dict[split_name] = set()
self.split_name_to_gene_caller_ids_dict[split_name].add(gene_callers_id)
self.gene_callers_id_to_split_name_dict[gene_callers_id] = split_name
self.progress.end()
self.frequency_of_items_with_taxonomy = OrderedDict(sorted([(g, len(self.item_name_to_gene_caller_id_dict[g])) for g in self.item_name_to_gene_caller_id_dict], key = lambda x: x[1], reverse=True))
if self.metagenome_mode or anvio.DEBUG:
self.run.info_single(f"A total of %s {self._SOURCE_DATA}s with taxonomic affiliations were successfully initialized "
f"from the contigs database 🎉 Following shows the frequency of these {self._ITEMS}: %s." % \
(pp(len(self.gene_callers_id_to_item_taxonomy_dict)),
', '.join(["%s (%d)" % (g, self.frequency_of_items_with_taxonomy[g]) \
for g in self.frequency_of_items_with_taxonomy])), nl_before=1)
def trim_taxonomy_dict_entry(self, taxonomy_dict_entry):
""" Remove excess information from taxonomy information.
The purpose of this is to give an option to the user to simplify GTDB names, that
will have a text information for every level of taxonomy depending on what branches
genomes fit, but it is not always helpful to the user. Such as this one:
t_domain Bacteria
t_phylum Firmicutes
t_class Clostridia
t_order Monoglobales
t_family UBA1381
t_genus CAG-41
t_species CAG-41 sp900066215
in this case the user may want to get this instead:
t_domain Bacteria
t_phylum Firmicutes
t_class Clostridia
t_order Monoglobales
t_family None
t_genus None
t_species None
So this function will take a taxonomy dict entry, and will return a simplified
version of it if trimming is applicable.
Parameters
==========
taxonomy_dict_entry: dict
a dictionary that contains keys for all taxon names. such as this one:
{[...],
't_domain': 'Bacteria',
't_phylum': 'Firmicutes',
't_class': 'Clostridia',
't_order': 'Oscillospirales',
't_family': 'Acutalibacteraceae',
't_genus': 'Ruminococcus',
't_species': 'Ruminococcus sp002491825'
}
"""
# for optimization, these letters should all have three characters. if that
# behavior needs to change, the code down below must be updates. the purpose
# of this is not to have a comprehensive list of EVERY single GTDB-specific
# clade designations, but to make sure teh vast majority of names are covered.
GTDB_specific_clade_prefixes = ['CAG', 'GCA', 'UBA', 'FUL', 'PAL', '2-0', 'Fen', 'RF3', 'TAN']
taxonomic_levels_to_nullify = []
for taxonomic_level in self.ctx.levels_of_taxonomy[::-1]:
if not taxonomy_dict_entry[taxonomic_level]:
continue
if taxonomic_level == 't_species':
species_name = taxonomy_dict_entry[taxonomic_level].split(' ')[1]
try:
int(species_name[2])
taxonomic_levels_to_nullify.append(taxonomic_level)
except:
None
else:
if taxonomy_dict_entry[taxonomic_level][0:3] in GTDB_specific_clade_prefixes:
taxonomic_levels_to_nullify.append(taxonomic_level)
# this is the best way to make sure we are not going to nullify order, but leave behind a family name.
if taxonomic_levels_to_nullify:
level_below_which_to_nullify = min([self.ctx.levels_of_taxonomy.index(l) for l in taxonomic_levels_to_nullify])
for taxonomic_level in self.ctx.levels_of_taxonomy[level_below_which_to_nullify:]:
taxonomy_dict_entry[taxonomic_level] = None
return taxonomy_dict_entry
def get_blank_hit_template_dict(self):
hit = {}
for level in self.ctx.levels_of_taxonomy[::-1]:
hit[level] = None
return hit
def get_consensus_taxonomy(self, items_taxonomy_dict):
"""Takes in a items_taxonomy_dict, returns a final taxonomic string that summarize all"""
if not len(items_taxonomy_dict):
return dict([(l, None) for l in self.ctx.levels_of_taxonomy])
pd.set_option('mode.chained_assignment', None)
item_hits = list([v for v in items_taxonomy_dict.values() if v['t_domain']])
if not len(item_hits):
return self.get_blank_hit_template_dict()
df = pd.DataFrame.from_records(item_hits)
# we have already stored a unique hash for taxonomy strings. here we will figure out most frequent
# hash values in the df
tax_hash_counts = df['tax_hash'].value_counts()
tax_hash_df = tax_hash_counts.rename_axis('tax_hash').reset_index(name='frequency')
max_frequency = tax_hash_df.frequency.max()
tax_hash_df_most_frequent = tax_hash_df[tax_hash_df.frequency == max_frequency]
if len(tax_hash_df_most_frequent.index) == 1:
# if there is only a single winner, we're golden
winner_tax_hash = tax_hash_df_most_frequent.tax_hash[0]
# get the consensus hit based on the winner hash
consensus_hit = df[df.tax_hash == winner_tax_hash].head(1)
# turn it into a Python dict before returning
return consensus_hit.to_dict('records')[0]
else:
# if there are competing hashes, we need to be more careful to decide
# which taxonomic level should we use to cut things off.
consensus_hit = self.get_blank_hit_template_dict()
for level in self.ctx.levels_of_taxonomy[::-1]:
if len(df[level].unique()) == 1:
consensus_hit[level] = df[level].unique()[0]
return consensus_hit
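# Hedged, standalone sketch of the winner-hash step above, with made-up
# hashes (pandas only): when one tax_hash is strictly most frequent its row
# wins outright; only ties fall back to the level-by-level agreement loop.
#
#     import pandas as pd
#     df = pd.DataFrame({'tax_hash': ['aa11', 'aa11', 'bb22']})
#     counts = (df['tax_hash'].value_counts()
#               .rename_axis('tax_hash').reset_index(name='frequency'))
#     counts[counts.frequency == counts.frequency.max()].tax_hash.iloc[0]  # 'aa11'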
def print_taxonomy_hits_in_splits(self, hits, bin_name=None):
self.progress.reset()
self.run.warning(None, header='Hits for %s' % (bin_name if bin_name else "a bunch of splits"), lc="green")
if len(hits):
if self.scgs_focus:
header = [self._ITEM, 'gene', 'pct id', 'taxonomy']
field_names = [self._VARIABLE_NAME_IN_TABLE, 'percent_identity', 'gene_callers_id']
else:
header = [self._ITEM, 'amino_acid', 'gene', 'pct id', 'taxonomy']
field_names = [self._VARIABLE_NAME_IN_TABLE, 'amino_acid', 'percent_identity', 'gene_callers_id']
table = []
for hit in hits:
taxon_text = ' / '.join([hit[l] if hit[l] else '' for l in self.ctx.levels_of_taxonomy])
# if the hit we are working on sent here as 'consensus', we will color it up a bit so it shows up
# more clearly in the debug output.
if hit[self._VARIABLE_NAME_IN_TABLE] == 'CONSENSUS':
taxon_text = terminal.c(taxon_text, color='red')
for field_name in field_names:
hit[field_name] = terminal.c(hit[field_name], color='red')
if self.scgs_focus:
table.append([hit[self._VARIABLE_NAME_IN_TABLE], str(hit['gene_callers_id']), str(hit['percent_identity']), taxon_text])
else:
table.append([hit[self._VARIABLE_NAME_IN_TABLE], str(hit['amino_acid']), str(hit['gene_callers_id']), str(hit['percent_identity']), taxon_text])
anvio.TABULATE(table, header)
else:
self.run.info_single("No hits :/")
def get_taxonomy_dict(self, gene_caller_ids, bin_name=None):
items_taxonomy_dict = {}
improper_gene_caller_ids = [g for g in gene_caller_ids if g not in self.gene_callers_id_to_item_taxonomy_dict]
if improper_gene_caller_ids:
raise ConfigError("Something weird is going on. Somehow anvi'o has a bunch of gene caller ids for which it is "
"supposed to estimate taxonomy. However, %d of them do not occur in a key dictionary. The code "
"here does not know what to suggest :( Apologies." % len(improper_gene_caller_ids))
for gene_callers_id in gene_caller_ids:
items_taxonomy_dict[gene_callers_id] = self.gene_callers_id_to_item_taxonomy_dict[gene_callers_id]
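            # `tax_hash` is a hash of the full taxonomy string, which makes it cheap to spot identical
            # taxonomic assignments later while computing a consensus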
items_taxonomy_dict[gene_callers_id]["tax_hash"] = HASH(self.gene_callers_id_to_item_taxonomy_dict[gene_callers_id])
return items_taxonomy_dict
def estimate_for_list_of_splits(self, split_names=None, bin_name=None):
"""Estimate {self._ITEM} taxonomy for a bunch of splits that belong to a single population.
The purpose of this function is to to do critical things: identify genes we use for taxonomy in `split_names`,
and generate a consensus taxonomy with the assumption that these are coming from splits that represents a
single population.
It will return a dictionary with multiple items, including a dictionary that contains the final consensus\
taxonomy, another one that includes every {self._ITEM} and their raw associations with taxon names (from which the\
consensus taxonomy was computed), as well as information about how many {self._ITEMS} were analyzed and supported the\
consesnus.
"""
if self.metagenome_mode:
raise ConfigError("Someone is attempting to estimate taxonomy for a set of splits using a class inherited in "
"`metagenome mode`. If you are a programmer please note that it is best to use the member "
"function `estimate` directly.")
consensus_taxonomy = None
gene_caller_ids_of_interest = self.get_gene_caller_ids_for_splits(split_names)
items_taxonomy_dict = self.get_taxonomy_dict(gene_caller_ids_of_interest)
try:
consensus_taxonomy = self.get_consensus_taxonomy(items_taxonomy_dict)
consensus_taxonomy[self._VARIABLE_NAME_IN_TABLE] = 'CONSENSUS'
consensus_taxonomy['percent_identity'] = '--'
consensus_taxonomy['gene_callers_id'] = '--'
except Exception as e:
self.print_taxonomy_hits_in_splits(list(items_taxonomy_dict.values()))
raise ConfigError(f"While trying to sort out the consensus taxonomy for %s anvi'o failed :( The list of {self._ITEM} taxon hits that "
f"caused the failure is printed in your terminal. But the actual error message that came from the depths "
f"of the codebase was this: '%s'." % (('the bin "%s"' % bin_name) if bin_name else 'a bunch of splits', e))
if anvio.DEBUG:
self.print_taxonomy_hits_in_splits(list(items_taxonomy_dict.values()) + [consensus_taxonomy], bin_name)
# set some useful information. `total_items` is the number of items with taxonomy found in the collection of splits. the
# `supporting_items` shows how many of them supports the consensus taxonomy fully
total_items = len(items_taxonomy_dict)
supporting_items = 0
consensus_taxonomy_levels_occupied = [level for level in self.ctx.levels_of_taxonomy if consensus_taxonomy[level]]
consensus_taxonomy_str = ' / '.join([consensus_taxonomy[level] for level in consensus_taxonomy_levels_occupied])
for item_taxonomy_hit in items_taxonomy_dict.values():
item_taxonomy_hit_str = ' / '.join([str(item_taxonomy_hit[level]) for level in consensus_taxonomy_levels_occupied])
if item_taxonomy_hit_str == consensus_taxonomy_str:
item_taxonomy_hit['supporting_consensus'] = True
supporting_items += 1
else:
item_taxonomy_hit['supporting_consensus'] = False
return {'consensus_taxonomy': consensus_taxonomy,
self._ITEMS.lower(): items_taxonomy_dict,
self._TOTAL_ITEMS: total_items,
self._SUPPORTING_ITEMS: supporting_items,
'metagenome_mode': False}
def estimate_for_bins_in_collection(self):
bins_taxonomy_dict = {}
bin_name_to_split_names_dict = ccollections.GetSplitNamesInBins(self.args).get_dict()
self.run.info_single("%s split names associated with %s bins of in collection '%s' have been "
"successfully recovered 🎊" % (pp(sum([len(v) for v in bin_name_to_split_names_dict.values()])),
pp(len(bin_name_to_split_names_dict)),
self.collection_name), nl_before=1)
for bin_name in bin_name_to_split_names_dict:
split_names = bin_name_to_split_names_dict[bin_name]
bins_taxonomy_dict[bin_name] = self.estimate_for_list_of_splits(split_names, bin_name)
return bins_taxonomy_dict
def estimate_for_contigs_db_for_genome(self):
contigs_db_taxonomy_dict = {}
item_frequencies = self.frequency_of_items_with_taxonomy.values()
if len([sf for sf in item_frequencies if sf > 1]) * 100 / len(item_frequencies) > 20:
if self.scgs_focus:
if self.just_do_it:
self.run.warning("Because you asked anvi'o to just do it, it will do it, but you seem to have too much contamination "
"in this contigs database for it to represent a genome. So probably taxonomy estimations are all "
"garbage, but hey, at least it runs?")
else:
raise ConfigError("Because you haven't used the `--metagenome-mode` flag, anvi'o was trying to treat your contigs "
"database as a genome. But there seems to be too much redundancy of single-copy core genes in this "
"contigs database to assign taxonomy with any confidence :/ A more proper way to do this is to use the "
"`--metagenome-mode` flag. Or you can also tell anvi'o to `--just-do-it`. It is your computer after "
"all :( But you should still be aware that in that case you would likely get a completely irrelevant "
"answer from this program.")
elif self.trna_focus:
reminder = ("Please note that since you haven't used the `--metagenome-mode` flag, anvi'o will treat this contigs "
"database as a genome and not a metagenome and will try to report a 'consensus' taxonomy based on all "
"the tRNA seqeunces and their taxonomic affiliations. ")
if not self.per_item_output_file:
reminder += ("Since this will collapse all anticodons into a single result, you may want to use the flag "
"`--per-anticodon-output-file` to your command line if you would like to see individual "
"anticodons and their taxonomy for your genome.")
self.run.warning(reminder)
splits_in_contigs_database = self.split_name_to_gene_caller_ids_dict.keys()
contigs_db_taxonomy_dict[self.contigs_db_project_name] = self.estimate_for_list_of_splits(split_names=splits_in_contigs_database,
bin_name=self.contigs_db_project_name)
return contigs_db_taxonomy_dict
def estimate_for_contigs_db_for_metagenome(self):
"""Treat a given contigs database as a metagenome.
This function deserves some attention. It relies on a single SCG or anticodon to estimate the composition of a metagenome.
        In contrast, its sister function, `estimate_for_contigs_db_for_genome`, works with a list of splits that are
        assumed to belong to the same genome, in which case a consensus taxonomy learned from all {self._ITEMS} is most
        appropriate. In this case, however, we don't know which splits go together, hence, we can't pull together
{self._ITEMS} to learn a consensus taxonomy for independent populations in the metagenome. The best we can do is to stick
with a single {self._ITEM} with the hope that (1) it will cut through as many populations as possible and (2) will have
reasonable power to resolve taxonomy all by itself. These independent assumptions will both work in some cases
and both fail in others.
"""
# we first need to decide which {self._ITEM} we should use to survey taxonomy
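        # note: this assumes `frequency_of_items_with_taxonomy` is ordered by frequency (most frequent
        # first), so `next(iter(...))` simply grabs its first key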
most_frequent_item = next(iter(self.frequency_of_items_with_taxonomy))
if self.item_name_for_metagenome_mode:
frequency_of_user_chosen_item = self.frequency_of_items_with_taxonomy[self.item_name_for_metagenome_mode]
frequency_of_most_frequent_item = self.frequency_of_items_with_taxonomy[most_frequent_item]
if frequency_of_user_chosen_item < frequency_of_most_frequent_item:
additional_note = f" And just so you know, there is another {self._ITEM} that was observed more times (i.e., \
{most_frequent_item}; {frequency_of_most_frequent_item} times) in this metagenome compared \
                                    to yours (i.e., {frequency_of_user_chosen_item} times). You're the boss, of course."
else:
additional_note = ""
self.run.warning(f"As per your request anvi'o set '{self.item_name_for_metagenome_mode}' to be THE {self._SOURCE_DATA} \
to survey your metagenome for its taxonomic composition.{additional_note}")
else:
self.item_name_for_metagenome_mode = most_frequent_item
self.run.warning(f"Anvi'o automatically set '{self.item_name_for_metagenome_mode}' to be THE {self._SOURCE_DATA} to "
f"survey your metagenome for its taxonomic composition. If you are not happy with that, you could "
f"change it with the parameter `{self._ITEM_FOR_METAGENOME_MODE_PARAM}`.")
gene_caller_ids_of_interest = self.item_name_to_gene_caller_id_dict[self.item_name_for_metagenome_mode]
items_taxonomy_dict = self.get_taxonomy_dict(gene_caller_ids=gene_caller_ids_of_interest,
bin_name=self.contigs_db_project_name)
return {self.contigs_db_project_name: {self._ITEMS.lower(): items_taxonomy_dict,
'metagenome_mode': True}}
def get_items_taxonomy_super_dict(self):
"""Function that returns the `items_taxonomy_super_dict` for SCGs or anticodons.
`items_taxonomy_super_dict` contains a wealth of information regarding samples, genes,
gene taxonomic affiliations, consensus taxonomy, and coverages of genes across samples.
"""
items_taxonomy_super_dict = {}
if not self.initialized:
self.init()
if self.profile_db_path and not self.metagenome_mode:
items_taxonomy_super_dict['taxonomy'] = self.estimate_for_bins_in_collection()
elif not self.profile_db_path and not self.metagenome_mode:
items_taxonomy_super_dict['taxonomy'] = self.estimate_for_contigs_db_for_genome()
elif self.metagenome_mode:
items_taxonomy_super_dict['taxonomy'] = self.estimate_for_contigs_db_for_metagenome()
else:
raise ConfigError("This class doesn't know how to deal with that yet :/")
if self.compute_item_coverages and self.metagenome_mode:
items_taxonomy_super_dict['coverages'] = self.get_item_coverages_across_samples_dict_in_metagenome_mode(items_taxonomy_super_dict)
elif self.compute_item_coverages and not self.metagenome_mode:
items_taxonomy_super_dict['coverages'] = self.get_item_coverages_across_samples_dict_in_genome_mode(items_taxonomy_super_dict)
else:
items_taxonomy_super_dict['coverages'] = None
return items_taxonomy_super_dict
def estimate(self):
items_taxonomy_super_dict = self.get_items_taxonomy_super_dict()
if self.update_profile_db_with_taxonomy:
self.add_taxonomy_as_additional_layer_data(items_taxonomy_super_dict)
self.print_items_taxonomy_super_dict(items_taxonomy_super_dict)
if self.output_file_path:
self.store_items_taxonomy_super_dict(items_taxonomy_super_dict)
if self.per_item_output_file:
self.store_taxonomy_per_item(items_taxonomy_super_dict)
def print_items_taxonomy_super_dict(self, items_taxonomy_super_dict):
self.progress.reset()
if self.collection_name:
self.run.warning(None, header='Estimated taxonomy for collection "%s"' % self.collection_name, lc="green")
elif self.metagenome_mode:
self.run.warning(None, header='Taxa in metagenome "%s"' % self.contigs_db_project_name, lc="green")
else:
self.run.warning(None, header='Estimated taxonomy for "%s"' % self.contigs_db_project_name, lc="green")
d = self.get_print_friendly_items_taxonomy_super_dict(items_taxonomy_super_dict)
ordered_bin_names = sorted(list(d.keys()))
if self.metagenome_mode:
header = ['percent_identity', 'taxonomy']
else:
header = ['', self._TOTAL_ITEMS, self._SUPPORTING_ITEMS, 'taxonomy']
        # if we are in `--compute-xxx-coverages` mode and have more than 5 sample names, we are in trouble since they
        # are unlikely to fit into the display while printing them. so here we will cut it to make sure things look OK.
samples_not_shown = 0
sample_names_to_display = None
if self.compute_item_coverages:
sample_names_to_display = sorted(self.sample_names_in_profile_db)[0:5]
samples_not_shown = sorted(self.sample_names_in_profile_db)[5:]
header += sample_names_to_display
if samples_not_shown:
header += ['... %d more' % len(samples_not_shown)]
# since we know coverages and sample names, we have a chance here to order the output
# based on coverage. so let's do that.
if self.metagenome_mode:
sorted_bin_coverage_tuples = sorted([(bin_name, sum([d[bin_name]['coverages'][sample_name] for sample_name in self.sample_names_in_profile_db])) for bin_name in d], key=lambda x: x[1], reverse=True)
else:
sorted_bin_coverage_tuples = sorted([(bin_name, sum([(d[bin_name]['coverages'][sample_name] if d[bin_name][self._SUPPORTING_ITEMS] else 0) for sample_name in self.sample_names_in_profile_db])) for bin_name in d], key=lambda x: x[1], reverse=True)
ordered_bin_names = [tpl[0] for tpl in sorted_bin_coverage_tuples]
table = []
for bin_name in ordered_bin_names:
bin_data = d[bin_name]
# set the taxonomy text depending on how much room we have. if there are sample coverages, keep it simple,
# otherwise show the entire taxonomy text.
if self.compute_item_coverages:
taxon_text_l = ['(%s) %s' % (l.split('_')[1][0], bin_data[l]) for l in self.ctx.levels_of_taxonomy[::-1] if bin_data[l]]
taxon_text = taxon_text_l[0] if taxon_text_l else '(NA) NA'
else:
taxon_text = ' / '.join([bin_data[l] if bin_data[l] else '' for l in self.ctx.levels_of_taxonomy])
# setting up the table columns here.
if self.metagenome_mode:
row = [bin_name, str(bin_data['percent_identity']), taxon_text]
else:
row = [bin_name, str(bin_data[self._TOTAL_ITEMS]), str(bin_data[self._SUPPORTING_ITEMS]), taxon_text]
# if there are coverages, add samples to the display too
if self.compute_item_coverages:
row += [d[bin_name]['coverages'][sample_name] for sample_name in sample_names_to_display]
if samples_not_shown:
row += ['... %d more' % len(samples_not_shown)]
table.append(row)
# if we are not in metagenome mode let's sort the output table based on total and
# supporting items
if not self.metagenome_mode:
table = sorted(table, key=lambda x: (int(x[1]), int(x[2])), reverse=True)
anvio.TABULATE(table, header)
def store_items_taxonomy_super_dict(self, items_taxonomy_super_dict):
d = self.get_print_friendly_items_taxonomy_super_dict(items_taxonomy_super_dict)
if self.metagenome_mode:
if self.scgs_focus:
headers = ['scg_name', 'percent_identity']
else:
headers = ['anticodon', 'amino_acid', 'percent_identity']
else:
headers = ['bin_name', self._TOTAL_ITEMS, self._SUPPORTING_ITEMS]
headers += self.ctx.levels_of_taxonomy
if self.compute_item_coverages:
headers_for_samples = sorted(self.sample_names_in_profile_db)
else:
headers_for_samples = []
with open(self.output_file_path, 'w') as output:
output.write('\t'.join(headers + headers_for_samples) + '\n')
for item in d:
line = [item] + [d[item][h] for h in headers[1:]]
if self.compute_item_coverages:
for sample_name in headers_for_samples:
line.append(d[item]['coverages'][sample_name])
output.write('\t'.join([str(f) for f in line]) + '\n')
self.run.info("Output file", self.output_file_path, nl_before=1)
def store_taxonomy_per_item(self, items_taxonomy_super_dict):
if self.scgs_focus:
headers = ['bin_name', 'gene_name', 'percent_identity']
else:
headers = ['bin_name','anticodon', 'amino_acid', 'percent_identity']
headers += self.ctx.levels_of_taxonomy
if self.compute_item_coverages:
headers_for_samples = sorted(self.sample_names_in_profile_db)
else:
headers_for_samples = []
with open(self.per_item_output_file, 'w') as output:
output.write('\t'.join(headers + headers_for_samples) + '\n')
for bin_name in items_taxonomy_super_dict["taxonomy"]:
for item_entry in items_taxonomy_super_dict["taxonomy"][bin_name][self._ITEMS.lower()].values():
line = [bin_name] + [item_entry[h] for h in headers[1:]]
if self.compute_item_coverages:
for sample_name in headers_for_samples:
line.append(item_entry['coverages'][sample_name])
output.write('\t'.join([str(f) for f in line]) + '\n')
self.run.info(f"A detailed {self._ITEM} output file", self.per_item_output_file)
def get_print_friendly_items_taxonomy_super_dict(self, items_taxonomy_super_dict):
d = {}
if self.metagenome_mode:
for item_hit in items_taxonomy_super_dict['taxonomy'][self.contigs_db_project_name][self._ITEMS.lower()].values():
item_hit_name = '%s_%d' % (item_hit[self._VARIABLE_NAME_IN_TABLE], item_hit['gene_callers_id'])
d[item_hit_name] = item_hit
if self.compute_item_coverages:
d[item_hit_name]['coverages'] = items_taxonomy_super_dict['coverages'][item_hit['gene_callers_id']]
else:
for bin_name in items_taxonomy_super_dict['taxonomy']:
d[bin_name] = items_taxonomy_super_dict['taxonomy'][bin_name]['consensus_taxonomy']
d[bin_name][self._TOTAL_ITEMS] = items_taxonomy_super_dict['taxonomy'][bin_name][self._TOTAL_ITEMS]
d[bin_name][self._SUPPORTING_ITEMS] = items_taxonomy_super_dict['taxonomy'][bin_name][self._SUPPORTING_ITEMS]
if self.compute_item_coverages:
d[bin_name]['coverages'] = items_taxonomy_super_dict['coverages'][bin_name]
return d
def add_taxonomy_as_additional_layer_data(self, items_taxonomy_super_dict):
"""A function that adds taxonomy to additional data tables of a given profile
database. This will only work in metagenome mode."""
if not self.metagenome_mode or not self.compute_item_coverages:
return
self.progress.new("Adding summary taxonomy for samples")
self.progress.update('...')
items_dict = list(items_taxonomy_super_dict['taxonomy'].values())[0][self._ITEMS.lower()]
# at this stage each items_dict entry will look like this, and most critically will
# have the same items
#
# "7660": {
# "gene_callers_id": 7660,
# "gene_name": "Ribosomal_S6",
# "accession": "CONSENSUS",
# "percent_identity": "98.9",
# "t_domain": "Bacteria",
# "t_phylum": "Firmicutes",
# "t_class": "Bacilli",
# "t_order": "Staphylococcales",
# "t_family": "Staphylococcaceae",
# "t_genus": "Staphylococcus",
# "t_species": null,
# "tax_hash": "b310c392"
# },
#
        # this will enable us to learn which item has been used to calculate coverage
# information by only looking at a single entry:
item_name = list(items_dict.values())[0][self._VARIABLE_NAME_IN_TABLE]
        # the mighty for loop to go through all taxonomic levels one by one
for level in self.ctx.levels_of_taxonomy[::-1]:
# setting the data group early on:
data_group = '%s_%s' % (item_name, level[2:])
self.progress.update('Working on %s-level data' % level)
data_dict = {}
data_keys_list = set([])
for sample_name in self.sample_names_in_profile_db:
data_dict[sample_name] = Counter()
for gene_callers_id in items_dict:
# starting with a tiny hack to fill in missing values. here we first find
# the most highly resolved level of taxonomy that is not null for this
# particular item taxonomy
i = 0
for i in range(self.ctx.levels_of_taxonomy.index(level), 0, -1):
if items_dict[gene_callers_id][self.ctx.levels_of_taxonomy[i]]:
break
# just some abbreviations
l = self.ctx.levels_of_taxonomy[i][2:]
m = items_dict[gene_callers_id][self.ctx.levels_of_taxonomy[i]]
# if the best level we found in the previous step is matching to the level
# set by the main for loop, we're good to go with that name:
if level == self.ctx.levels_of_taxonomy[i]:
taxon_name = m
# otherwise we will try to replace that None name with something that is more
# sensible:
else:
taxon_name = "Unknown_%s_%s_%d" % (l, m, gene_callers_id)
# a key that will turn these data into stacked bar charts in the interface once
# they are added to the database:
key = '%s!%s' % (data_group, taxon_name)
# step where we add up all the values for each identical taxon names as we build
# the data dictionary:
data_dict[sample_name][key] += items_taxonomy_super_dict['coverages'][gene_callers_id][sample_name]
data_keys_list.add(key)
# next few lines demonstrate the power of anvi'o quite nicely:
self.progress.update("Updating additional data tables...")
args = argparse.Namespace(profile_db=self.profile_db_path, target_data_group=data_group, just_do_it=True)
T = TableForLayerAdditionalData(args, r=run_quiet, p=progress_quiet)
T.add(data_dict, list(data_keys_list))
self.progress.reset()
self.run.info_single("%s level taxonomy is added to the profile database." % (level.capitalize()))
self.progress.end()
def get_gene_caller_ids_for_splits(self, split_names_list):
"""Returns gene caller ids found in a list of splits"""
gene_caller_ids_for_splits = set([])
for split_name in split_names_list:
if split_name in self.split_name_to_gene_caller_ids_dict:
gene_caller_ids_for_splits.update(self.split_name_to_gene_caller_ids_dict[split_name])
return gene_caller_ids_for_splits
def get_split_names_for_items_taxonomy_super_dict(self, items_taxonomy_super_dict):
"""Returns a list of split names associated with items found in a items_taxonomy_super_dict."""
if self._ITEMS.lower() not in list(items_taxonomy_super_dict['taxonomy'].values())[0]:
raise ConfigError("Someone called this function with something that doesn't look like the kind "
"of input data it was expecting (sorry for the vagueness of the message, but "
"anvi'o hopes that will be able to find out why it is happening).")
split_names = set([])
for entry_name in items_taxonomy_super_dict['taxonomy']:
for gene_callers_id in items_taxonomy_super_dict['taxonomy'][entry_name][self._ITEMS.lower()]:
split_names.add(self.gene_callers_id_to_split_name_dict[gene_callers_id])
return split_names
def get_item_coverages_across_samples_dict_in_genome_mode(self, items_taxonomy_super_dict):
self.progress.reset()
self.run.info_single(f"Anvi'o will now attempt to recover {self._ITEM} coverages in GENOME MODE from the profile "
"database, which contains %d samples." % (len(self.sample_names_in_profile_db)), nl_before=1, nl_after=1)
item_coverages_across_samples_dict = self.get_item_coverages_across_samples_dict(items_taxonomy_super_dict)
bin_avg_coverages_across_samples_dict = {}
for bin_name in items_taxonomy_super_dict['taxonomy']:
bin_avg_coverages_across_samples_dict[bin_name] = dict([(sample_name, None) for sample_name in self.sample_names_in_profile_db])
for sample_name in self.sample_names_in_profile_db:
average_coverage_across_samples = [item_coverages_across_samples_dict[gene_callers_id][sample_name] for gene_callers_id in items_taxonomy_super_dict['taxonomy'][bin_name][self._ITEMS.lower()]]
if average_coverage_across_samples:
bin_avg_coverages_across_samples_dict[bin_name][sample_name] = np.mean(average_coverage_across_samples)
self.run.warning(f"Anvi'o has just finished recovering {self._ITEM} coverages from the profile database to estimate "
f"the average coverage of your bins across your samples. Please note that anvi'o {self._DELIVERABLE} "
f"framework is using only {len(self.ctx.items)} {self._ITEMS} to estimate taxonomy. Which means, even a highly complete bin "
f"may be missing all of them. In which case, the coverage of that bin will be `None` across all "
f"your samples. The best way to prevent any misleading insights is take these results with a "
f"huge grain of salt, and use the `anvi-summarize` output for critical applications.",
header="FRIENDLY REMINDER", lc="blue")
return bin_avg_coverages_across_samples_dict
def get_item_coverages_across_samples_dict_in_metagenome_mode(self, items_taxonomy_super_dict):
"""Get item coverages in metagenome mode."""
if not self.metagenome_mode:
raise ConfigError("You're calling the wrong function. Your class is not in metagenome mode.")
self.progress.reset()
self.run.info_single(f"Anvi'o will now attempt to recover {self._ITEM} coverages from the profile database, which "
f"contains {len(self.sample_names_in_profile_db)} samples.", nl_before=1, nl_after=1)
return self.get_item_coverages_across_samples_dict(items_taxonomy_super_dict)
def get_item_coverages_across_samples_dict(self, items_taxonomy_super_dict):
"""Get item coverages"""
item_coverages_across_samples_dict = {}
self.progress.new('Recovering coverages')
self.progress.update(f'Learning all split names affiliated with {self._ITEMS} ..')
split_names_of_interest = self.get_split_names_for_items_taxonomy_super_dict(items_taxonomy_super_dict)
self.progress.end()
# initialize split coverages for splits that have anything to do with our items
args = copy.deepcopy(self.args)
args.split_names_of_interest = split_names_of_interest
args.collection_name = None
profile_db = ProfileSuperclass(args, p=self.progress, r=run_quiet)
profile_db.init_split_coverage_values_per_nt_dict()
# recover all gene caller ids that occur in our taxonomy estimation dictionary
        # and get their coverage stats from the profile super
gene_caller_ids_of_interest = set([])
for bin_name in items_taxonomy_super_dict['taxonomy']:
for gene_callers_id in items_taxonomy_super_dict['taxonomy'][bin_name][self._ITEMS.lower()]:
gene_caller_ids_of_interest.add(gene_callers_id)
# at this point we have everything. splits of interest are loaded in memory in `profile_db`, and we know
        # which gene caller ids we are interested in recovering coverages for. the way to access gene coverages
        # is a bit convoluted in the dbops for historical reasons, but it is quite straightforward. the weirdest
        # part is that we need a copy of a contigs super. so we will start with that:
self.progress.new(f"Recovering {self._ITEM} coverages")
self.progress.update("Initiating the contigs super class")
contigs_db = ContigsSuperclass(self.args, r=run_quiet, p=progress_quiet)
for split_name in split_names_of_interest:
self.progress.update("Working with %s" % split_name)
# note for the curious: yes, here we are sending the same gene caller ids of interest over and over to
# the `get_gene_level_coverage_stats` for each split, but that function is smart enough to not spend any
# time on those gene caller ids that do not occur in the split name we are interested in.
all_item_stats_in_split = profile_db.get_gene_level_coverage_stats(split_name, contigs_db, gene_caller_ids_of_interest=gene_caller_ids_of_interest)
for item_stats in all_item_stats_in_split.values():
for entry in item_stats.values():
gene_callers_id = int(entry['gene_callers_id'])
sample_name = entry['sample_name']
coverage = entry['non_outlier_mean_coverage']
if gene_callers_id not in item_coverages_across_samples_dict:
item_coverages_across_samples_dict[gene_callers_id] = dict([(sample_name, 0) for sample_name in self.sample_names_in_profile_db])
item_coverages_across_samples_dict[gene_callers_id][sample_name] = coverage
self.progress.end()
return item_coverages_across_samples_dict
class PopulateContigsDatabaseWithTaxonomy(TerminologyHelper):
"""Search sequences in corresponding databases, and store consensus taxonomy per seqeunce in the database.
Depending on the context, this class currently deals with single-copy core genes or tRNA sequences. The
key function here is the BLAST/DIAMOND search against the matching database, get back all the significant
hits (or as many as `max_target_seqs` if hits exceed that), and try to find a LCA of all matches (hence it
    is important to not set `max_target_seqs` to a small number).
The class will recover sequences of interest from an anvi'o contigs database given the context. That said,
it can also be used in an ad-hoc manner, where the user can search a single sequence.
"""
def __init__(self, args):
self.taxonomy_dict = OrderedDict()
self.mutex = multiprocessing.Lock()
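        # this lock serializes per-hit debug/report output from the parallel search workers so that
        # their tables do not get interleaved in the terminal or the output file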
TerminologyHelper.__init__(self)
if self.contigs_db_path:
utils.is_contigs_db(self.contigs_db_path)
contigs_db = ContigsDatabase(self.contigs_db_path, run=self.run, progress=self.progress)
self.contigs_db_project_name = contigs_db.meta['project_name']
contigs_db.disconnect()
else:
self.contigs_db_project_name = "UNKNOWN"
def get_sequences_dict_from_contigs_db(self):
"""Returns a dictionary of all HMM hits per SCG/anticodon of interest"""
contigs_db = ContigsSuperclass(self.args, r=run_quiet, p=progress_quiet)
splits_dict = {contigs_db.a_meta['project_name']: list(contigs_db.splits_basic_info.keys())}
if self.scgs_focus:
hmm_source = self.ctx.hmm_source_for_scg_taxonomy
default_items_for_taxonomy = self.ctx.default_scgs_for_taxonomy
return_amino_acid_sequences = True
elif self.trna_focus:
hmm_source = self.ctx.hmm_source_for_trna_genes
default_items_for_taxonomy = self.ctx.default_anticodons_for_taxonomy
return_amino_acid_sequences = False
else:
raise ConfigError("Anvi'o is lost.")
s = hmmops.SequencesForHMMHits(self.args.contigs_db, sources=hmm_source, run=run_quiet, progress=progress_quiet)
hmm_sequences_dict = s.get_sequences_dict_for_hmm_hits_in_splits(splits_dict, return_amino_acid_sequences=return_amino_acid_sequences)
        # a trick for tRNAs specifically, since tRNA gene names contain two features and are formed as `AMINOACID_ANTICODON`.
        # here we replace those gene names with `ANTICODON` only
if self.trna_focus:
for entry in hmm_sequences_dict:
hmm_sequences_dict[entry]['gene_name'] = hmm_sequences_dict[entry]['gene_name'].split('_')[1]
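        # keep only the HMM hits that correspond to the default set of genes/anticodons used for taxonomy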
hmm_sequences_dict = utils.get_filtered_dict(hmm_sequences_dict, 'gene_name', set(default_items_for_taxonomy))
if not len(hmm_sequences_dict):
return None
self.progress.reset()
self.run.info(f'Num relevant {self._ITEMS} in contigs db', '%s' % (pp(len(hmm_sequences_dict))))
item_sequences_dict = {}
for entry_id in hmm_sequences_dict:
entry = hmm_sequences_dict[entry_id]
item_name = entry['gene_name']
if item_name in item_sequences_dict:
item_sequences_dict[item_name][entry_id] = entry
else:
item_sequences_dict[item_name] = {entry_id: entry}
return item_sequences_dict
def populate_contigs_database(self):
"""Populates SCG/tRNA taxonomy tables in a contigs database"""
        # get an instance for the tables for taxonomy early on.
if self.scgs_focus:
anvio_taxonomy_table_name = t.scg_taxonomy_table_name
self.tables_for_taxonomy = TableForSCGTaxonomy(self.contigs_db_path, self.run, self.progress)
database_version = self.ctx.scg_taxonomy_database_version
elif self.trna_focus:
anvio_taxonomy_table_name = t.trna_taxonomy_table_name
self.tables_for_taxonomy = TableForTRNATaxonomy(self.contigs_db_path, self.run, self.progress)
database_version = self.ctx.trna_taxonomy_database_version
else:
anvio_taxonomy_table_name = None
self.tables_for_taxonomy = None
database_version = None
# get the dictionary that shows all hits for each self._ITEM of interest
self.progress.new('Contigs bleep bloop')
self.progress.update(f'Recovering the {self._ITEMS} dictionary')
item_sequences_dict = self.get_sequences_dict_from_contigs_db()
self.progress.end()
if not item_sequences_dict:
self.run.warning(f"This contigs database contains no {self._SOURCE_DATA} sequences that are used by the "
f"anvi'o taxonomy headquarters in Lausanne. Somewhat disappointing but totally OK.")
            # even if there are no SCGs to use for taxonomy later, we did attempt to populate the
# contigs database, so we shall note that in the self table to make sure the error from
# `anvi-estimate-genome-taxonomy` is not "you seem to have not run taxonomy".
self.tables_for_taxonomy.update_db_self_table_values(taxonomy_was_run=True, database_version=database_version)
# return empty handed like a goose in the job market in 2020
return None
log_file_path = filesnpaths.get_temp_file_path()
self.run.info('Taxonomy', self.ctx.accession_to_taxonomy_file_path)
self.run.info('Database reference', self.ctx.search_databases_dir_path)
self.run.info(f'Number of {self._ITEMS}', len(item_sequences_dict))
self.run.warning('', header='Parameters for search', lc='green')
self.run.info('Max number of target sequences', self.max_target_seqs)
self.run.info('Max e-value to report alignments', self.evalue)
self.run.info('Min percent identity to report alignments', self.min_pct_id)
        self.run.info('Num alignment tasks running in parallel', self.num_parallel_processes)
        self.run.info('Num CPUs per alignment task', self.num_threads)
self.run.info('Log file path', log_file_path)
self.tables_for_taxonomy.delete_contents_of_table(anvio_taxonomy_table_name, warning=False)
self.tables_for_taxonomy.update_db_self_table_values(taxonomy_was_run=False, database_version=None)
total_num_processes = len(item_sequences_dict)
self.progress.new('Performing search', progress_total_items=total_num_processes)
        self.progress.update('Initializing %d processes...' % int(self.num_parallel_processes))
manager = multiprocessing.Manager()
input_queue = manager.Queue()
output_queue = manager.Queue()
error_queue = manager.Queue()
search_output = []
item_sequences_fate = {}
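        # track, for each item, how many sequences were found in the contigs db and how many ended up
        # with at least one hit in the search results, so annotation rates can be reported at the end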
for item_name in item_sequences_dict:
item_sequences_fate[item_name] = {'in_contigs_db': len(item_sequences_dict[item_name]), 'in_search_results': 0}
sequence = ""
for entry in item_sequences_dict[item_name].values():
if 'sequence' not in entry or 'gene_name' not in entry:
raise ConfigError(f"The `get_filtered_dict` function got a parameter that does not look like "
f"the way we expected it. This function expects a dictionary that contains "
f"keys `gene_name` and `sequence`.")
sequence = sequence + ">" + str(entry['gene_callers_id']) + "\n" + entry['sequence'] + "\n"
entry['hits'] = []
input_queue.put([item_name, sequence])
workers = []
for i in range(0, int(self.num_parallel_processes)):
worker = multiprocessing.Process(target=self.blast_search_worker, args=(input_queue, output_queue, error_queue, log_file_path))
workers.append(worker)
worker.start()
num_finished_processes = 0
while num_finished_processes < total_num_processes:
# check error
error_text = error_queue.get()
if error_text:
self.progress.reset()
for worker in workers:
worker.terminate()
                if error_text and 'incompatible' in str(error_text):
raise ConfigError(f"Your current databases are incompatible with the diamond version you have on your computer. "
f"Please run the command `{self._SETUP_PROGRAM} --redo-databases` and come back.")
else:
if self.scgs_focus:
raise ConfigError(f"Bad news. The database search operation failed somewhere :( It is very hard for anvi'o "
f"to know what happened, but this is what we heard last: '{error_text}'. If this error message "
f"makes sense ot you, great. Otherwise a LIKELY reason for this failure is that you have a diamond version "
f"installed on your system that is incompatible with anvi'o :/ The best course of action for that "
f"is to make sure running `diamond --version` on your terminal returns `0.9.14`. If not, "
f"try to upgrade/downgrade your diamond to match this version. If you are in a conda environmnet "
f"you can try running `conda install diamond=0.9.14`. Please feel free to contact us if the problem "
f"persists. We apologize for the inconvenience.")
else:
raise ConfigError(f"Bad news. The database search operation failed somewhere :( It is very hard for anvi'o "
f"to know what happened, but this is what we heard last: '{error_text}'. If this error message "
f"makes sense ot you, great. Otherwise a LIKELY reason for this is that you have a BLAST version "
f"installed on your system that is incompatible with anvi'o :/ If you see `2.10.1` or higher "
f"version numbers when you type `blastn -version` in your terminal, it may be wortwhile to "
f"get in touch with anvi'o developers. Note for developers: if the user has a newer version of "
f"blast, this may be due to changing database extensions generated by `makeblastdb`.")
try:
search_output += output_queue.get()
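                # write accumulated results to the database in chunks of `write_buffer_size` so they
                # do not all pile up in memory before being stored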
if self.write_buffer_size > 0 and len(search_output) % self.write_buffer_size == 0:
self.tables_for_taxonomy.add(search_output)
for s in search_output:
item_sequences_fate[s[1]]['in_search_results'] += 1
search_output = []
num_finished_processes += 1
self.progress.increment(increment_to=num_finished_processes)
self.progress.update(f"%s of %s {self._ITEMS} are finished in %s processes with %s threads." \
% (num_finished_processes, total_num_processes, int(self.num_parallel_processes), self.num_threads))
except KeyboardInterrupt:
print("Anvi'o profiler recieved SIGINT, terminating all processes...")
break
for worker in workers:
worker.terminate()
# finally the remaining hits are written to the database, and we are done
self.tables_for_taxonomy.add(search_output)
for s in search_output:
item_sequences_fate[s[1]]['in_search_results'] += 1
# time to update the self table:
self.tables_for_taxonomy.update_db_self_table_values(taxonomy_was_run=True, database_version=database_version)
self.progress.end()
# let user know about what happened to their sequences in the database
header = [f"{self._ITEM}", "in_contigs_db", "in_search_results", "percent_annotation"]
table = []
for item_name in item_sequences_fate:
table.append([item_name, item_sequences_fate[item_name]['in_contigs_db'], item_sequences_fate[item_name]['in_search_results'], round(item_sequences_fate[item_name]['in_search_results'] * 100 / item_sequences_fate[item_name]['in_contigs_db'], 1)])
table.sort(key=lambda x: x[0])
self.run.warning(f"Please take a careful look at the table below. It shows the number of sequences found in the contigs database "
f"for a given {self._ITEM}, and how many of them actually was annotated based on sequences in GTDB. The discrepancy "
f"between these two numbers can occur due to multiple reasons. You may have bacterial or archaeal sequences in your "
f"data that are way too novel to hit anything in the GTDB above the percent identity cutoff set by the parameter"
f"`--min-percent-identity` to report alignments (which is set to {self.min_pct_id} in the current run). Or, "
f"you may have a lot of eukaryotic organisms, {self._SOURCE_DATA} sequences of which may have no representation "
f"in the GTDB, in which case decreasing `--min-percent-identity` will only give you erronous results.")
anvio.TABULATE(table, header, numalign="center")
def store_hits_gene_callers_id(self, gene_callers_id, gene_sequence, item_name, hits):
if self.trna_focus:
header = ['db_name', 'gene_callers_id', 'anticodon', 'amino_acid', 'accession', 'percent_identity', 'bitscore'] + constants.levels_of_taxonomy
elif self.scgs_focus:
header = ['db_name', 'gene_callers_id', 'gene_name', 'accession', 'percent_identity', 'bitscore'] + constants.levels_of_taxonomy
else:
header = None
if not os.path.exists(self.all_hits_output_file_path):
with open(self.all_hits_output_file_path, 'w') as output:
output.write('\t'.join(header) + '\n')
if self.trna_focus:
line = [str(f) for f in [self.contigs_db_project_name, gene_callers_id, item_name, constants.anticodon_to_AA[item_name]]]
elif self.scgs_focus:
line = [str(f) for f in [self.contigs_db_project_name, gene_callers_id, item_name]]
with open(self.all_hits_output_file_path, 'a') as output:
for hit in hits:
if self.trna_focus:
output.write('\t'.join(line + [str(hit[k]) for k in header[4:]]) + '\n')
elif self.scgs_focus:
output.write('\t'.join(line + [str(hit[k]) for k in header[3:]]) + '\n')
def show_hits_gene_callers_id(self, gene_callers_id, gene_sequence, item_name, hits):
self.progress.reset()
self.run.warning(None, header=f"Hits for '{item_name}' w/gene caller id '{gene_callers_id}'", lc="green")
self.run.info_single(gene_sequence, nl_after=1, level=0)
if len(hits):
header = ['%id', 'bitscore', 'accession', 'taxonomy']
table = []
for hit in hits:
if hit['accession'] == 'CONSENSUS':
accession = terminal.c(hit['accession'], color='red')
taxon_text = terminal.c(' / '.join([hit[l] if hit[l] else '' for l in self.ctx.levels_of_taxonomy]), color='red')
table.append([str(hit['percent_identity']), str(hit['bitscore']), accession, taxon_text])
else:
table.append([str(hit['percent_identity']), str(hit['bitscore']), hit['accession'], ' / '.join([hit[l] if hit[l] else '' for l in self.ctx.levels_of_taxonomy])])
anvio.TABULATE(table, header)
else:
self.run.info_single(f"No hits to anything in {self.ctx.target_database_name} {self.ctx.target_database_release} at minimum percent identity of {self.min_pct_id} :/", nl_after=1, mc="red")
def update_dict_with_taxonomy(self, d, mode=None):
"""Takes a dictionary that includes a key `accession` and populates the dictionary with taxonomy"""
if not mode:
if not 'accession' in d:
raise ConfigError("`add_taxonomy_to_dict` is speaking: the dictionary sent here does not have a member "
"with key `accession`.")
if d['accession'] in self.ctx.accession_to_taxonomy_dict:
d.update(self.ctx.accession_to_taxonomy_dict[d['accession']])
else:
d.update(self.ctx.accession_to_taxonomy_dict['unknown_accession'])
elif mode == 'list_of_dicts':
if len([entry for entry in d if 'accession' not in entry]):
raise ConfigError("`add_taxonomy_to_dict` is speaking: you have a bad formatted data here :/")
for entry in d:
print(self.taxonomy_dict[entry['accession']])
else:
raise ConfigError("An unknown mode (%s) is set to `add_taxonomy_to_dict` :/" % (mode))
return d
def get_gene_estimation_output(self, item_name, fasta_formatted_sequence, log_file_path, show_all_hits=False):
genes_estimation_output=[]
if self.scgs_focus:
target_database_path = self.ctx.SCGs[item_name]['db']
diamond = Diamond(target_database_path, run=run_quiet, progress=progress_quiet)
diamond.max_target_seqs = self.max_target_seqs
diamond.evalue = self.evalue
diamond.min_pct_id = self.min_pct_id
diamond.num_threads = self.num_threads
diamond.run.log_file_path = log_file_path
raw_search_output = diamond.blastp_stdin_multi(fasta_formatted_sequence)
elif self.trna_focus:
target_database_path = self.ctx.anticodons[item_name]['db']
blast = BLAST(None, target_database_path, run=run_quiet, progress=progress_quiet)
blast.search_program = 'blastn'
blast.max_target_seqs = self.max_target_seqs
blast.evalue = self.evalue
blast.min_pct_id = self.min_pct_id
blast.num_threads = self.num_threads
blast.run.log_file_path = log_file_path
raw_search_output = blast.blast_stdin(fasta_formatted_sequence)
else:
raise ConfigError("You must be joking, Mr. Feynman.")
hits_per_gene = {}
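        # parse the tab-delimited search output; the fields used below are the query gene callers id
        # (column 1), subject accession (column 2), percent identity (column 3), and bit score (column 12)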
for blastp_hit in raw_search_output.split('\n'):
if len(blastp_hit) and not blastp_hit.startswith('Query'):
fields = blastp_hit.split('\t')
try:
gene_callers_id = int(fields[0])
except:
gene_callers_id = fields[0]
hit = dict(zip(['accession', 'percent_identity', 'bitscore'], [fields[1], float(fields[2]), float(fields[11])]))
hit = self.update_dict_with_taxonomy(hit)
if gene_callers_id not in hits_per_gene:
hits_per_gene[gene_callers_id] = {}
if item_name not in hits_per_gene[gene_callers_id]:
hits_per_gene[gene_callers_id][item_name] = []
hits_per_gene[gene_callers_id][item_name].append(hit)
# logging and reporting block
if (anvio.DEBUG or self.all_hits_output_file_path) and not len(hits_per_gene):
with self.mutex:
if anvio.DEBUG or show_all_hits:
self.progress.reset()
self.show_hits_gene_callers_id(fasta_formatted_sequence.split('\n')[0][1:], fasta_formatted_sequence.split('\n')[1], item_name, [])
if self.all_hits_output_file_path:
self.store_hits_gene_callers_id(fasta_formatted_sequence.split('\n')[0][1:], fasta_formatted_sequence.split('\n')[1], item_name, [])
for gene_callers_id, raw_hits in hits_per_gene.items():
if len(raw_hits.keys()) > 1:
self.run.warning("As crazy as it sounds, the gene callers id `%d` seems to have hit more than one SCG o_O Anvi'o will only use "
"one of them almost absolutely randomly. Here are the SCGs the gene sequence matches: '%s'" % [s for s in raw_hits.keys()])
item_name = list(raw_hits.keys())[0]
raw_hits = raw_hits[item_name]
consensus_hit = self.get_consensus_hit(raw_hits)
consensus_hit['accession'] = 'CONSENSUS'
if anvio.DEBUG or show_all_hits or self.all_hits_output_file_path:
                # avoid race conditions when printing this information when `--debug` is true:
with self.mutex:
if anvio.DEBUG or show_all_hits:
self.progress.reset()
self.show_hits_gene_callers_id(gene_callers_id, fasta_formatted_sequence.split('\n')[1], item_name, raw_hits + [consensus_hit])
if self.all_hits_output_file_path:
self.store_hits_gene_callers_id(gene_callers_id, fasta_formatted_sequence.split('\n')[1], item_name, raw_hits + [consensus_hit])
genes_estimation_output.append([gene_callers_id, item_name, [consensus_hit]])
return genes_estimation_output
def blast_search_worker(self, input_queue, output_queue, error_queue, log_file_path):
"""BLAST each SCG or tRNA sequence identified in the contigs database against the corresopinding
target local database of GTDB seqeunces in parallel.
"""
while True:
            item_name, fasta_formatted_sequence = input_queue.get(True)
            # default to an empty result so there is always something to put on the output queue,
            # even if the search below raises an exception
            genes_estimation_output = []
            try:
                genes_estimation_output = self.get_gene_estimation_output(item_name, fasta_formatted_sequence, log_file_path)
                error_queue.put(None)
            except Exception as e:
                error_queue.put(e)
            output_queue.put(genes_estimation_output)
def get_consensus_hit(self, raw_hits):
pd.set_option('mode.chained_assignment', None)
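        # silence pandas' SettingWithCopy warning; the `.at` assignment on the sliced frame below would
        # otherwise trigger it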
df = pd.DataFrame.from_records(raw_hits)
# remove hits that are null at the phylum level if there are still hits
# in the df that are not null:
not_null_hits = df[df.t_phylum.notnull()]
if len(not_null_hits):
df = not_null_hits
# find the max percent identity score in the df
max_percent_identity = max(df['percent_identity'])
# subset the data frame to those with percent identity that match to `max_percent_identity`
df_max_identity = df.loc[df.percent_identity == max_percent_identity]
        # if some of the competing names have null species designations, remove them from consideration
if len(df_max_identity.t_species.unique()) > 1:
df_max_identity = df_max_identity[df_max_identity.t_species.notnull()]
# find the taxonomic level where the number of unique taxon names is one
for taxonomic_level in self.ctx.levels_of_taxonomy[::-1]:
if len(df_max_identity[taxonomic_level].unique()) == 1:
break
# take one of the hits from `df_max_identity`, and assign None to all taxonomic levels
# beyond `taxonomic_level`, which, after the loop above shows the proper level of
# assignment for this set
        final_hit = df_max_identity.head(1).reset_index(drop=True)  # reset the index so the `.at[0, ...]` assignments below always target this row
for taxonomic_level_to_nullify in self.ctx.levels_of_taxonomy[self.ctx.levels_of_taxonomy.index(taxonomic_level) + 1:]:
final_hit.at[0, taxonomic_level_to_nullify] = None
# FIXME: final hit is still not what we can trust. next, we should find out whether the percent identity
# for the level of taxonomy at `taxonomic_level` is higher than the minimum percent identity for all sequences
# considered that are affiliated with final_hit[taxonomic_level]
# turn it into a Python dict before returning
final_hit_dict = final_hit.to_dict('records')[0]
return final_hit_dict
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/units/artist_tests.py | 6 | 1475 | """
Test unit support with each of the matplotlib primitive artist types
The axes handles unit conversions and the artists keep a pointer to
their axes parent, so you must init the artists with the axes instance
if you want to initialize them with unit data, or else they will not
know how to convert the units to scalars
"""
import random
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.text as text
import matplotlib.collections as collections
from basic_units import cm, inch
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.xaxis.set_units(cm)
ax.yaxis.set_units(cm)
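# with units set on both axes, the artists created below can be initialized with cm quantities
# and the axes will convert them to scalars for drawing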
if 0:
# test a line collection
# Not supported at present.
verts = []
for i in range(10):
# a random line segment in inches
verts.append(zip(*inch*10*np.random.rand(2, random.randint(2,15))))
lc = collections.LineCollection(verts, axes=ax)
ax.add_collection(lc)
# test a plain-ol-line
line = lines.Line2D([0*cm, 1.5*cm], [0*cm, 2.5*cm], lw=2, color='black', axes=ax)
ax.add_line(line)
if 0:
# test a patch
# Not supported at present.
rect = patches.Rectangle( (1*cm, 1*cm), width=5*cm, height=2*cm, alpha=0.2, axes=ax)
ax.add_patch(rect)
t = text.Text(3*cm, 2.5*cm, 'text label', ha='left', va='bottom', axes=ax)
ax.add_artist(t)
ax.set_xlim(-1*cm, 10*cm)
ax.set_ylim(-1*cm, 10*cm)
#ax.xaxis.set_units(inch)
ax.grid(True)
ax.set_title("Artists with units")
plt.show()
| mit |
kagayakidan/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 77 | 1820 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
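# x axis: the fraction of data used for training (1 - heldout proportion)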
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
lht142934/trading-with-python | lib/yahooFinance.py | 76 | 8290 | # -*- coding: utf-8 -*-
# Author: Jev Kuznetsov <[email protected]>
# License: BSD
"""
Toolset working with yahoo finance data
This module includes functions for easy access to YahooFinance data
Functions
----------
- `getHistoricData` get historic data for a single symbol
- `getQuote` get current quote for a symbol
- `getScreenerSymbols` load symbols from a yahoo stock screener file
Classes
---------
- `HistData` a class for working with multiple symbols
"""
from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index, HDFStore, WidePanel
import numpy as np
import os
from extra import ProgressBar
def parseStr(s):
''' convert string to a float or string '''
f = s.strip()
if f[0] == '"':
return f.strip('"')
elif f=='N/A':
return np.nan
else:
try: # try float conversion
prefixes = {'M':1e6, 'B': 1e9}
prefix = f[-1]
if prefix in prefixes: # do we have a Billion/Million character?
return float(f[:-1])*prefixes[prefix]
else: # no, convert to float directly
return float(f)
except ValueError: # failed, return original string
return s
class HistData(object):
''' a class for working with yahoo finance data '''
def __init__(self, autoAdjust=True):
self.startDate = (2008,1,1)
self.autoAdjust=autoAdjust
self.wp = WidePanel()
def load(self,dataFile):
"""load data from HDF"""
if os.path.exists(dataFile):
store = HDFStore(dataFile)
symbols = [str(s).strip('/') for s in store.keys() ]
data = dict(zip(symbols,[store[symbol] for symbol in symbols]))
self.wp = WidePanel(data)
store.close()
else:
raise IOError('Data file does not exist')
def save(self,dataFile):
""" save data to HDF"""
print 'Saving data to', dataFile
store = HDFStore(dataFile)
for symbol in self.wp.items:
store[symbol] = self.wp[symbol]
store.close()
def downloadData(self,symbols='all'):
''' get data from yahoo '''
if symbols == 'all':
symbols = self.symbols
#store = HDFStore(self.dataFile)
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
try:
df = getSymbolData(symbol,sDate=self.startDate,verbose=False)
if self.autoAdjust:
df = _adjust(df,removeOrig=True)
if len(self.symbols)==0:
self.wp = WidePanel({symbol:df})
else:
self.wp[symbol] = df
except Exception,e:
print e
p.animate(idx+1)
def getDataFrame(self,field='close'):
''' return a slice on wide panel for a given field '''
return self.wp.minor_xs(field)
@property
def symbols(self):
return self.wp.items.tolist()
def __repr__(self):
return str(self.wp)
def getQuote(symbols):
''' get current yahoo quote, return a DataFrame '''
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
if not isinstance(symbols,list):
symbols = [symbols]
header = ['symbol','last','change_pct','PE','time','short_ratio','prev_close','eps','market_cap']
request = str.join('', ['s', 'l1', 'p2' , 'r', 't1', 's7', 'p', 'e' , 'j1'])
data = dict(zip(header,[[] for i in range(len(header))]))
urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request)
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
for line in lines:
fields = line.strip().split(',')
#print fields, len(fields)
for i,field in enumerate(fields):
data[header[i]].append( parseStr(field))
idx = data.pop('symbol')
return DataFrame(data,index=idx)
def _historicDataUrll(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3]):
"""
generate url
symbol: Yahoo finanance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
return urlStr
def getHistoricData(symbols, **options):
'''
get data from Yahoo finance and return pandas dataframe
    Will get OHLCV data frame if single symbol is provided.
If many symbols are provided, it will return a wide panel
Parameters
------------
symbols: Yahoo finanance symbol or a list of symbols
sDate: start date (y,m,d)
eDate: end date (y,m,d)
adjust : T/[F] adjust data based on adj_close
'''
assert isinstance(symbols,(list,str)), 'Input must be a string symbol or a list of symbols'
if isinstance(symbols,str):
return getSymbolData(symbols,**options)
else:
data = {}
print 'Downloading data:'
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
p.animate(idx+1)
data[symbol] = getSymbolData(symbol,verbose=False,**options)
return WidePanel(data)
def getSymbolData(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3], adjust=False, verbose=True):
"""
get data from Yahoo finance and return pandas dataframe
symbol: Yahoo finanance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
return None
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
#print line
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
if verbose:
print 'Got %i days of data' % len(df)
if adjust:
return _adjust(df,removeOrig=True)
else:
return df
def _adjust(df, removeOrig=False):
'''
    adjust historic data based on the adj_close field
'''
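    # the close/adj_close ratio reflects splits and dividends; dividing the OHLC columns by it
    # back-adjusts the full price history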
c = df['close']/df['adj_close']
df['adj_open'] = df['open']/c
df['adj_high'] = df['high']/c
df['adj_low'] = df['low']/c
if removeOrig:
df=df.drop(['open','close','high','low'],axis=1)
renames = dict(zip(['adj_open','adj_close','adj_high','adj_low'],['open','close','high','low']))
df=df.rename(columns=renames)
return df
def getScreenerSymbols(fileName):
''' read symbols from a .csv saved by yahoo stock screener '''
with open(fileName,'r') as fid:
lines = fid.readlines()
symbols = []
for line in lines[3:]:
fields = line.strip().split(',')
field = fields[0].strip()
if len(field) > 0:
symbols.append(field)
return symbols
| bsd-3-clause |
f3r/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 12 | 3888 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
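# Usage hint (not part of the original example): the hyper-parameters picked by
# each grid search can be inspected on the fitted objects, e.g.
# clf.best_params_['anova__percentile'] for the ANOVA pipeline above.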
| bsd-3-clause |
jundongl/scikit-feature | skfeature/example/test_MIFS.py | 3 | 1501 | import scipy.io
from sklearn.metrics import accuracy_score
from sklearn import cross_validation
from sklearn import svm
from skfeature.function.information_theoretical_based import MIFS
def main():
# load data
mat = scipy.io.loadmat('../data/BASEHOCK.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 10 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the index of each feature on the training set
idx = MIFS.mifs(X[train], y[train], n_selected_features=num_fea)
# obtain the dataset on the selected features
features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
print acc
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main() | gpl-2.0 |
fengzhyuan/scikit-learn | sklearn/neural_network/rbm.py | 206 | 12292 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
mhue/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression allows estimating the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        If the loss does not support probabilities, an AttributeError is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
        we take advantage of the fact that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
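# 'deviance' maps to None above because the concrete class is only known at fit
# time: _check_params selects BinomialDeviance or MultinomialDeviance based on
# the number of classes.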
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
    If ``verbose==1``, output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1, output is printed for
    each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
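
# Illustrative sketch (defined but never called): how the staged prediction API
# documented above can be used to monitor held-out performance after every
# boosting stage. The synthetic dataset and the train/test split below are
# assumptions made only for this example.
def _example_staged_monitoring():
    from sklearn.datasets import make_hastie_10_2
    from sklearn.cross_validation import train_test_split

    X, y = make_hastie_10_2(n_samples=4000, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=0)
    clf.fit(X_train, y_train)
    # ``staged_predict`` yields one prediction array per stage, so test-set
    # accuracy can be tracked without refitting the model.
    staged_accuracy = [np.mean(y_pred == y_test)
                       for y_pred in clf.staged_predict(X_test)]
    return staged_accuracy
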
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
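
# Illustrative sketch (defined but never called): the ``loss='quantile'`` option
# described in the class docstring can be used to build a rough prediction
# interval by fitting two models with different ``alpha`` values. The toy data
# below is an assumption made only for this example.
def _example_quantile_interval():
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 10, size=(200, 1))
    y = np.sin(X.ravel()) + rng.normal(scale=0.3, size=200)
    lower = GradientBoostingRegressor(loss='quantile', alpha=0.1,
                                      n_estimators=200).fit(X, y)
    upper = GradientBoostingRegressor(loss='quantile', alpha=0.9,
                                      n_estimators=200).fit(X, y)
    # For most samples the two predictions bracket roughly 80% of the targets.
    return lower.predict(X), upper.predict(X)
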
| bsd-3-clause |
kanchenxi04/vnpy-app | vn.trader/ctaAlgo/strategy_MACD_v02.py | 1 | 26731 | # encoding: UTF-8
# 首先写系统内置模块
from datetime import datetime, timedelta, date
from time import sleep
# 其次,导入vnpy的基础模块
import os
import sys
# sys.path.append('C:\\vnpy_1.5\\vnpy-master\\vn.trader')
sys.path.append('../')
from vtConstant import EMPTY_STRING, EMPTY_INT, DIRECTION_LONG, DIRECTION_SHORT, OFFSET_OPEN, STATUS_CANCELLED
from utilSinaClient import UtilSinaClient
# 然后是自己编写的模块
from ctaTemplate import *
from ctaBase import *
from ctaLineBar import *
from ctaPosition import *
from ctaPolicy import *
from ctaBacktesting import BacktestingEngine
class Strategy_MACD_01(CtaTemplate):
"""螺纹钢、15分钟级别+60分钟级别,MACD策略
v1:15f上多空仓开仓
v2:60f上的仓位管理体系
本版本现存问题:
无
已解决问题:
15f上按照百分比开仓
"""
className = 'Strategy_MACD'
author = u'横纵19950206'
# 策略在外部设置的参数
# ----------------------------------------------------------------------
def __init__(self, ctaEngine, setting=None):
"""Constructor"""
super(Strategy_MACD_01, self).__init__(ctaEngine, setting)
# 增加监控参数项目
# 增加监控变量项目
self.varList.append('pos') # 仓位,这里的仓位通常是手数
self.varList.append('entrust') # 是否正在委托,0表示没有委托,1多仓,-1空仓
self.varList.append('percentLimit') # 当前账户最大仓位
self.varList.append('percent') # 当前仓位
self.curDateTime = None # 当前Tick时间
self.curTick = None # 最新的tick
self.lastOrderTime = None # 上一次委托时间
self.cancelSeconds = 60 # 撤单时间(秒)
# 定义日内的交易窗口
self.openWindow = False # 开市窗口
self.tradeWindow = False # 交易窗口
self.closeWindow = False # 收市平仓窗口
self.inited = False # 是否完成了策略初始化
self.backtesting = False # 是否回测
self.lineM15 = None # 15分钟K线
self.lineM60 = None # 60分钟k线
# 增加仓位管理模块
self.position = CtaPosition(self)
# self.position.longPos多头持仓,self.position.shorPos多头持仓、
# self.position.pos持仓状态,self.position.maxPos最大持仓
# 增加ctabacktesing中的仓位管理
if not ctaEngine:
self.engine = BacktestingEngine()
else:
self.engine = ctaEngine
# 当前资金,当前可用资金,仓位比例,仓位比例上限
self.capital, self.available, self.percent, self.percentLimit = self.engine.getAccountInfo()
if setting:
# 根据配置文件更新参数
self.setParam(setting)
# 创建的M15 K线
lineM15Setting = {}
lineM15Setting['name'] = u'M15' # k线名称
lineM15Setting['barTimeInterval'] = 60 * 15 # K线的Bar时长
lineM15Setting['inputMacdFastPeriodLen'] = 12 # DIF快线
lineM15Setting['inputMacdSlowPeriodLen'] = 26 # DEA慢线
lineM15Setting['inputMacdSignalPeriodLen'] = 9 # MACD中绿柱
lineM15Setting['shortSymbol'] = self.shortSymbol
self.lineM15 = CtaLineBar(self, self.onBarM15, lineM15Setting)
try:
mode = setting['mode']
if mode != EMPTY_STRING:
self.lineM15.setMode(setting['mode'])
except KeyError:
self.lineM15.setMode(self.lineM15.TICK_MODE)
lineM60Setting = {}
lineM60Setting['name'] = u'M60' # k线名称
lineM60Setting['barTimeInterval'] = 60 * 60 # K线的Bar时长
lineM60Setting['inputMacdFastPeriodLen'] = 12 # DIF快线
lineM60Setting['inputMacdSlowPeriodLen'] = 26 # DEA慢线
lineM60Setting['inputMacdSignalPeriodLen'] = 9 # MACD中绿柱
lineM60Setting['shortSymbol'] = self.shortSymbol
self.lineM60 = CtaLineBar(self, self.onBarM60, lineM60Setting)
try:
mode = setting['mode']
if mode != EMPTY_STRING:
self.lineM60.setMode(setting['mode'])
except KeyError:
self.lineM60.setMode(self.lineM60.TICK_MODE)
self.onInit()
# ----------------------------------------------------------------------
def onInit(self, force=False):
"""初始化 """
if force:
self.writeCtaLog(u'策略强制初始化')
self.inited = False
self.trading = False # 控制是否启动交易
else:
self.writeCtaLog(u'策略初始化')
if self.inited:
self.writeCtaLog(u'已经初始化过,不再执行')
return
self.position.pos = EMPTY_INT # 初始化持仓
self.entrust = EMPTY_INT # 初始化委托状态
self.percent = EMPTY_INT # 初始化仓位状态
if not self.backtesting:
# 这里需要加载前置数据哦。
if not self.__initDataFromSina():
return
self.inited = True # 更新初始化标识
self.trading = True # 启动交易
self.putEvent()
self.writeCtaLog(u'策略初始化完成')
def __initDataFromSina(self):
"""从sina初始化5分钟数据"""
sina = UtilSinaClient(self)
ret = sina.getMinBars(symbol=self.symbol, minute=15, callback=self.lineM15.addBar)
if not ret:
self.writeCtaLog(u'获取M15数据失败')
return False
ret = sina.getMinBars(symbol=self.symbol, minute=60, callback=self.lineM60.addBar)
if not ret:
self.writeCtaLog(u'获取M60数据失败')
return False
return True
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'启动')
# ----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.uncompletedOrders.clear()
self.position.pos = EMPTY_INT
self.entrust = EMPTY_INT
self.percent = EMPTY_INT
self.writeCtaLog(u'停止')
self.putEvent()
# ----------------------------------------------------------------------
def onTrade(self, trade):
"""交易更新"""
self.writeCtaLog(u'{0},OnTrade(),当前持仓:{1},当前仓位:{2} '.format(self.curDateTime, self.position.pos, self.percent))
# ----------------------------------------------------------------------
def onOrder(self, order):
"""报单更新"""
self.writeCtaLog(
u'OnOrder()报单更新,orderID:{0},{1},totalVol:{2},tradedVol:{3},offset:{4},price:{5},direction:{6},status:{7}'
.format(order.orderID, order.vtSymbol, order.totalVolume, order.tradedVolume,
order.offset, order.price, order.direction, order.status))
# 委托单主键,vnpy使用 "gateway.orderid" 的组合
orderkey = order.gatewayName + u'.' + order.orderID
if orderkey in self.uncompletedOrders:
if order.totalVolume == order.tradedVolume:
# 开仓,平仓委托单全部成交
# 平空仓完成(cover)
if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_LONG and order.offset != OFFSET_OPEN:
self.writeCtaLog(u'平空仓完成,原持仓:{0},原仓位{1}'.format(self.position.pos, self.percent))
self.position.closePos(direction=DIRECTION_LONG, vol=order.tradedVolume)
self.writeCtaLog(u'新持仓:{0},新仓位{1}'.format(self.position.pos, self.percent))
# 平多仓完成(sell)
if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_SHORT and order.offset != OFFSET_OPEN:
self.writeCtaLog(u'平多仓完成,原持仓:{0},原仓位{1}'.format(self.position.pos, self.percent))
self.position.closePos(direction=DIRECTION_SHORT, vol=order.tradedVolume)
self.writeCtaLog(u'新持仓:{0},新仓位{1}'.format(self.position.pos, self.percent))
# 开多仓完成
if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_LONG and order.offset == OFFSET_OPEN:
self.writeCtaLog(u'开多仓完成,原持仓:{0},原仓位{1}'.format(self.position.pos, self.percent))
self.position.openPos(direction=DIRECTION_LONG, vol=order.tradedVolume, price=order.price)
self.writeCtaLog(u'新持仓:{0},新仓位{1}'.format(self.position.pos, self.percent))
# 开空仓完成
if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_SHORT and order.offset == OFFSET_OPEN:
# 更新仓位
self.writeCtaLog(u'开空仓完成,原持仓:{0},原仓位{1}'.format(self.position.pos, self.percent))
self.position.openPos(direction=DIRECTION_SHORT, vol=order.tradedVolume, price=order.price)
self.writeCtaLog(u'新持仓:{0},新仓位{1}'.format(self.position.pos, self.percent))
del self.uncompletedOrders[orderkey]
if len(self.uncompletedOrders) == 0:
self.entrust = 0
self.lastOrderTime = None
elif order.tradedVolume > 0 and not order.totalVolume == order.tradedVolume and order.offset != OFFSET_OPEN:
# 平仓委托单部分成交
pass
elif order.offset == OFFSET_OPEN and order.status == STATUS_CANCELLED:
# 开仓委托单被撤销
self.entrust = 0
pass
else:
self.writeCtaLog(u'OnOrder()委托单返回,total:{0},traded:{1}'
.format(order.totalVolume, order.tradedVolume, ))
self.putEvent() # 更新监控事件
# ----------------------------------------------------------------------
def onStopOrder(self, orderRef):
"""停止单更新"""
self.writeCtaLog(u'{0},停止单触发,orderRef:{1}'.format(self.curDateTime, orderRef))
pass
# ----------------------------------------------------------------------
def onTick(self, tick):
"""行情更新
:type tick: object
"""
self.curTick = tick
if (tick.datetime.hour >= 3 and tick.datetime.hour <= 8) or (
tick.datetime.hour >= 16 and tick.datetime.hour <= 20):
self.writeCtaLog(u'休市/集合竞价排名时数据不处理')
return
# 更新策略执行的时间(用于回测时记录发生的时间)
self.curDateTime = tick.datetime
# 2、计算交易时间和平仓时间
self.__timeWindow(self.curDateTime)
# 推送Tick到lineM15
self.lineM15.onTick(tick)
# 首先检查是否是实盘运行还是数据预处理阶段
if not (self.inited and len(self.lineM15.lineMacd) > 0):
return
# ----------------------------------------------------------------------
def onBar(self, bar):
"""分钟K线数据更新(仅用于回测时,从策略外部调用)"""
# 更新策略执行的时间(用于回测时记录发生的时间)
# 回测数据传送的bar.datetime,为bar的开始时间,所以,到达策略时,当前时间为bar的结束时间
self.curDateTime = bar.datetime + timedelta(seconds=self.lineM15.barTimeInterval)
# 2、计算交易时间和平仓时间
self.__timeWindow(bar.datetime)
# 推送tick到15分钟K线
self.lineM15.addBar(bar)
self.lineM60.addBar(bar)
# 4、交易逻辑
# 首先检查是否是实盘运行还是数据预处理阶段
if not self.inited:
if len(self.lineM15.lineBar) > 120 + 5:
self.inited = True
else:
return
# ----------------------------------------------------------------------
def onBarM60(self, bar):
"""60分钟上的多空仓决定仓位管理"""
# 调用lineM60的显示bar内容
self.writeCtaLog(self.lineM60.displayLastBar())
# 未初始化完成
if not self.inited:
if len(self.lineM60.lineBar) > 120 + 5:
self.inited = True
else:
return
# 执行撤单逻辑
self.__cancelLogic(dt=self.curDateTime)
# ----------------------------------------------------------------------
def onBarM15(self, bar):
"""分钟K线数据更新,实盘时,由self.lineM15的回调"""
# 调用lineM15的显示bar内容
self.writeCtaLog(self.lineM15.displayLastBar())
# 未初始化完成
if not self.inited:
if len(self.lineM15.lineBar) > 120 + 5:
self.inited = True
else:
return
# 执行撤单逻辑
self.__cancelLogic(dt=self.curDateTime)
if self.lineM15.mode == self.lineM15.TICK_MODE:
idx = 2
else:
idx = 1
jincha_60f = self.lineM60.lineDif[-1 - idx] < self.lineM60.lineDea[-1 - idx] \
and self.lineM60.lineDif[0 - idx] > self.lineM60.lineDea[0 - idx]
sicha_60f = self.lineM60.lineDif[-1 - idx] > self.lineM60.lineDea[-1 - idx] \
and self.lineM60.lineDif[0 - idx] < self.lineM60.lineDea[0 - idx]
jincha_15f = self.lineM15.lineDif[-1 - idx] < self.lineM15.lineDea[-1 - idx] \
and self.lineM15.lineDif[0 - idx] > self.lineM15.lineDea[0 - idx] \
and abs(self.lineM15.lineMacd[0 - idx]) >= 2
sicha_15f = self.lineM15.lineDif[-1 - idx] > self.lineM15.lineDea[-1 - idx] \
and self.lineM15.lineDif[0 - idx] < self.lineM15.lineDea[0 - idx] \
and abs(self.lineM15.lineMacd[0 - idx]) > 2
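        # Cross-detection sketch with made-up numbers: on the previous bar
        # DIF=-1.2 < DEA=-0.8 and on the current bar DIF=0.3 > DEA=-0.1, so the
        # DIF line has crossed above DEA (jincha, golden cross); the mirrored
        # comparison gives sicha (dead cross). The abs(lineMacd) >= 2 condition
        # on the 15f signals filters out weak crosses.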
# 如果未持仓,检查是否符合开仓逻辑
if self.position.pos == 0:
"""
15f上决定开仓方向,60f与15f方向是否相同决定开仓仓位
"""
# 60分钟上金叉
if jincha_60f:
# 60F金叉的同时15f上金叉,做多,双金叉,仓位放至40%
# 如果要macd大于2的代码and abs(self.lineM15.lineMacd[0 - idx]) > 2
if jincha_15f:
self.percentLimit = 0.4
vol = self.getAvailablePos(bar)
if not vol:
return
self.writeCtaLog(u'{0},开仓多单{1}手,价格:{2}'.format(bar.datetime, vol, bar.close))
orderid = self.buy(price=bar.close, volume=vol, orderTime=self.curDateTime)
if orderid:
self.lastOrderTime = self.curDateTime
return
# 60f金叉的同时15f上死叉,60f与15f不共振,原有多仓
if sicha_15f:
self.percentLimit = 0.2
vol = self.getAvailablePos(bar)
if not vol:
return
self.writeCtaLog(u'{0},开仓空单{1}手,价格:{2}'.format(bar.datetime, vol, bar.close))
orderid = self.short(price=bar.close, volume=vol, orderTime=self.curDateTime)
if orderid:
self.lastOrderTime = self.curDateTime
return
# 60分钟上死叉
if sicha_60f:
# 60F死叉的同时15f上金叉,做多,双金叉,仓位放至20%
# 如果要macd大于2的代码and abs(self.lineM15.lineMacd[0 - idx]) > 2
if jincha_15f:
self.percentLimit = 0.2
vol = self.getAvailablePos(bar)
if not vol:
return
self.writeCtaLog(u'{0},开仓多单{1}手,价格:{2}'.format(bar.datetime, vol, bar.close))
orderid = self.buy(price=bar.close, volume=vol, orderTime=self.curDateTime)
if orderid:
self.lastOrderTime = self.curDateTime
return
# 60f死叉的同时15f上死叉,60f与15f不共振,仓位放至40%
if sicha_15f:
self.percentLimit = 0.4
vol = self.getAvailablePos(bar)
if not vol:
return
self.writeCtaLog(u'{0},开仓空单{1}手,价格:{2}'.format(bar.datetime, vol, bar.close))
orderid = self.short(price=bar.close, volume=vol, orderTime=self.curDateTime)
if orderid:
self.lastOrderTime = self.curDateTime
return
"""
如果原来有持仓
60f金叉+15f金叉的时
"""
elif self.position.pos >= 1 and self.percent >= 30: # 最大仓位限定在40%且多头仓位的时候,即60f金叉+15f金叉
if jincha_60f and sicha_15f: # 40%的多仓全平同时开20%的空仓
self.writeCtaLog(u'{0},平仓多单{1}手,价格:{2}'.format(bar.datetime, self.position.pos, bar.close))
orderid = self.sell(price=bar.close, volume=self.position.pos, orderTime=self.curDateTime)
if orderid:
self.lastOrderTime = self.curDateTime
if sicha_60f and sicha_15f:
    # dead cross on both 60f and 15f: no position change in this version
    pass
# ----------------------------------------------------------------------
def __cancelLogic(self, dt, force=False):
"撤单逻辑"""
if len(self.uncompletedOrders) < 1:
return
if not self.lastOrderTime:
self.writeCtaLog(u'异常,上一交易时间为None')
return
# 平仓检查时间比开仓时间需要短一倍
if (self.position.pos >= 0 and self.entrust == 1) \
or (self.position.pos <= 0 and self.entrust == -1):
i = 1
else:
i = 1 # 原来是2,暂时取消
canceled = False
if ((dt - self.lastOrderTime).seconds > self.cancelSeconds / i) \
or force: # 超过设置的时间还未成交
for order in self.uncompletedOrders.keys():
self.writeCtaLog(u'{0}超时{1}秒未成交,取消委托单:{2}'.format(dt, (dt - self.lastOrderTime).seconds, order))
self.cancelOrder(str(order))
canceled = True
# 取消未完成的订单
self.uncompletedOrders.clear()
if canceled:
self.entrust = 0
else:
self.writeCtaLog(u'异常:没有撤单')
def __timeWindow(self, dt):
"""交易与平仓窗口"""
# 交易窗口 避开早盘和夜盘的前5分钟,防止隔夜跳空。
self.closeWindow = False
self.tradeWindow = False
self.openWindow = False
# 初始化当日的首次交易
# if (tick.datetime.hour == 9 or tick.datetime.hour == 21) and tick.datetime.minute == 0 and tick.datetime.second ==0:
# self.firstTrade = True
# 开市期,波动较大,用于判断止损止盈,或开仓
if (dt.hour == 9 or dt.hour == 21) and dt.minute < 2:
self.openWindow = True
# 日盘
if dt.hour == 9 and dt.minute >= 0:
self.tradeWindow = True
return
if dt.hour == 10:
if dt.minute <= 15 or dt.minute >= 30:
self.tradeWindow = True
return
if dt.hour == 11 and dt.minute <= 30:
self.tradeWindow = True
return
if dt.hour == 13 and dt.minute >= 30:
self.tradeWindow = True
return
if dt.hour == 14:
if dt.minute < 59:
self.tradeWindow = True
return
if dt.minute == 59: # 日盘平仓
self.closeWindow = True
return
# 夜盘
if dt.hour == 21 and dt.minute >= 0:
self.tradeWindow = True
return
# 上期 贵金属, 次日凌晨2:30
if self.shortSymbol in NIGHT_MARKET_SQ1:
if dt.hour == 22 or dt.hour == 23 or dt.hour == 0 or dt.hour == 1:
self.tradeWindow = True
return
if dt.hour == 2:
if dt.minute < 29: # 收市前29分钟
self.tradeWindow = True
return
if dt.minute == 29: # 夜盘平仓
self.closeWindow = True
return
return
# 上期 有色金属,黑色金属,沥青 次日01:00
if self.shortSymbol in NIGHT_MARKET_SQ2:
if dt.hour == 22 or dt.hour == 23:
self.tradeWindow = True
return
if dt.hour == 0:
if dt.minute < 59: # 收市前29分钟
self.tradeWindow = True
return
if dt.minute == 59: # 夜盘平仓
self.closeWindow = True
return
return
# 上期 天然橡胶 23:00
if self.shortSymbol in NIGHT_MARKET_SQ3:
if dt.hour == 22:
if dt.minute < 59: # 收市前1分钟
self.tradeWindow = True
return
if dt.minute == 59: # 夜盘平仓
self.closeWindow = True
return
# 郑商、大连 23:30
if self.shortSymbol in NIGHT_MARKET_ZZ or self.shortSymbol in NIGHT_MARKET_DL:
if dt.hour == 22:
self.tradeWindow = True
return
if dt.hour == 23:
if dt.minute < 29: # 收市前1分钟
self.tradeWindow = True
return
if dt.minute == 29 and dt.second > 30: # 夜盘平仓
self.closeWindow = True
return
return
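        # Example walk-through (sketch): for a NIGHT_MARKET_SQ3 symbol such as
        # natural rubber (close at 23:00), 22:30 sets tradeWindow=True, while
        # 22:59 sets closeWindow=True so positions can be flattened in time.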
# ----------------------------------------------------------------------
def strToTime(self, t, ms):
"""从字符串时间转化为time格式的时间"""
hh, mm, ss = t.split(':')
tt = datetime.time(int(hh), int(mm), int(ss), microsecond=ms)
return tt
# ----------------------------------------------------------------------
def saveData(self, id):
"""保存过程数据"""
# 保存K线
if not self.backtesting:
return
# ----------------------------------------------------------------------
# add by xy 12th August 2017
def getAvailablePos(self, bar):
capital, avail, _, _ = self.engine.getAccountInfo()
avail = min(avail, capital * self.percentLimit)
midPrice = (bar.high - bar.low) / 2 + bar.low
pricePerLot = self.engine.moneyPerLot(midPrice, self.vtSymbol)
if pricePerLot:
return int(avail / pricePerLot)
else:
return None
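
# Position-sizing sketch that mirrors getAvailablePos() above: capital usage is
# capped by percentLimit and converted into lots via the per-lot margin quoted
# by the engine. The figures in the docstring are assumptions for illustration.
def _example_available_lots(capital, available, percent_limit, price_per_lot):
    """E.g. _example_available_lots(100000, 100000, 0.4, 4000) returns 10."""
    available = min(available, capital * percent_limit)
    if not price_per_lot:
        return None
    return int(available / price_per_lot)
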
def testRbByTick():
# 创建回测引擎
engine = BacktestingEngine()
# 设置引擎的回测模式为Tick
engine.setBacktestingMode(engine.TICK_MODE)
# 设置回测用的数据起始日期
engine.setStartDate('20100101')
# 设置回测用的数据结束日期
engine.setEndDate('20160330')
# engine.connectMysql()
engine.setDatabase(dbName='stockcn', symbol='rb')
# 设置产品相关参数
engine.setSlippage(0.5) # 1跳(0.1)2跳0.2
engine.setRate(float(0.0001)) # 万1
engine.setSize(10) # 合约大小
settings = {}
settings['vtSymbol'] = 'RB'
settings['shortSymbol'] = 'RB'
settings['name'] = 'MACD'
settings['mode'] = 'tick'
settings['backtesting'] = True
# 在引擎中创建策略对象
engine.initStrategy(Strategy_MACD_01, setting=settings)
# 使用简单复利模式计算
engine.usageCompounding = False # True时,只针对FINAL_MODE有效
# 启用实时计算净值模式REALTIME_MODE / FINAL_MODE 回测结束时统一计算模式
engine.calculateMode = engine.REALTIME_MODE
engine.initCapital = 100000 # 设置期初资金
engine.percentLimit = 30 # 设置资金使用上限比例(%)
engine.barTimeInterval = 60 * 5 # bar的周期秒数,用于csv文件自动减时间
engine.fixCommission = 10 # 固定交易费用(每次开平仓收费)
# 开始跑回测
engine.runBacktestingWithMysql()
# 显示回测结果
engine.showBacktestingResult()
def testRbByBar():
# 创建回测引擎
engine = BacktestingEngine()
# 设置引擎的回测模式为Tick
engine.setBacktestingMode(engine.BAR_MODE)
# 设置回测用的数据起始日期
engine.setStartDate('20170101')
# 设置回测用的数据结束日期
engine.setEndDate('20170605')
# engine.setDatabase(dbName='stockcn', symbol='rb')
engine.setDatabase(dbName='stockcn', symbol='RB')
# 设置产品相关参数
engine.setSlippage(0.5) # 1跳(0.1)2跳0.2
engine.setRate(float(0.0003)) # 万3
engine.setSize(10) # 合约大小
settings = {}
# settings['vtSymbol'] = 'rb'
settings['vtSymbol'] = 'RB'
settings['shortSymbol'] = 'RB'
settings['name'] = 'MACD'
settings['mode'] = 'bar'
settings['backtesting'] = True
settings['percentLimit'] = 30
# 在引擎中创建策略对象
engine.initStrategy(Strategy_MACD_01, setting=settings)
# 使用简单复利模式计算
engine.usageCompounding = False # True时,只针对FINAL_MODE有效
# 启用实时计算净值模式REALTIME_MODE / FINAL_MODE 回测结束时统一计算模式
engine.calculateMode = engine.REALTIME_MODE
engine.initCapital = 100000 # 设置期初资金
engine.percentLimit = 40 # 设置资金使用上限比例(%)
engine.barTimeInterval = 300 # bar的周期秒数,用于csv文件自动减时间
# 开始跑回测
engine.runBackTestingWithBarFile(os.getcwd() + '/cache/RB99_20100101_20170605_15m.csv')
# 显示回测结果
engine.showBacktestingResult()
# 从csv文件进行回测
if __name__ == '__main__':
# 提供直接双击回测的功能
# 导入PyQt4的包是为了保证matplotlib使用PyQt4而不是PySide,防止初始化出错
from ctaBacktesting import *
from setup_logger import setup_logger
setup_logger(
filename=u'TestLogs/{0}_{1}.log'.format(Strategy_MACD_01.className, datetime.now().strftime('%m%d_%H%M')),
debug=False
)
# 回测螺纹
testRbByBar()
| mit |
nok/sklearn-porter | examples/estimator/classifier/BernoulliNB/js/basics.pct.py | 1 | 1033 | # %% [markdown]
# # sklearn-porter
#
# Repository: [https://github.com/nok/sklearn-porter](https://github.com/nok/sklearn-porter)
#
# ## BernoulliNB
#
# Documentation: [sklearn.naive_bayes.BernoulliNB](http://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.BernoulliNB.html)
# %%
import sys
sys.path.append('../../../../..')
# %% [markdown]
# ### Load data
# %%
from sklearn.datasets import load_iris
iris_data = load_iris()
X = iris_data.data
y = iris_data.target
print(X.shape, y.shape)
# %% [markdown]
# ### Train classifier
# %%
from sklearn.naive_bayes import BernoulliNB
clf = BernoulliNB()
clf.fit(X, y)
# %% [markdown]
# ### Transpile classifier
# %%
from sklearn_porter import Porter
porter = Porter(clf, language='js')
output = porter.export()
print(output)
# %% [markdown]
# ### Run classification in JavaScript
# %%
# Save classifier:
# with open('BernoulliNB.js', 'w') as f:
# f.write(output)
# Run classification:
# if hash node 2>/dev/null; then
# node BernoulliNB.js 1 2 3 4
# fi
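# %% [markdown]
# ### Sanity check in Python
#
# A quick sketch for comparison with the transpiled JavaScript output:
# predictions from the fitted scikit-learn estimator on a few samples
# (`clf` and `X` come from the cells above).

# %%
print(clf.predict(X[:3]))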
| mit |
xzh86/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
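# For example, prefer
#     from sklearn.metrics import accuracy_score
# over the deprecated
#     from sklearn.metrics.metrics import accuracy_score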
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
hyperspy/hyperspy | doc/conf.py | 2 | 9490 | # -*- coding: utf-8 -*-
#
# HyperSpy documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 18 11:10:55 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from hyperspy import Release
import sys
from datetime import datetime
sys.path.append('../')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.imgmath',
'sphinx.ext.graphviz',
'sphinx.ext.autosummary',
'sphinx_toggleprompt',
'sphinxcontrib.towncrier',
]
try:
import sphinxcontrib.spelling
extensions.append('sphinxcontrib.spelling')
except BaseException:
pass
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'HyperSpy'
copyright = f'2011-{datetime.today().year}, The HyperSpy development team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = Release.version
# The full version, including alpha/beta/rc tags.
release = Release.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {'collapsiblesidebar': True}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/hyperspy_logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/hyperspy.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'HyperSpydoc'
# Add the documentation for __init__() methods and the class docstring to the
# built documentation
autoclass_content = 'both'
# -- Options for LaTeX output --------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'HyperSpy.tex', 'HyperSpy Documentation',
'The HyperSpy Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hyperspy', 'HyperSpy Documentation',
['The HyperSpy developers'], 1)
]
# -- Options for towncrier_draft extension -----------------------------------
# Options: draft/sphinx-version/sphinx-release
towncrier_draft_autoversion_mode = 'draft'
towncrier_draft_include_empty = False
towncrier_draft_working_directory = ".."
# Add the hyperspy website to the intersphinx domains
intersphinx_mapping = {'python': ('https://docs.python.org/3', None),
'hyperspyweb': ('https://hyperspy.org/', None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'dask': ('https://docs.dask.org/en/latest', None),
'astroML': ('https://www.astroml.org/', None),
'sklearn': ('https://scikit-learn.org/stable', None),
'skimage': ('https://scikit-image.org/docs/stable', None),
}
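# With the mapping above, cross-references in the documentation such as
# :class:`numpy.ndarray` or :class:`dask.array.Array` resolve to the external
# projects' documentation (the exact targets depend on their inventories).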
graphviz_output_format = "svg"
def run_apidoc(_):
# https://www.sphinx-doc.org/en/master/man/sphinx-apidoc.html
# https://www.sphinx-doc.org/es/1.2/ext/autodoc.html
import os
os.environ['SPHINX_APIDOC_OPTIONS'] = 'members,private-members,no-undoc-members,show-inheritance,ignore-module-all'
from sphinx.ext.apidoc import main
cur_dir = os.path.normpath(os.path.dirname(__file__))
output_path = os.path.join(cur_dir, 'api')
modules = os.path.normpath(os.path.join(cur_dir, "../hyperspy"))
exclude_pattern = ["../hyperspy/tests",
"../hyperspy/external",
"../hyperspy/io_plugins/unbcf_fast.pyx"]
main(['-e', '-f', '-P', '-o', output_path, modules, *exclude_pattern])
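    # The call above is roughly equivalent to running, from the repository
    # root (sketch; the environment variable mirrors SPHINX_APIDOC_OPTIONS):
    #   sphinx-apidoc -e -f -P -o doc/api hyperspy \
    #       hyperspy/tests hyperspy/external hyperspy/io_plugins/unbcf_fast.pyx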
def setup(app):
app.connect('builder-inited', run_apidoc)
| gpl-3.0 |
pelson/cartopy | lib/cartopy/examples/logo.py | 6 | 1535 | """
Cartopy Logo
------------
The actual code to produce cartopy's logo.
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib.textpath
import matplotlib.patches
from matplotlib.font_manager import FontProperties
import numpy as np
def main():
fig = plt.figure(figsize=[12, 6])
ax = fig.add_subplot(1, 1, 1, projection=ccrs.Robinson())
ax.coastlines()
ax.gridlines()
# generate a matplotlib path representing the word "cartopy"
fp = FontProperties(family='Bitstream Vera Sans', weight='bold')
logo_path = matplotlib.textpath.TextPath((-175, -35), 'cartopy',
size=1, prop=fp)
# scale the letters up to sensible longitude and latitude sizes
logo_path._vertices *= np.array([80, 160])
# add a background image
im = ax.stock_img()
# clip the image according to the logo_path. mpl v1.2.0 does not support
# the transform API that cartopy makes use of, so we have to convert the
# projection into a transform manually
plate_carree_transform = ccrs.PlateCarree()._as_mpl_transform(ax)
im.set_clip_path(logo_path, transform=plate_carree_transform)
# add the path as a patch, drawing black outlines around the text
patch = matplotlib.patches.PathPatch(logo_path,
facecolor='none', edgecolor='black',
transform=ccrs.PlateCarree())
ax.add_patch(patch)
plt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
aruneral01/auto-sklearn | autosklearn/util/options.py | 5 | 3315 | from autosklearn.external.configargparse import ArgParser
def get_options():
parser = ArgParser()
parser.add_argument("-c", "--config", help="Path to a configuration file.",
is_config_file=True)
# Arguments concerning the target dataset
parser.add_argument("--dataset-name", type=str,
help="Name of the target dataset.")
parser.add_argument("--data-dir", type=str,
help="Directory where the dataset resides.")
# Arguments concerning the file output
parser.add_argument("--output-dir", type=str, default=None,
help="AutoSklearn output directory. If not specified, "
"a new directory under /tmp/ will be generated.")
parser.add_argument("--temporary-output-directory", type=str,
help="Temporary output directory. If not specified, "
"a new directory under /tmp/ will be generated.",
default=None)
parser.add_argument("--keep-output", action='store_true', default=False,
help="If output_dir and temporary_output_dir are not "
"specified, setting this to False will make "
"autosklearn not delete these two directories.")
# Arguments concerning the configuration procedure
parser.add_argument("--time-limit", type=int, default=3600,
help="Total runtime of the AutoSklearn package in "
"seconds.")
parser.add_argument("--per-run-time-limit", type=int, default=360,
help="Runtime for a single call of a target algorithm.")
parser.add_argument("--ml-memory-limit", type=int, default=3000,
help="Memory limit for the machine learning pipeline "
"being optimized")
# Arguments concerning the metalearning part
parser.add_argument("--metalearning-configurations", type=int, default=25,
help="Number of configurations which will be used as "
"initial challengers for SMAC.")
# Arguments concerning the algorithm selection part
parser.add_argument("--ensemble-size", type=int, default=1,
help="Maximum number of models in the ensemble. Set "
"this to one in order to evaluate the single "
"best.")
parser.add_argument("--ensemble-nbest", type=int, default=1,
help="Consider only the best n models in the ensemble "
"building process.")
# Other
parser.add_argument("-s", "--seed", type=int, default=1,
help="Seed for random number generators.")
parser.add_argument('--exec-dir', type=str,
help="Execution directory.")
parser.add_argument("--metadata-directory",
help="DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING."
"\nReads the metadata from a different directory. "
"This feature should only be used to perform studies "
"on the performance of AutoSklearn.")
return parser
| bsd-3-clause |