repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---
hyperion-rt/hyperion
|
docs/tutorials/scripts/pure_scattering_plot.py
|
2
|
1264
|
import matplotlib.pyplot as plt
from hyperion.model import ModelOutput
from hyperion.util.constants import pc
mo = ModelOutput('pure_scattering.rtout')
image_fnu = mo.get_image(inclination=0, units='MJy/sr', distance=300. * pc)
image_pol = mo.get_image(inclination=0, stokes='linpol')
fig = plt.figure(figsize=(8, 8))
# Make total intensity sub-plot
ax = fig.add_axes([0.1, 0.3, 0.4, 0.4])
ax.imshow(image_fnu.val[:, :, 0], extent=[-13, 13, -13, 13],
interpolation='none', cmap=plt.cm.gist_heat,
origin='lower', vmin=0., vmax=4e9)
ax.set_xlim(-13., 13.)
ax.set_ylim(-13., 13.)
ax.set_xlabel("x (solar radii)")
ax.set_ylabel("y (solar radii)")
ax.set_title("Surface brightness")
# Make linear polarization sub-plot
ax = fig.add_axes([0.51, 0.3, 0.4, 0.4])
im = ax.imshow(image_pol.val[:, :, 0] * 100., extent=[-13, 13, -13, 13],
interpolation='none', cmap=plt.cm.gist_heat,
origin='lower', vmin=0., vmax=100.)
ax.set_xlim(-13., 13.)
ax.set_ylim(-13., 13.)
ax.set_xlabel("x (solar radii)")
ax.set_title("Linear Polarization")
ax.set_yticklabels('')
axcb = fig.add_axes([0.92, 0.3, 0.02, 0.4])
cb=plt.colorbar(im, cax=axcb)
cb.set_label('%')
fig.savefig('pure_scattering_inner_disk.png', bbox_inches='tight')
|
bsd-2-clause
|
karstenw/nodebox-pyobjc
|
examples/Extended Application/sklearn/examples/ensemble/plot_adaboost_multiclass.py
|
1
|
4931
|
"""
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al. [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
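# Hedged aside (not part of the original example): the nested concentric
# spheres described in the docstring above are defined by quantiles of the
# squared radius, since ||x||^2 of a ten-dimensional standard normal follows
# a chi-squared distribution with 10 degrees of freedom. A minimal sketch of
# the two squared-radius thresholds that split the mass into three roughly
# equal classes (make_gaussian_quantiles below achieves the same split using
# sample quantiles):
from scipy.stats import chi2
_radius_thresholds = chi2.ppf([1. / 3, 2. / 3], df=10)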
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
# nodebox section
if __name__ == '__builtin__':
    # we're in nodebox
    import os
    import tempfile
    W = 800
    inset = 20
    size(W, 600)
    plt.cla()
    plt.clf()
    plt.close('all')

    def tempimage():
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname

    imgx = 20
    imgy = 0

    def pltshow(plt, dpi=150):
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx, dy = imagesize(temppath)
        w = min(W, dx)
        image(temppath, imgx, imgy, width=w)
        imgy = imgy + dy + 20
        os.remove(temppath)
        size(W, HEIGHT + dy + 40)
else:
    def pltshow(mplpyplot):
        mplpyplot.show()
# nodebox section end
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_test_predict in zip(
        bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
    real_test_errors.append(
        1. - accuracy_score(y_test, real_test_predict))
    discrete_test_errors.append(
        1. - accuracy_score(y_test, discrete_test_predict))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
# plt.show()
pltshow(plt)
|
mit
|
vivekmishra1991/scikit-learn
|
sklearn/metrics/tests/test_regression.py
|
272
|
6066
|
from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
|
bsd-3-clause
|
seadsystem/ShR2
|
Web Stack/webapp/management/commands/email_interval.py
|
2
|
8924
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from webapp.models import Notification
from django.core.exceptions import ObjectDoesNotExist
import boto3
from botocore.exceptions import ClientError
from django.conf import settings
from matplotlib import pyplot as plt
import numpy as np
from django.template import Context, Template
import os
from time import gmtime, strftime
from influxdb.influxdb08 import client as influxdb
from microdata.models import Device
from sets import Set
import re
import datetime
from calendar import monthrange
import random
from math import factorial
from webapp.models import IntervalNotification
class Object:
def __init__(self, device, value, hungriest):
self.device = device
self.value = value
self.hungriest = hungriest
def get_average_usage(user, notification):
start = 'now() - 1w'
unit = 'h'
time_interval = notification.recurrences.occurrences()[1] - notification.recurrences.occurrences()[0]
if time_interval == datetime.timedelta(days=30):
start = 'now() - 1M'
unit = 'd'
elif time_interval == datetime.timedelta(days=1):
start = 'now() - 1d'
unit = 'm'
elif time_interval == datetime.timedelta(days=365):
start = 'now() - 1y'
unit = 'd'
stop = 'now()'
db = influxdb.InfluxDBClient(settings.INFLUXDB_URI,8086,'root','root','seads')
result = db.query('list series')[0]
averages = {}
for device in Device.objects.filter(owner=user):
appliances = Set()
for series in result['points']:
rg = re.compile('device.'+str(device.serial))
if re.match(rg, series[1]):
appliance = series[1].split('device.'+str(device.serial)+'.')
if (len(appliance) < 2): continue
else:
appliances.add(appliance[-1])
average_wattage = 0
hungriest_appliance = [None, 0]
for appliance in appliances:
try:
wattage = db.query('select * from 1'+unit+'.device.'+str(device.serial)+'.'+appliance +\
' where time > '+start+' and time < '+stop)[0]['points'][0][2]
average_wattage += wattage
if wattage > hungriest_appliance[1]:
hungriest_appliance = [appliance, int(wattage)]
except:
pass
averages[str(device.serial)] = [int(average_wattage), hungriest_appliance]
return averages
def render_chart(user, notification):
date_today = datetime.datetime.today()
date_gmtime = gmtime()
randbits = str(random.getrandbits(128))
start = 'now() - 1w'
unit = 'h'
time_interval = notification.recurrences.occurrences()[1] - notification.recurrences.occurrences()[0]
interval_keyword = 'weekly'
if time_interval == datetime.timedelta(days=30):
start = 'now() - 1M'
unit = 'd'
interval_keyword = 'monthly'
elif time_interval == datetime.timedelta(days=1):
start = 'now() - 1d'
unit = 'm'
interval_keyword = 'daily'
elif time_interval == datetime.timedelta(days=365):
start = 'now() - 1y'
unit = 'd'
interval_keyword = 'annually'
stop = 'now()'
db = influxdb.InfluxDBClient(settings.INFLUXDB_URI,8086,'root','root','seads')
fig = plt.figure(figsize=(10, 5), dpi=100) # 1000px * 500px figure
plt.ylabel('Watts')
for device in Device.objects.filter(owner=user):
points = {}
result = db.query('list series')[0]
appliances = Set()
for series in result['points']:
rg = re.compile('device.'+str(device.serial))
if re.match(rg, series[1]):
appliance = series[1].split('device.'+str(device.serial)+'.')
if (len(appliance) < 2): continue
else: appliances.add(appliance[-1])
for appliance in appliances:
query = 'select * from 1'+unit+'.device.'+str(device.serial)+'.'+appliance+' where time > '+start+' and time < '+stop
try:
group = db.query(query)
except:
continue
if (len(group)): group = group[0]['points']
for s in group:
if s[0] in points:
points[s[0]] += s[2]
else:
points[s[0]] = s[2]
y = []
for key, value in points.iteritems():
y.append(value)
x = 0
if interval_keyword == 'monthly' or interval_keyword == 'annually':
x = np.array([date_today - datetime.timedelta(days=i) for i in range(len(y))])
elif interval_keyword == 'weekly':
x = np.array([date_today - datetime.timedelta(hours=i) for i in range(len(y))])
elif interval_keyword == 'daily':
x = np.array([date_today - datetime.timedelta(minutes=i) for i in range(len(y))])
if (len(y) > 0):
plt.plot(x, y, label=device)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
filepath = settings.STATIC_PATH+'/webapp/img/'
filename = interval_keyword + '_' + str(user.pk)+'_'+randbits+'_plot.png'
plt.savefig(filepath + filename, bbox_inches="tight")
s3 = boto3.resource('s3')
data = open(filepath + filename, 'rb')
bucket = s3.Bucket(settings.S3_BUCKET)
expires = datetime.datetime.today() + datetime.timedelta(days=90)
bucket.put_object(Key='email/'+filename, Body=data, ACL='public-read', Expires=str(expires))
resource_url = 'https://'+settings.S3_BUCKET+'.s3.amazonaws.com/email/'+filename
os.remove(filepath + filename)
return [resource_url, strftime("%a, %d %b %Y %H:%M:%S +0000", date_gmtime)]
class Command(BaseCommand):
help = \
"""
Launches the mail service to send usage information based on the
provided interval.
This should never be run more than once per day.
Intervals:
"""
for n in IntervalNotification.objects.all():
help += n.recurrences.rrules[0].to_text() + " | "
def handle(self, *args, **options):
ses = boto3.client('ses')
# Save the current date and time
today = datetime.datetime.today()
# Loop over all users
for user in User.objects.all():
# Loop over all notifications
for notification in user.usersettings.interval_notification.all():
if notification.recurrences.occurrences()[0].day == today.day:
# specified notification is scheduled to run today
try:
destination = {'ToAddresses': [user.email]}
text = ""
f = notification.email_body
f.open(mode='r')
text = f.read()
f.close()
plot_url, str_time = render_chart(user, notification)
average_objects = []
averages = get_average_usage(user, notification)
for key, value in averages.iteritems():
average_objects.append(Object(Device.objects.get(serial=key), value[0], value[1]))
template = Template(text)
rule = notification.recurrences.rrules[0].to_text()
context = Context({
'time': str_time,
'organization': settings.ORG_NAME,
'base_url': settings.BASE_URL,
'interval': str(rule).title(),
'interval_lower': rule,
'user_firstname': user.first_name,
'plot_location': plot_url,
'average_objects': average_objects,
'devices': Device.objects.filter(owner=user),
})
message = {
'Subject': {
'Data': settings.ORG_NAME + ' ' + str(rule).title() + ' Consumption Details'
},
'Body': {
'Html': {
'Data': template.render(context)
}
}
}
print ""
print "Sending email to "+user.username
print "Time:" + str_time
print settings.ORG_NAME + ' ' + str(rule).title() + ' Consumption Details'
print "==============================="
ses.send_email(Source=settings.SES_EMAIL,
Destination=destination,
Message=message,
ReturnPath=settings.SES_EMAIL
)
except ObjectDoesNotExist:
# user has no usersettings. Skip user.
pass
except ClientError:
# user has no email or is not verified. Skip for now.
pass
|
mit
|
vivekmishra1991/scikit-learn
|
benchmarks/bench_isotonic.py
|
268
|
3046
|
"""
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
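# Illustrative invocation (a sketch, not part of the original file; the flag
# names come from the argparse configuration defined under __main__ below):
#
#   python bench_isotonic.py --iterations 10 --log_min_problem_size 2 \
#       --log_max_problem_size 7 --dataset logistic --show_plot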
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))


def generate_logistic_dataset(size):
    X = np.sort(np.random.normal(size=size))
    return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
    """
    Runs a single iteration of isotonic regression on the input data,
    and reports the total time taken (in seconds).
    """
    gc.collect()
    tstart = datetime.now()
    isotonic_regression(Y)
    delta = datetime.now() - tstart
    return total_seconds(delta)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Isotonic Regression benchmark tool")
    parser.add_argument('--iterations', type=int, required=True,
                        help="Number of iterations to average timings over "
                             "for each problem size")
    parser.add_argument('--log_min_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the minimum problem size")
    parser.add_argument('--log_max_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the maximum problem size")
    parser.add_argument('--show_plot', action='store_true',
                        help="Plot timing output with matplotlib")
    parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
                        required=True)
    args = parser.parse_args()

    timings = []
    for exponent in range(args.log_min_problem_size,
                          args.log_max_problem_size):
        n = 10 ** exponent
        Y = DATASET_GENERATORS[args.dataset](n)
        time_per_iteration = \
            [bench_isotonic_regression(Y) for i in range(args.iterations)]
        timing = (n, np.mean(time_per_iteration))
        timings.append(timing)
        # If we're not plotting, dump the timing to stdout
        if not args.show_plot:
            print(n, np.mean(time_per_iteration))

    if args.show_plot:
        plt.plot(*zip(*timings))
        plt.title("Average time taken running isotonic regression")
        plt.xlabel('Number of observations')
        plt.ylabel('Time (s)')
        plt.axis('tight')
        plt.loglog()
        plt.show()
|
bsd-3-clause
|
StratsOn/zipline
|
zipline/utils/cli.py
|
4
|
6275
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
from copy import copy
from six import print_
from six.moves import configparser
import pandas as pd
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except:
PYGMENTS = False
import zipline
DEFAULTS = {
'start': '2012-01-01',
'end': '2012-12-31',
'data_frequency': 'daily',
'capital_base': '10e6',
'source': 'yahoo',
'symbols': 'AAPL'
}
def parse_args(argv, ipython_mode=False):
"""Parse list of arguments.
If a config file is provided (via -c), it will read in the
supplied options and overwrite any global defaults.
All other directly supplied arguments will overwrite the config
file settings.
Arguments:
* argv : list of strings
List of arguments, e.g. ['-c', 'my.conf']
* ipython_mode : bool <default=False>
Whether to parse IPython specific arguments
like --local_namespace
Notes:
Default settings can be found in zipline.utils.cli.DEFAULTS.
"""
# Parse any conf_file specification
# We make this parser with add_help=False so that
# it doesn't parse -h and print help.
conf_parser = argparse.ArgumentParser(
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file",
metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(argv)
defaults = copy(DEFAULTS)
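# Illustrative sketch of a config file passed via -c/--conf_file (an
# assumption inferred from the "Defaults" section read below; the keys
# mirror zipline.utils.cli.DEFAULTS):
#
#   [Defaults]
#   start=2012-01-01
#   end=2012-12-31
#   data_frequency=daily
#   capital_base=10e6
#   source=yahoo
#   symbols=AAPL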
if args.conf_file:
config = configparser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("Defaults")))
# Parse rest of arguments
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
description="Zipline version %s." % zipline.__version__,
parents=[conf_parser]
)
parser.set_defaults(**defaults)
parser.add_argument('--algofile', '-f')
parser.add_argument('--data-frequency',
choices=('minute', 'daily'))
parser.add_argument('--start', '-s')
parser.add_argument('--end', '-e')
parser.add_argument('--capital_base')
parser.add_argument('--source', choices=('yahoo',))
parser.add_argument('--symbols')
parser.add_argument('--output', '-o')
if ipython_mode:
parser.add_argument('--local_namespace', action='store_true')
args = parser.parse_args(remaining_argv)
return(vars(args))
def parse_cell_magic(line, cell):
"""Parse IPython magic
"""
args_list = line.split(' ')
args = parse_args(args_list, ipython_mode=True)
local_namespace = args.pop('local_namespace', False)
# By default, execute inside IPython namespace
if not local_namespace:
args['namespace'] = get_ipython().user_ns # flake8: noqa
# If we are running inside NB, do not output to file but create a
# variable instead
output_var_name = args.pop('output', None)
perf = run_pipeline(print_algo=False, algo_text=cell, **args)
if output_var_name is not None:
get_ipython().user_ns[output_var_name] = perf # flake8: noqa
def run_pipeline(print_algo=True, **kwargs):
"""Runs a full zipline pipeline given configuration keyword
arguments.
1. Load data (start and end dates, as well as the source and
symbols, can be provided as strings).
2. Instantiate algorithm (supply either algo_text or algofile
kwargs containing initialize() and handle_data() functions). If
algofile is supplied, will try to look for algofile_analyze.py and
append it.
3. Run algorithm (supply capital_base as float).
4. Return performance dataframe.
:Arguments:
* print_algo : bool <default=True>
Whether to print the algorithm to command line. Will use
pygments syntax coloring if pygments is found.
"""
start = pd.Timestamp(kwargs['start'], tz='UTC')
end = pd.Timestamp(kwargs['end'], tz='UTC')
symbols = kwargs['symbols'].split(',')
if kwargs['source'] == 'yahoo':
source = zipline.data.load_bars_from_yahoo(
stocks=symbols, start=start, end=end)
else:
raise NotImplementedError(
'Source %s not implemented.' % kwargs['source'])
algo_text = kwargs.get('algo_text', None)
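# Minimal sketch of what an algofile is expected to provide (an assumption
# based on the docstring above and zipline's initialize/handle_data
# convention, not a file shipped with this module):
#
#   def initialize(context):
#       context.invested = False
#
#   def handle_data(context, data):
#       if not context.invested:
#           order('AAPL', 100)
#           context.invested = True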
if algo_text is None:
# Expect algofile to be set
algo_fname = kwargs['algofile']
with open(algo_fname, 'r') as fd:
algo_text = fd.read()
analyze_fname = os.path.splitext(algo_fname)[0] + '_analyze.py'
if os.path.exists(analyze_fname):
with open(analyze_fname, 'r') as fd:
# Simply append
algo_text += fd.read()
if print_algo:
if PYGMENTS:
highlight(algo_text, PythonLexer(), TerminalFormatter(),
outfile=sys.stdout)
else:
print_(algo_text)
algo = zipline.TradingAlgorithm(script=algo_text,
namespace=kwargs.get('namespace', {}),
capital_base=float(kwargs['capital_base']),
algo_filename=kwargs.get('algofile'))
perf = algo.run(source)
output_fname = kwargs.get('output', None)
if output_fname is not None:
perf.to_pickle(output_fname)
return perf
|
apache-2.0
|
xavierwu/scikit-learn
|
sklearn/metrics/tests/test_regression.py
|
272
|
6066
|
from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
|
bsd-3-clause
|
mac389/LVSI
|
src/lvsi-stains.py
|
1
|
1519
|
import itertools, json
import pandas as pd
import numpy as np
from awesome_print import ap
from statsmodels.stats.inter_rater import cohens_kappa, to_table
possible_grades = ['1','2','3']
cols_with_grades = [1,2,3]
pathologists = open('../data/rater-names','rb').read().splitlines()
lvsi = {}
for (pathologist_one, pathologist_two) in itertools.combinations(pathologists, 2):
    df_one = pd.read_excel('../data/stains.xls', pathologist_one, parse_cols=cols_with_grades, convert_float=False)
    df_two = pd.read_excel('../data/stains.xls', pathologist_two, parse_cols=cols_with_grades, convert_float=False)
    patho_one_ratings = np.array([i[0][0] if len(i[0]) > 0 else -1 for i in df_one.apply(np.nonzero, axis=1).values]).astype(int)
    patho_two_ratings = np.array([i[0][0] if len(i[0]) > 0 else -1 for i in df_two.apply(np.nonzero, axis=1).values]).astype(int)
    # Really inefficient implementation, but too many exceptions to vectorize:
    contingency_table = np.zeros((3, 3))
    for rating_one in patho_one_ratings:
        if isinstance(rating_one, list):
            rating_one = rating_one[0]
        for rating_two in patho_two_ratings:
            if isinstance(rating_two, list):
                rating_two = rating_two[0]
            print '\t %d' % rating_two
            contingency_table[rating_one, rating_two] += 1
    lvsi['%s-%s' % (pathologist_one, pathologist_two)] = cohens_kappa(contingency_table).kappa
json.dump(lvsi, open('../data/lvsi-stains-grades.json', 'wb'))
ap(np.median(lvsi.values()))
print 0.5 * (np.percentile(lvsi.values(), 75) - np.percentile(lvsi.values(), 25))
|
apache-2.0
|
marqh/cartopy
|
lib/cartopy/tests/mpl/test_img_transform.py
|
1
|
5556
|
# (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
import operator
import unittest
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison as mpl_image_comparison
import numpy
import cartopy.crs as ccrs
import cartopy.img_transform
class TestRegrid(unittest.TestCase):
def test_array_dims(self):
# Source data
source_nx = 100
source_ny = 100
source_x = numpy.linspace(-180.0, 180.0, source_nx).astype(numpy.float64)
source_y = numpy.linspace(-90, 90.0, source_ny).astype(numpy.float64)
source_x, source_y = numpy.meshgrid(source_x, source_y)
data = numpy.arange(source_nx * source_ny,
dtype=numpy.int32).reshape(source_ny, source_nx)
source_cs = ccrs.Geodetic()
# Target grid
target_nx = 23
target_ny = 45
target_proj = ccrs.PlateCarree()
target_x, target_y, extent = cartopy.img_transform.mesh_projection(target_proj,
target_nx,
target_ny)
# Perform regrid
new_array = cartopy.img_transform.regrid(data, source_x, source_y, source_cs,
target_proj, target_x, target_y)
# Check dimensions of return array
self.assertEqual(new_array.shape, target_x.shape)
self.assertEqual(new_array.shape, target_y.shape)
self.assertEqual(new_array.shape, (target_ny, target_nx))
def test_different_dims(self):
# Source data
source_nx = 100
source_ny = 100
source_x = numpy.linspace(-180.0, 180.0, source_nx).astype(numpy.float64)
source_y = numpy.linspace(-90, 90.0, source_ny).astype(numpy.float64)
source_x, source_y = numpy.meshgrid(source_x, source_y)
data = numpy.arange(source_nx * source_ny,
dtype=numpy.int32).reshape(source_ny, source_nx)
source_cs = ccrs.Geodetic()
# Target grids (different shapes)
target_x_shape = (23, 45)
target_y_shape = (23, 44)
target_x = numpy.arange(reduce(operator.mul, target_x_shape)).reshape(target_x_shape).astype(numpy.float64)
target_y = numpy.arange(reduce(operator.mul, target_y_shape)).reshape(target_y_shape).astype(numpy.float64)
target_proj = ccrs.PlateCarree()
# Attempt regrid
with self.assertRaises(ValueError):
new_array = cartopy.img_transform.regrid(data, source_x, source_y, source_cs,
target_proj, target_x, target_y)
def image_comparison(baseline_images=None, extensions=('png', ), tol=1e-8):
# changes the mpl default to only use PNGs, and tightens the tolerance so the comparison is highly rigorous.
return mpl_image_comparison(baseline_images, extensions, tol)
@image_comparison(baseline_images=['regrid_blue_marble'])
def test_regrid_blue_marble_img():
# Source data
filename = '/data/local/dataZoo/cartography/raster/blue_marble_720_360.png'
nx = 720
ny = 360
source_proj = ccrs.PlateCarree()
source_x, source_y, source_extent = cartopy.img_transform.mesh_projection(source_proj, nx, ny)
data = plt.imread(filename)
# Flip vertically to match source_x/source_y orientation
data = data[::-1]
# Target grid
target_nx = 300
target_ny = 300
target_proj = ccrs.InterruptedGoodeHomolosine()
target_x, target_y, target_extent = cartopy.img_transform.mesh_projection(target_proj,
target_nx,
target_ny)
# Perform regrid
new_array = cartopy.img_transform.regrid(data, source_x, source_y, source_proj,
target_proj, target_x, target_y)
# Plot
fig = plt.figure(figsize=(10, 10))
gs = matplotlib.gridspec.GridSpec(nrows=4, ncols=1, hspace=1.5, wspace=0.5)
# Set up axes and title
ax = plt.subplot(gs[0], frameon=False, projection=target_proj)
plt.imshow(new_array, origin='lower', extent=target_extent)
ax.coastlines()
# Plot each colour slice (tests masking)
cmaps = {'red': 'Reds', 'green': 'Greens', 'blue': 'Blues'}
for i, colour in enumerate(['red', 'green', 'blue']):
ax = plt.subplot(gs[i + 1], frameon=False, projection=target_proj)
ax.set_title(colour)
plt.pcolormesh(target_x[0, :], target_y[:, 0], new_array[:, :, i],
cmap=cmaps[colour])
ax.coastlines()
# Tighten up layout
gs.tight_layout(plt.gcf())
if __name__=='__main__':
import nose
nose.runmodule(argv=['-s','--with-doctest'], exit=False)
|
gpl-3.0
|
softwaresaved/fat
|
lowfat/management/commands/loadoldfunds.py
|
2
|
5033
|
import pandas as pd
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management.base import BaseCommand
from lowfat.models import Claimant, Fund, Expense
def conv_date(new_date):
day, month, year = new_date.split('/')
return "{}-{}-{}".format(year, month, day)
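# Rough sketch of the columns the importer below expects to find in
# old_funds.csv (an assumption inferred from the lookups in handle(), not a
# spec shipped with the project):
#
#   Forename(s), Surname, Event type, Event name, Event website,
#   Event Country, Event City, Start date, End date, Travel costs,
#   Conference/Workshop attendance fees, Subsistence costs, Venue hire,
#   Catering, Other costs, Estimate, Revised estimate, Submitted, Type,
#   Notes A, Notes B, Notes C, and the justification column
#   "How is the event relevant to the work of the Software Sustainability Institute?"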
class Command(BaseCommand):
help = "Import CSV (old_funds.csv) with funds from claimants to the database."
def add_arguments(self, parser):
parser.add_argument('csv', nargs='?', default='old_funds.csv')
# pylint: disable=too-many-branches,too-many-locals
def handle(self, *args, **options):
data = pd.read_csv(options['csv'])
for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable
try:
if pd.notnull(line["Forename(s)"]): # Looking for missing information.
this_claimant = Claimant.objects.get(forenames=line["Forename(s)"], surname=line["Surname"], fellow=True)
if line['Event type'] == 'Attending a conference/workshop':
fund_category = 'A'
elif line['Event type'] == ' Organising a workshop (e.g. Software Carpentry)':
fund_category = 'H'
elif line['Event type'] == 'Policy related event':
fund_category = 'P'
else:
fund_category = 'O'
funds_dict = {
"claimant": this_claimant,
"category": fund_category,
"name": line["Event name"],
"url": line["Event website"] if pd.notnull(line["Event website"]) else "",
"country": line["Event Country"] if len(line["Event Country"]) == 2 else 'GB',
"city": line["Event City"],
"start_date": conv_date(line["Start date"]),
"end_date": conv_date(line["End date"]) if line["End date"] else conv_date(line["Start date"]),
"budget_request_travel": line["Travel costs"] if pd.notnull(line["Travel costs"]) else 0,
"budget_request_attendance_fees": line["Conference/Workshop attendance fees"] if pd.notnull(line["Conference/Workshop attendance fees"]) else 0,
"budget_request_subsistence_cost": line["Subsistence costs"] if pd.notnull(line["Subsistence costs"]) else 0,
"budget_request_venue_hire": line["Venue hire"] if pd.notnull(line["Venue hire"]) else 0,
"budget_request_catering": line["Catering"] if pd.notnull(line["Catering"]) else 0,
"budget_request_others": line["Other costs"] if pd.notnull(line["Other costs"]) else 0,
"budget_approved": line["Estimate"] if pd.notnull(line["Estimate"]) else 0,
"justification": line["How is the event relevant to the work of the Software Sustainability Institute?"],
"notes_from_admin": "{}\n{}\n{}".format(
line["Notes A"] if pd.notnull(line["Notes A"]) else "",
line["Notes B"] if pd.notnull(line["Notes B"]) else "",
line["Notes C"] if pd.notnull(line["Notes C"]) else "")
}
fund = Fund(**funds_dict)
fund.save()
if pd.notnull(line["Submitted"]):
fund.ad_status = 'V'
fund.status = "F"
fund.save()
else:
continue
if pd.notnull(line["Revised estimate"]):
amount_claimed = line["Revised estimate"] if pd.notnull(line["Revised estimate"]) else 0
else:
amount_claimed = line["Submitted"] if pd.notnull(line["Submitted"]) else 0
expense_dict = {
"fund": fund,
"amount_claimed": amount_claimed,
"received_date": '0001-01-01',
}
with open("upload/expenses/missing-document-template.pdf", "rb") as fake_file:
expense_dict.update({
"claim": SimpleUploadedFile('missing-proof-for-{}.txt'.format(index), fake_file.read()),
})
expense = Expense(**expense_dict)
expense.save()
if pd.notnull(line["Submitted"]):
expense.funds_from = line["Type"] if pd.notnull(line["Type"]) else 'C'
expense.status = 'F'
expense.amount_authorized_for_payment = line["Revised estimate"]
expense.save()
fund.status = 'F'
fund.save()
except BaseException as exception:
print("Error: {}\n\t{}".format(exception, line))
|
bsd-3-clause
|
Aidan-Bharath/code_and_stuffs
|
sigma_profiles.py
|
1
|
3320
|
from __future__ import division
from panelCut import *
import matplotlib as mpl
def magnitude(a):
mag = np.sqrt(a['u']**2+a['v']**2+a['w']**2)
return mag
def sigma_profile(p1,p2,adcp,gap,agap,timestart,timestop):
"""
y = adcp.minor_axis[:]
p3 = magnitude(p3)
p1 = p1[::gap]
p2 = p2[::gap]
l = len(p1.iloc[:])
a = len(p3.iloc[:])
for i in xrange(l):
print i
plt.plot(p1.iloc[i],p2.iloc[i],color =(i/l,0.2,np.abs(i-l)/l,0.5))
for i in xrange(a):
print i
plt.plot(p3.iloc[i],y,color = (0,0,0,1))
plt.title('Sigma Layer Profiles')
plt.show()
"""
y = adcp.minor_axis[:]
adcp = magnitude(adcp)
p1 = p1[::gap]
p2 = p2[::gap]
adcp = adcp[::agap]
l = len(p1.iloc[:])
a = len(adcp.iloc[:])
min, max = (0,l)
step = 30
# Setting up a colormap that's a simple transition for the probe
mymap = mpl.colors.LinearSegmentedColormap.from_list('mycolors',['blue','red'])
Z = [[0,0],[0,0]]
levels = range(min,max+step,step)
CS3 = plt.contourf(Z, levels, cmap=mymap)
plt.clf()
amin, amax = (0,a)
astep = 1
# Setting up a colormap that's a simple transition for the ADCPs
mymapa = mpl.colors.LinearSegmentedColormap.from_list('mycolors',['blue','red'])
Z = [[0,0],[0,0]]
levels = range(amin,amax+astep,astep)
CS2 = plt.contourf(Z, levels, cmap=mymapa)
plt.clf()
x = np.asarray(p1.iloc[:])
z = np.asarray(p2.iloc[:])
c = np.asarray(adcp.iloc[:])
Z = np.linspace(0,l,l)
A = np.linspace(0,a,a)
r = (Z)/(max)
g = 0
b = 1-r
ra = (A)/(amax)
ga = 0
ba = 1-ra
fig,ax = plt.subplots()
for i in xrange(len(Z)):
ax.plot(x[i],z[i],color=(r[i],g,b[i],0.3))
for i in xrange(len(A)):
ax.plot(c[i],y,color = (ra[i],ga,ba[i],1),linewidth=4.0)
plt.title('Decelerating Ebb Velocity Profiles between '+timestart+' and '+timestop)
plt.xlabel('Velocity (m/s)')
plt.ylabel('Depth (m)')
cbar = plt.colorbar(CS2)
cbar.ax.get_yaxis().set_ticks([])
#for j, lab in enumerate(['$0$','$1$','$2$','$>3$']):
# cbar.ax.text(.5, (2 * j + 1) / 8.0, lab, ha='center', va='baseline')
cbar.ax.get_yaxis().labelpad = 15
cbar.ax.set_ylabel('Timestep Increase', rotation=270)
plt.show()
return p1
if __name__ == "__main__":
adcp = '/home/aidan/thesis/probe_data/panels/2013/june_july/GP-130620-BPb_vel'
el = '/home/aidan/thesis/probe_data/panels/2013/june_july/GP-130621-24-N2b_els'
vel = '/home/aidan/thesis/probe_data/panels/2013/june_july/GP-130621-24-N2b_vels'
adcp = pd.read_pickle(adcp)
el = pd.read_pickle(el)
vel = pd.read_pickle(vel)
timestart = '2013-06-21 15:00:00'
timestop = '2013-06-21 18:00:00'
average = ['2013-06-21 15:40:00','2013-06-21 15:42:00']
print adcp.major_xs(adcp.major_axis[1])['u']
adcp = timeslice(adcp,timestart,timestop)
el = timeslice(el,timestart,timestop)
vel = timeslice(vel,timestart,timestop)
s1,a1 = findtime(el,vel,average[0])
s2,a2 = findtime(el,vel,average[1])
a1,_ = findtime(adcp,adcp,average[0])
a2,_ = findtime(adcp,adcp,average[1])
gap = rolling_int(el,s1,s2)
agap = rolling_int(adcp,a1,a2)
sigma_profile(vel,el,adcp,gap,1,timestart,timestop)
|
mit
|
neutrons/Licorne-Py
|
licorne/layerplot.py
|
1
|
6557
|
from __future__ import (absolute_import, division, print_function)
from PyQt5 import QtCore, QtWidgets
import numpy as np
import copy
from licorne.layer import Layer
from licorne.generateSublayers import generateSublayers
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.cm
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
class layerplot(QtWidgets.QWidget):
def __init__(self, *args):
QtWidgets.QWidget.__init__(self, *args)
self.sample=[Layer(thickness=np.inf),Layer(thickness=np.inf)]
self.setLayout(QtWidgets.QVBoxLayout())
function_options=['NSLD_REAL','NSLD_IMAGINARY','MSLD_RHO','MSLD_THETA','MSLD_PHI','ROUGHNESS']
self.combo = QtWidgets.QComboBox(self)
for f in function_options:
self.combo.addItem(f)
self.function='NSLD_REAL'
self.paintwidget=QtWidgets.QWidget(self)
self.paintwidget.setMinimumSize(450,350)
self.canvas = PlotCanvas(self.sample, self.function,self.paintwidget)
self.hlayout=QtWidgets.QHBoxLayout()
self.hlayout.addStretch(1)
self.hlayout.addWidget(self.combo)
self.hlayout.addStretch(1)
self.layout().addLayout(self.hlayout)
self.layout().addWidget(self.paintwidget)
self.combo.activated[str].connect(self.functionSelected)
def resizeEvent(self, event):
self.canvas.setGeometry(self.paintwidget.rect())
def updateSample(self,newsamplemodel):
ll=copy.deepcopy(newsamplemodel.layers)
ll.append(newsamplemodel.substrate)
ll.insert(0,newsamplemodel.incoming_media)
self.sample=ll
self.canvas.updateLF(self.sample,self.function)
def functionSelected(self,text):
self.function=text
self.canvas.updateLF(self.sample,self.function)
class PlotCanvas(FigureCanvas):
selectionChanged=QtCore.pyqtSignal(int)
def __init__(self, layers, function, parent=None):
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
self.fig.patch.set_facecolor('white')
self.corresponding=[]
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.updateLF(layers,function)
self.fig.canvas.mpl_connect('pick_event', self.onpick)
def updateLF(self,newlayers,newfunction):
self.data=newlayers
self.variable=newfunction
self.plot()
def onpick(self,event):
ind=event.ind[0]
layer_ind=self.corresponding[ind]
self.selectionChanged.emit(layer_ind)
# for debug purposes
#if layer_ind==len(self.data)-1:
# layer_ind='substrate'
#if layer_ind==0:
# layer_ind='incoming media'
#print('picked layer {0}'.format(layer_ind))
return True
def plot(self):
sublayers,self.corresponding=plot_sublayers(self.ax, self.data, parameter=self.variable)
self.fig.tight_layout()
self.draw()
def plot_sublayers(ax, layers, parameter='NSLD_REAL'):
sublayers,corresponding=generateSublayers(layers)
thick=[sl.thickness.value for sl in sublayers]
depth=np.array(thick)
try:
thic_kmax = depth[np.isfinite(depth)].max()
except ValueError:
# all layers are infinite (maybe just incoming media and substrate)
thic_kmax = 1
depth[np.isinf(depth)]=thic_kmax
th1 = depth[corresponding.index(1)]
depth = depth.cumsum()
depth -= depth[corresponding.index(1)]-th1
depth = np.insert(depth,0,depth[0]-thic_kmax)
ax.clear()
if parameter == 'NSLD_REAL':
val=np.array([sl.nsld_real.value for sl in sublayers])
elif parameter == 'NSLD_IMAGINARY':
val=np.array([sl.nsld_imaginary.value for sl in sublayers])
elif parameter == 'MSLD_RHO':
val=np.array([sl.msld.rho.value for sl in sublayers])
elif parameter == 'MSLD_THETA':
val=np.array([sl.msld.theta.value for sl in sublayers])
elif parameter == 'MSLD_PHI':
val=np.array([sl.msld.phi.value for sl in sublayers])
elif parameter == 'ROUGHNESS':
val=np.array([layer.roughness.value for layer in layers[1:]])
else:
raise ValueError('The variable to be plotted could not be found')
if parameter != 'ROUGHNESS':
ax.plot(depth[1:],val,visible=False)
ax.plot([-1],[0.],visible=False)
patches=[]
for i,v in enumerate(val):
polygon=Polygon([[depth[i],0.],[depth[i],v],[depth[i+1],v],[depth[i+1],0]],True)
patches.append(polygon)
xmin,xmax=ax.get_xlim()
patches[0]=Polygon([[xmin,0.],[xmin,val[0]],[depth[1],val[0]],[depth[1],0]],True)
patches[-1]=Polygon([[depth[-2],0.],[depth[-2],val[-1]],[xmax,val[-1]],[xmax,0]],True)
p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4, picker=True)
p.set_array(np.array(corresponding))
ax.add_collection(p)
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
if parameter== 'ROUGHNESS':
ax.plot([-1,depth[1],depth[-1]],[0,np.max(val),np.min(val)],visible=False)
xmin, xmax = ax.get_xlim()
lthick = [l.thickness.value for l in layers[1:]]
lthick.insert(0,0.)
lthick = np.array(lthick[:-1])
depth = lthick.cumsum()
ax.stem(depth,val,linefmt='--')
ax.set_xlim(xmin,xmax)
ax.axhline(y=0)
ax.set_xlabel('Depth')
ax.set_ylabel(parameter.replace('_', ' '))
return sublayers, corresponding
if __name__ == '__main__':
# This is for testing purposes only
import sys
app=QtWidgets.QApplication(sys.argv)
mainForm=layerplot()
from licorne.layer import RoughnessModel
from licorne.SampleModel import SampleModel
newSample=SampleModel()
newSample.incoming_media=Layer(thickness=np.inf,nsld_real=1.5)
newSample.layers=[Layer(thickness=20.,nsld_real=1.),
Layer(thickness=25.,nsld_real=3.,roughness=5, roughness_model=RoughnessModel.TANH,sublayers=7),
Layer(thickness=30.,nsld_real=5.,msld_rho=7e-7,roughness=3, roughness_model=RoughnessModel.TANH,sublayers=7)]
newSample.substrate=Layer(nsld_real=4.,thickness=np.inf)
mainForm.updateSample(newSample)
mainForm.show()
sys.exit(app.exec_())
|
gpl-3.0
|
crawfordsm/pysalt
|
saltfirst/FpParallWidget.py
|
2
|
9144
|
import numpy as np
import os, errno
from PyQt4 import QtGui,QtCore
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg
from saltgui import MplCanvas
class FpParallWidget (QtGui.QWidget):
def __init__(self,parent=None):
super(FpParallWidget,self).__init__(parent)
#Load up the data:
self.loadOutparams()
#set up the file range panel
self.rangepanel=QtGui.QWidget()
# add a label:
self.FromLabel = QtGui.QLabel("From file number:")
self.ToLabel = QtGui.QLabel("To file number:")
#add the name of the file
self.FromValueLabel = QtGui.QLineEdit(str(min(self.outparams[:,0])))
self.ToValueLabel = QtGui.QLineEdit(str(max(self.outparams[:,0])))
# and a button to process the new range
self.refreshButton = QtGui.QPushButton('Refresh')
self.refreshButton.clicked.connect(self.plotOutparams)
#set up file range panel layout
rangeLayout=QtGui.QGridLayout(self.rangepanel)
rangeLayout.addWidget(self.FromLabel,0,0,1,1)
rangeLayout.addWidget(self.FromValueLabel,0,1,1,1)
rangeLayout.addWidget(self.refreshButton,0,2,2,1)
rangeLayout.addWidget(self.ToLabel,0,3,1,1)
rangeLayout.addWidget(self.ToValueLabel,0,4,1,1)
#add the radio buttons for the choice of x axis...
self.radioFilenumber= QtGui.QRadioButton("Plot vs Filenumber")
self.radioX= QtGui.QRadioButton("Plot vs etalon X")
self.radioY= QtGui.QRadioButton("Plot vs etalon Y")
# create a group for them:
self.radioGroupX=QtGui.QButtonGroup()
self.radioGroupX.addButton(self.radioFilenumber)
self.radioGroupX.addButton(self.radioX)
self.radioGroupX.addButton(self.radioY)
#make sure the filenumber is the default
self.radioFilenumber.setChecked(True)
#create radio buttons for the choice of y axis:
self.radioFWHM=QtGui.QRadioButton("Plots vs FWHM")
self.radioAmp=QtGui.QRadioButton("Plots vs Amplitude")
#add a group for the y axis:
self.radioGroupY=QtGui.QButtonGroup()
self.radioGroupY.addButton(self.radioFWHM)
self.radioGroupY.addButton(self.radioAmp)
#add a default:
self.radioFWHM.setChecked(True)
# display best fit in range:
self.fitpanel=QtGui.QWidget()
self.fitLabel = QtGui.QLabel("Lowest FWHM in file range:")
self.cleanOutparams()
self.getBestparams()
fitFileresult="File number: %i" %int(self.bestparams[0])
fitXresult="X: %i" % int(self.bestparams[1])
fitYresult="Y: %i" % int(self.bestparams[2])
fitZresult="Z: %i " % int(self.bestparams[3])
fitRresult="R: %.1f" % float(self.bestparams[4])
fitAmpresult="Amplitude: %.1f" % float(self.bestparams[5])
fitRmsresult="RMS: %.3f" % float(self.bestparams[6])
fitGammaresult="Gamma: %.2f" % float(self.bestparams[7])
fitFWHMresult="FWHM: %.3f" % float(self.bestparams[8])
#add the text to the fit results panel
self.fitFile = QtGui.QLabel(fitFileresult)
self.fitX = QtGui.QLabel(fitXresult)
self.fitY = QtGui.QLabel(fitYresult)
self.fitZ = QtGui.QLabel(fitZresult)
self.fitR = QtGui.QLabel(fitRresult)
self.fitAmp = QtGui.QLabel(fitAmpresult)
self.fitRms = QtGui.QLabel(fitRmsresult)
self.fitGamma = QtGui.QLabel(fitGammaresult)
self.fitFWHM = QtGui.QLabel(fitFWHMresult)
# lay them out nicely...
fitLayout=QtGui.QGridLayout(self.fitpanel)
fitLayout.addWidget(self.fitLabel,0,0,1,4)
fitLayout.addWidget(self.fitFile,3,0,1,1)
fitLayout.addWidget(self.fitX,3,1,1,1)
fitLayout.addWidget(self.fitY,3,2,1,1)
fitLayout.addWidget(self.fitZ,3,3,1,1)
fitLayout.addWidget(self.fitR,3,4,1,1)
fitLayout.addWidget(self.fitAmp,3,5,1,1)
fitLayout.addWidget(self.fitRms,3,6,1,1)
fitLayout.addWidget(self.fitGamma,3,7,1,1)
fitLayout.addWidget(self.fitFWHM,3,8,1,1)
#set up the fwhm plot
self.fwhmplot=MplCanvas()
self.fwhmaxes=self.fwhmplot.figure.add_subplot(111)
#connect mouse clicks
self.fwhmplot.mpl_connect('button_press_event',self.onClick)
#and now we know what the X and Y axis should be, make the fwhm/amp plot
self.plotOutparams()
# and check for radio button event signals!
self.radioGroupX.buttonClicked.connect(self.plotOutparams)
self.radioGroupY.buttonClicked.connect(self.plotOutparams)
#Add the X radio buttons to a horizontal layout
self.radiopanel= QtGui.QWidget()
radioLayout=QtGui.QHBoxLayout(self.radiopanel)
radioLayout.addWidget(self.radioFilenumber)
radioLayout.addWidget(self.radioX)
radioLayout.addWidget(self.radioY)
#Add the Y radio buttons to a vertical layout
self.radioYpanel=QtGui.QWidget()
radioYLayout=QtGui.QVBoxLayout(self.radioYpanel)
radioYLayout.addWidget(self.radioFWHM)
radioYLayout.addWidget(self.radioAmp)
# Set up the main layout
mainLayout = QtGui.QGridLayout()
mainLayout.addWidget(self.rangepanel,0,0,1,9)
mainLayout.addWidget(self.fitpanel,1,0,1,9)
mainLayout.addWidget(self.fwhmplot,2,0,1,4)
mainLayout.addWidget(self.radioYpanel,2,5,1,1)
mainLayout.addWidget(self.radiopanel,3,1,1,1)
self.setLayout(mainLayout)
def loadOutparams(self):
self.outparams=np.genfromtxt('outparams', skip_header=1)
return
def cleanOutparams(self):
minFile=float(self.FromValueLabel.text())
maxFile=float(self.ToValueLabel.text())
# print "reloading from %i to %i" % (minFile, maxFile)
self.cleanarr=[]
mask = (minFile <= self.outparams[:,0]) * (self.outparams[:,0] <= maxFile)
self.cleanarr = self.outparams[mask]
# print self.cleanarr[:,0]
return
def plotOutparams(self):
#reload outparams
self.loadOutparams()
#set up the plot....
self.cleanOutparams()
self.fwhmaxes.clear()
if self.radioFilenumber.isChecked():
x=self.cleanarr[:,0]
elif self.radioX.isChecked():
x=self.cleanarr[:,1]
elif self.radioY.isChecked():
x=self.cleanarr[:,2]
# Work out the Y axis:
if self.radioFWHM.isChecked():
y=self.cleanarr[:,8]
elif self.radioAmp.isChecked():
y=self.cleanarr[:,5]
self.fwhmaxes.plot(x, y, 'bo')
# self.show()
# don't forget to force a redraw!
self.fwhmplot.draw()
#ummmm we forgot to update the best fit..
self.getBestparams()
self.fitFile.setText("File number: %i" %int(self.bestparams[0]))
self.fitX.setText("X: %i" % int(self.bestparams[1]))
self.fitY.setText("Y: %i" % int(self.bestparams[2]))
self.fitZ.setText("Z: %i " % int(self.bestparams[3]))
self.fitR.setText("R: %.1f" % float(self.bestparams[4]))
self.fitAmp.setText("Amplitude: %.1f" % float(self.bestparams[5]))
self.fitRms.setText("RMS: %.2f" % float(self.bestparams[6]))
self.fitGamma.setText("Gamma: %.2f" % float(self.bestparams[7]))
self.fitFWHM.setText("FWHM: %.3f" % float(self.bestparams[8]))
# self.fitpanel.show()
return
def onClick(self,event):
# What's on the X axis?
if self.radioFilenumber.isChecked():
mask = (self.cleanarr[:,0]==round(event.xdata))
elif self.radioX.isChecked():
mask = (self.cleanarr[:,1]==round(event.xdata))
elif self.radioY.isChecked():
mask = (self.cleanarr[:,2]==round(event.xdata))
# get from the array the first row that matches the X value)
datapoint = self.cleanarr[mask][0]
#format it ready for the tooltip:
text="FileNumber: %i, \nX: %i, \nY: %i, \nZ:%i, \nAmp: %.2f, \nRMS: %.2f, \nGamma: %.2f, \nFWHM: %.3f" % (int(datapoint[0]), int(datapoint[1]),int(datapoint[2]),int(datapoint[3]),datapoint[5],datapoint[6],datapoint[7],datapoint[8])
#and plonk it on! :)
QtGui.QToolTip.showText(QtCore.QPoint(338,314),text)
return
def getBestparams(self):
if self.radioFWHM.isChecked():
self.fitLabel.setText("Lowest FWHM in file range:")
mask = (self.cleanarr[:,8]==min(self.cleanarr[:,8]))
self.bestparams = self.cleanarr[mask][0]
elif self.radioAmp.isChecked():
self.fitLabel.setText("Highest Amplitude in file range:")
mask = (self.cleanarr[:,5]==max(self.cleanarr[:,5]))
self.bestparams = self.cleanarr[mask][0]
return
|
bsd-3-clause
|
chandlercr/aima-python
|
submissions/Hawley/kmeans.py
|
13
|
1586
|
import numpy as np
import matplotlib.pyplot as plt
# import fileinput
N = 100 # number of observations / 'points'
K = 4 # number of categories / 'means'
P = 10 # plot interval
def distance(x1,y1,x2,y2): # pythagorean distance
return np.sqrt( (x2-x1)**2 + (y2-y1)**2)
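# Illustrative sketch (not in the original script): because distance() broadcasts
# over numpy arrays, the per-point search below could also be done without the
# inner loop over means; `j` here stands for some point index.
#   d = distance(points_x[j], points_y[j], means_x, means_y)   # shape (K,)
#   nearest = np.argmin(d)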
# Fancy data structure: We will group points by common indices in separate arrays,
# i.e. the first point will have coordinates (x[1],y[1])
points_x = np.random.rand(N) # points are random on [0,1]
points_y = np.random.rand(N)
colors = np.zeros(N, dtype=int)   # colors will show who belongs to which mean
means_x = np.random.rand(K) # initialize means w/ random numbers on [0,1]
means_y = np.random.rand(K)
fig = plt.figure()
iterations = 100
for i in range(iterations):
# loop over all points: figure out who belongs to which means (assign colors)
for j in range(N):
        min_dist = np.inf  # larger than any possible distance
for m in range(K): # loop over all means
dist = distance(points_x[j], points_y[j], means_x[m], means_y[m])
if (dist < min_dist): # then update the color
min_dist = dist
colors[j] = m
#re-evaluate means
for m in range(K):
inds = np.where( m == colors) # indices of everybody belonging to one mean
means_x[m] = np.mean(points_x[inds]) # take the mean of the x-values in the group
means_y[m] = np.mean(points_y[inds]) # take the mean of the y-values in the group
# Update the picture
if(not i % P):
plt.scatter(points_x, points_y, c=colors, s=50, alpha=0.7)
plt.show()
# print('Proceed', '?')
# proceed = fileinput.input()
|
mit
|
itaiin/arrow
|
python/pyarrow/tests/test_table.py
|
1
|
26230
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict, Iterable
import pickle
import numpy as np
import pytest
import pyarrow as pa
def test_chunked_array_basics():
data = pa.chunked_array([], type=pa.string())
assert data.type == pa.string()
assert data.to_pylist() == []
with pytest.raises(ValueError):
pa.chunked_array([])
data = pa.chunked_array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
])
assert isinstance(data.chunks, list)
assert all(isinstance(c, pa.lib.Int64Array) for c in data.chunks)
assert all(isinstance(c, pa.lib.Int64Array) for c in data.iterchunks())
assert len(data.chunks) == 3
def test_chunked_array_str():
data = [
pa.array([1, 2, 3]),
pa.array([4, 5, 6])
]
data = pa.chunked_array(data)
assert str(data) == """[
[
1,
2,
3
],
[
4,
5,
6
]
]"""
def test_chunked_array_getitem():
data = [
pa.array([1, 2, 3]),
pa.array([4, 5, 6])
]
data = pa.chunked_array(data)
assert data[1].as_py() == 2
assert data[-1].as_py() == 6
assert data[-6].as_py() == 1
with pytest.raises(IndexError):
data[6]
with pytest.raises(IndexError):
data[-7]
data_slice = data[2:4]
assert data_slice.to_pylist() == [3, 4]
data_slice = data[4:-1]
assert data_slice.to_pylist() == [5]
data_slice = data[99:99]
assert data_slice.type == data.type
assert data_slice.to_pylist() == []
def test_chunked_array_iter():
data = [
pa.array([0]),
pa.array([1, 2, 3]),
pa.array([4, 5, 6]),
pa.array([7, 8, 9])
]
arr = pa.chunked_array(data)
for i, j in zip(range(10), arr):
assert i == j
assert isinstance(arr, Iterable)
def test_chunked_array_equals():
def eq(xarrs, yarrs):
if isinstance(xarrs, pa.ChunkedArray):
x = xarrs
else:
x = pa.chunked_array(xarrs)
if isinstance(yarrs, pa.ChunkedArray):
y = yarrs
else:
y = pa.chunked_array(yarrs)
assert x.equals(y)
assert y.equals(x)
assert x == y
assert x != str(y)
def ne(xarrs, yarrs):
if isinstance(xarrs, pa.ChunkedArray):
x = xarrs
else:
x = pa.chunked_array(xarrs)
if isinstance(yarrs, pa.ChunkedArray):
y = yarrs
else:
y = pa.chunked_array(yarrs)
assert not x.equals(y)
assert not y.equals(x)
assert x != y
eq(pa.chunked_array([], type=pa.int32()),
pa.chunked_array([], type=pa.int32()))
ne(pa.chunked_array([], type=pa.int32()),
pa.chunked_array([], type=pa.int64()))
a = pa.array([0, 2], type=pa.int32())
b = pa.array([0, 2], type=pa.int64())
c = pa.array([0, 3], type=pa.int32())
d = pa.array([0, 2, 0, 3], type=pa.int32())
eq([a], [a])
ne([a], [b])
eq([a, c], [a, c])
eq([a, c], [d])
ne([c, a], [a, c])
assert not pa.chunked_array([], type=pa.int32()).equals(None)
@pytest.mark.parametrize(
('data', 'typ'),
[
([True, False, True, True], pa.bool_()),
([1, 2, 4, 6], pa.int64()),
([1.0, 2.5, None], pa.float64()),
(['a', None, 'b'], pa.string()),
([], pa.list_(pa.uint8())),
([[1, 2], [3]], pa.list_(pa.int64())),
([['a'], None, ['b', 'c']], pa.list_(pa.string())),
([(1, 'a'), (2, 'c'), None],
pa.struct([pa.field('a', pa.int64()), pa.field('b', pa.string())]))
]
)
def test_chunked_array_pickle(data, typ):
arrays = []
while data:
arrays.append(pa.array(data[:2], type=typ))
data = data[2:]
array = pa.chunked_array(arrays, type=typ)
result = pickle.loads(pickle.dumps(array))
assert result.equals(array)
@pytest.mark.pandas
def test_chunked_array_to_pandas():
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
chunked_arr = table.column(0).data
assert isinstance(chunked_arr, pa.ChunkedArray)
array = chunked_arr.to_pandas()
assert array.shape == (5,)
assert array[0] == -10
def test_chunked_array_asarray():
data = [
pa.array([0]),
pa.array([1, 2, 3])
]
chunked_arr = pa.chunked_array(data)
np_arr = np.asarray(chunked_arr)
assert np_arr.tolist() == [0, 1, 2, 3]
assert np_arr.dtype == np.dtype('int64')
# An optional type can be specified when calling np.asarray
np_arr = np.asarray(chunked_arr, dtype='str')
assert np_arr.tolist() == ['0', '1', '2', '3']
# Types are modified when there are nulls
data = [
pa.array([1, None]),
pa.array([1, 2, 3])
]
chunked_arr = pa.chunked_array(data)
np_arr = np.asarray(chunked_arr)
elements = np_arr.tolist()
assert elements[0] == 1.
assert np.isnan(elements[1])
assert elements[2:] == [1., 2., 3.]
assert np_arr.dtype == np.dtype('float64')
def test_column_basics():
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
column = table.column(0)
assert column.name == 'a'
assert column.length() == 5
assert len(column) == 5
assert column.shape == (5,)
assert column.to_pylist() == [-10, -5, 0, 5, 10]
assert column == pa.Column.from_array("a", column.data)
assert column != pa.Column.from_array("b", column.data)
assert column != column.data
assert not column.equals(None)
def test_column_factory_function():
# ARROW-1575
arr = pa.array([0, 1, 2, 3, 4])
arr2 = pa.array([5, 6, 7, 8])
col1 = pa.Column.from_array('foo', arr)
col2 = pa.Column.from_array(pa.field('foo', arr.type), arr)
assert col1.equals(col2)
col3 = pa.column('foo', [arr, arr2])
chunked_arr = pa.chunked_array([arr, arr2])
col4 = pa.column('foo', chunked_arr)
assert col3.equals(col4)
col5 = pa.column('foo', arr.to_pandas())
assert col5.equals(pa.column('foo', arr))
# Type mismatch
with pytest.raises(ValueError):
pa.Column.from_array(pa.field('foo', pa.string()), arr)
def test_column_pickle():
arr = pa.chunked_array([[1, 2], [5, 6, 7]], type=pa.int16())
field = pa.field("ints", pa.int16()).add_metadata({b"foo": b"bar"})
col = pa.column(field, arr)
result = pickle.loads(pickle.dumps(col))
assert result.equals(col)
assert result.data.num_chunks == 2
assert result.field == field
@pytest.mark.pandas
def test_column_to_pandas():
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
column = table.column(0)
series = column.to_pandas()
assert series.name == 'a'
assert series.shape == (5,)
assert series.iloc[0] == -10
def test_column_asarray():
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
column = table.column(0)
np_arr = np.asarray(column)
assert np_arr.tolist() == [-10, -5, 0, 5, 10]
assert np_arr.dtype == np.dtype('int64')
# An optional type can be specified when calling np.asarray
np_arr = np.asarray(column, dtype='str')
assert np_arr.tolist() == ['-10', '-5', '0', '5', '10']
def test_column_flatten():
ty = pa.struct([pa.field('x', pa.int16()),
pa.field('y', pa.float32())])
a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
col = pa.Column.from_array('foo', a)
x, y = col.flatten()
assert x == pa.column('foo.x', pa.array([1, 3, 5], type=pa.int16()))
assert y == pa.column('foo.y', pa.array([2.5, 4.5, 6.5],
type=pa.float32()))
# Empty column
a = pa.array([], type=ty)
col = pa.Column.from_array('foo', a)
x, y = col.flatten()
assert x == pa.column('foo.x', pa.array([], type=pa.int16()))
assert y == pa.column('foo.y', pa.array([], type=pa.float32()))
def test_column_getitem():
arr = pa.array([1, 2, 3, 4, 5, 6])
col = pa.column('ints', arr)
assert col[1].as_py() == 2
assert col[-1].as_py() == 6
assert col[-6].as_py() == 1
with pytest.raises(IndexError):
col[6]
with pytest.raises(IndexError):
col[-7]
data_slice = col[2:4]
assert data_slice.to_pylist() == [3, 4]
data_slice = col[4:-1]
assert data_slice.to_pylist() == [5]
data_slice = col[99:99]
assert data_slice.type == col.type
assert data_slice.to_pylist() == []
def test_recordbatch_basics():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10])
]
batch = pa.RecordBatch.from_arrays(data, ['c0', 'c1'])
assert not batch.schema.metadata
assert len(batch) == 5
assert batch.num_rows == 5
assert batch.num_columns == len(data)
assert batch.to_pydict() == OrderedDict([
('c0', [0, 1, 2, 3, 4]),
('c1', [-10, -5, 0, 5, 10])
])
with pytest.raises(IndexError):
# bounds checking
batch[2]
# Schema passed explicitly
schema = pa.schema([pa.field('c0', pa.int16()),
pa.field('c1', pa.int32())],
metadata={b'foo': b'bar'})
batch = pa.RecordBatch.from_arrays(data, schema)
assert batch.schema == schema
def test_recordbatch_from_arrays_validate_lengths():
# ARROW-2820
data = [pa.array([1]), pa.array(["tokyo", "like", "happy"]),
pa.array(["derek"])]
with pytest.raises(ValueError):
pa.RecordBatch.from_arrays(data, ['id', 'tags', 'name'])
def test_recordbatch_no_fields():
batch = pa.RecordBatch.from_arrays([], [])
assert len(batch) == 0
assert batch.num_rows == 0
assert batch.num_columns == 0
def test_recordbatch_from_arrays_invalid_names():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10])
]
with pytest.raises(ValueError):
pa.RecordBatch.from_arrays(data, names=['a', 'b', 'c'])
with pytest.raises(ValueError):
pa.RecordBatch.from_arrays(data, names=['a'])
def test_recordbatch_empty_metadata():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10])
]
batch = pa.RecordBatch.from_arrays(data, ['c0', 'c1'])
assert batch.schema.metadata is None
def test_recordbatch_pickle():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10])
]
schema = pa.schema([pa.field('ints', pa.int8()),
pa.field('floats', pa.float32()),
]).add_metadata({b'foo': b'bar'})
batch = pa.RecordBatch.from_arrays(data, schema)
result = pickle.loads(pickle.dumps(batch))
assert result.equals(batch)
assert result.schema == schema
def test_recordbatch_slice_getitem():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10])
]
names = ['c0', 'c1']
batch = pa.RecordBatch.from_arrays(data, names)
sliced = batch.slice(2)
assert sliced.num_rows == 3
expected = pa.RecordBatch.from_arrays(
[x.slice(2) for x in data], names)
assert sliced.equals(expected)
sliced2 = batch.slice(2, 2)
expected2 = pa.RecordBatch.from_arrays(
[x.slice(2, 2) for x in data], names)
assert sliced2.equals(expected2)
# 0 offset
assert batch.slice(0).equals(batch)
# Slice past end of array
assert len(batch.slice(len(batch))) == 0
with pytest.raises(IndexError):
batch.slice(-1)
# Check __getitem__-based slicing
assert batch.slice(0, 0).equals(batch[:0])
assert batch.slice(0, 2).equals(batch[:2])
assert batch.slice(2, 2).equals(batch[2:4])
assert batch.slice(2, len(batch) - 2).equals(batch[2:])
assert batch.slice(len(batch) - 2, 2).equals(batch[-2:])
assert batch.slice(len(batch) - 4, 2).equals(batch[-4:-2])
def test_recordbatchlist_schema_equals():
a1 = np.array([1], dtype='uint32')
a2 = np.array([4.0, 5.0], dtype='float64')
batch1 = pa.RecordBatch.from_arrays([pa.array(a1)], ['c1'])
batch2 = pa.RecordBatch.from_arrays([pa.array(a2)], ['c1'])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_batches([batch1, batch2])
def test_table_equals():
table = pa.Table.from_arrays([])
assert table.equals(table)
# ARROW-4822
assert not table.equals(None)
def test_table_from_batches_and_schema():
schema = pa.schema([
pa.field('a', pa.int64()),
pa.field('b', pa.float64()),
])
batch = pa.RecordBatch.from_arrays([pa.array([1]), pa.array([3.14])],
names=['a', 'b'])
table = pa.Table.from_batches([batch], schema)
assert table.schema.equals(schema)
assert table.column(0) == pa.column('a', pa.array([1]))
assert table.column(1) == pa.column('b', pa.array([3.14]))
incompatible_schema = pa.schema([pa.field('a', pa.int64())])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_batches([batch], incompatible_schema)
incompatible_batch = pa.RecordBatch.from_arrays([pa.array([1])], ['a'])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_batches([incompatible_batch], schema)
@pytest.mark.pandas
def test_table_to_batches():
from pandas.util.testing import assert_frame_equal
import pandas as pd
df1 = pd.DataFrame({'a': list(range(10))})
df2 = pd.DataFrame({'a': list(range(10, 30))})
batch1 = pa.RecordBatch.from_pandas(df1, preserve_index=False)
batch2 = pa.RecordBatch.from_pandas(df2, preserve_index=False)
table = pa.Table.from_batches([batch1, batch2, batch1])
expected_df = pd.concat([df1, df2, df1], ignore_index=True)
batches = table.to_batches()
assert len(batches) == 3
assert_frame_equal(pa.Table.from_batches(batches).to_pandas(),
expected_df)
batches = table.to_batches(chunksize=15)
assert list(map(len, batches)) == [10, 15, 5, 10]
assert_frame_equal(table.to_pandas(), expected_df)
assert_frame_equal(pa.Table.from_batches(batches).to_pandas(),
expected_df)
table_from_iter = pa.Table.from_batches(iter([batch1, batch2, batch1]))
assert table.equals(table_from_iter)
def test_table_basics():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=('a', 'b'))
table._validate()
assert len(table) == 5
assert table.num_rows == 5
assert table.num_columns == 2
assert table.shape == (5, 2)
assert table.to_pydict() == OrderedDict([
('a', [0, 1, 2, 3, 4]),
('b', [-10, -5, 0, 5, 10])
])
columns = []
for col in table.itercolumns():
columns.append(col)
for chunk in col.data.iterchunks():
assert chunk is not None
with pytest.raises(IndexError):
col.data.chunk(-1)
with pytest.raises(IndexError):
col.data.chunk(col.data.num_chunks)
assert table.columns == columns
assert table == pa.Table.from_arrays(columns)
assert table != pa.Table.from_arrays(columns[1:])
assert table != columns
def test_table_from_arrays_preserves_column_metadata():
# Added to test https://issues.apache.org/jira/browse/ARROW-3866
arr0 = pa.array([1, 2])
arr1 = pa.array([3, 4])
field0 = pa.field('field1', pa.int64(), metadata=dict(a="A", b="B"))
field1 = pa.field('field2', pa.int64(), nullable=False)
columns = [
pa.column(field0, arr0),
pa.column(field1, arr1)
]
table = pa.Table.from_arrays(columns)
assert b"a" in table.column(0).field.metadata
assert table.column(1).field.nullable is False
def test_table_from_arrays_invalid_names():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10])
]
with pytest.raises(ValueError):
pa.Table.from_arrays(data, names=['a', 'b', 'c'])
with pytest.raises(ValueError):
pa.Table.from_arrays(data, names=['a'])
def test_table_from_lists_raises():
data = [
list(range(5)),
[-10, -5, 0, 5, 10]
]
with pytest.raises(TypeError):
pa.Table.from_arrays(data, names=['a', 'b'])
schema = pa.schema([
pa.field('a', pa.uint16()),
pa.field('b', pa.int64())
])
with pytest.raises(TypeError):
pa.Table.from_arrays(data, schema=schema)
def test_table_pickle():
data = [
pa.chunked_array([[1, 2], [3, 4]], type=pa.uint32()),
pa.chunked_array([["some", "strings", None, ""]], type=pa.string()),
]
schema = pa.schema([pa.field('ints', pa.uint32()),
pa.field('strs', pa.string())],
metadata={b'foo': b'bar'})
table = pa.Table.from_arrays(data, schema=schema)
result = pickle.loads(pickle.dumps(table))
result._validate()
assert result.equals(table)
def test_table_select_column():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
table = pa.Table.from_arrays(data, names=('a', 'b', 'c'))
assert table.column('a').equals(table.column(0))
with pytest.raises(KeyError):
table.column('d')
with pytest.raises(TypeError):
table.column(None)
def test_table_add_column():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
table = pa.Table.from_arrays(data, names=('a', 'b', 'c'))
col = pa.Column.from_array('d', data[1])
t2 = table.add_column(3, col)
t3 = table.append_column(col)
expected = pa.Table.from_arrays(data + [data[1]],
names=('a', 'b', 'c', 'd'))
assert t2.equals(expected)
assert t3.equals(expected)
t4 = table.add_column(0, col)
expected = pa.Table.from_arrays([data[1]] + data,
names=('d', 'a', 'b', 'c'))
assert t4.equals(expected)
def test_table_set_column():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
table = pa.Table.from_arrays(data, names=('a', 'b', 'c'))
col = pa.Column.from_array('d', data[1])
t2 = table.set_column(0, col)
expected_data = list(data)
expected_data[0] = data[1]
expected = pa.Table.from_arrays(expected_data,
names=('d', 'b', 'c'))
assert t2.equals(expected)
def test_table_drop():
""" drop one or more columns given labels"""
a = pa.array(range(5))
b = pa.array([-10, -5, 0, 5, 10])
c = pa.array(range(5, 10))
table = pa.Table.from_arrays([a, b, c], names=('a', 'b', 'c'))
t2 = table.drop(['a', 'b'])
exp = pa.Table.from_arrays([c], names=('c',))
assert exp.equals(t2)
# -- raise KeyError if column not in Table
with pytest.raises(KeyError, match="Column 'd' not found"):
table.drop(['d'])
def test_table_remove_column():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array(range(5, 10))
]
table = pa.Table.from_arrays(data, names=('a', 'b', 'c'))
t2 = table.remove_column(0)
t2._validate()
expected = pa.Table.from_arrays(data[1:], names=('b', 'c'))
assert t2.equals(expected)
def test_table_remove_column_empty():
# ARROW-1865
data = [
pa.array(range(5)),
]
table = pa.Table.from_arrays(data, names=['a'])
t2 = table.remove_column(0)
t2._validate()
assert len(t2) == len(table)
t3 = t2.add_column(0, table[0])
t3._validate()
assert t3.equals(table)
def test_table_flatten():
ty1 = pa.struct([pa.field('x', pa.int16()),
pa.field('y', pa.float32())])
ty2 = pa.struct([pa.field('nest', ty1)])
a = pa.array([(1, 2.5), (3, 4.5)], type=ty1)
b = pa.array([((11, 12.5),), ((13, 14.5),)], type=ty2)
c = pa.array([False, True], type=pa.bool_())
table = pa.Table.from_arrays([a, b, c], names=['a', 'b', 'c'])
t2 = table.flatten()
t2._validate()
expected = pa.Table.from_arrays([
pa.array([1, 3], type=pa.int16()),
pa.array([2.5, 4.5], type=pa.float32()),
pa.array([(11, 12.5), (13, 14.5)], type=ty1),
c],
names=['a.x', 'a.y', 'b.nest', 'c'])
assert t2.equals(expected)
def test_concat_tables():
data = [
list(range(5)),
[-10., -5., 0., 5., 10.]
]
data2 = [
list(range(5, 10)),
[1., 2., 3., 4., 5.]
]
t1 = pa.Table.from_arrays([pa.array(x) for x in data],
names=('a', 'b'))
t2 = pa.Table.from_arrays([pa.array(x) for x in data2],
names=('a', 'b'))
result = pa.concat_tables([t1, t2])
result._validate()
assert len(result) == 10
expected = pa.Table.from_arrays([pa.array(x + y)
for x, y in zip(data, data2)],
names=('a', 'b'))
assert result.equals(expected)
@pytest.mark.pandas
def test_concat_tables_with_different_schema_metadata():
import pandas as pd
schema = pa.schema([
pa.field('a', pa.string()),
pa.field('b', pa.string()),
])
values = list('abcdefgh')
df1 = pd.DataFrame({'a': values, 'b': values})
df2 = pd.DataFrame({'a': [np.nan] * 8, 'b': values})
table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)
assert table1.schema.equals(table2.schema, check_metadata=False)
assert not table1.schema.equals(table2.schema, check_metadata=True)
table3 = pa.concat_tables([table1, table2])
assert table1.schema.equals(table3.schema, check_metadata=True)
assert table2.schema.equals(table3.schema, check_metadata=False)
def test_table_negative_indexing():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
pa.array([1.0, 2.0, 3.0, 4.0, 5.0]),
pa.array(['ab', 'bc', 'cd', 'de', 'ef']),
]
table = pa.Table.from_arrays(data, names=tuple('abcd'))
assert table[-1].equals(table[3])
assert table[-2].equals(table[2])
assert table[-3].equals(table[1])
assert table[-4].equals(table[0])
with pytest.raises(IndexError):
table[-5]
with pytest.raises(IndexError):
table[4]
def test_table_cast_to_incompatible_schema():
data = [
pa.array(range(5)),
pa.array([-10, -5, 0, 5, 10]),
]
table = pa.Table.from_arrays(data, names=tuple('ab'))
target_schema1 = pa.schema([
pa.field('A', pa.int32()),
pa.field('b', pa.int16()),
])
target_schema2 = pa.schema([
pa.field('a', pa.int32()),
])
message = ("Target schema's field names are not matching the table's "
"field names:.*")
with pytest.raises(ValueError, match=message):
table.cast(target_schema1)
with pytest.raises(ValueError, match=message):
table.cast(target_schema2)
def test_table_safe_casting():
data = [
pa.array(range(5), type=pa.int64()),
pa.array([-10, -5, 0, 5, 10], type=pa.int32()),
pa.array([1.0, 2.0, 3.0, 4.0, 5.0], type=pa.float64()),
pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string())
]
table = pa.Table.from_arrays(data, names=tuple('abcd'))
expected_data = [
pa.array(range(5), type=pa.int32()),
pa.array([-10, -5, 0, 5, 10], type=pa.int16()),
pa.array([1, 2, 3, 4, 5], type=pa.int64()),
pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string())
]
expected_table = pa.Table.from_arrays(expected_data, names=tuple('abcd'))
target_schema = pa.schema([
pa.field('a', pa.int32()),
pa.field('b', pa.int16()),
pa.field('c', pa.int64()),
pa.field('d', pa.string())
])
casted_table = table.cast(target_schema)
assert casted_table.equals(expected_table)
def test_table_unsafe_casting():
data = [
pa.array(range(5), type=pa.int64()),
pa.array([-10, -5, 0, 5, 10], type=pa.int32()),
pa.array([1.1, 2.2, 3.3, 4.4, 5.5], type=pa.float64()),
pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string())
]
table = pa.Table.from_arrays(data, names=tuple('abcd'))
expected_data = [
pa.array(range(5), type=pa.int32()),
pa.array([-10, -5, 0, 5, 10], type=pa.int16()),
pa.array([1, 2, 3, 4, 5], type=pa.int64()),
pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string())
]
expected_table = pa.Table.from_arrays(expected_data, names=tuple('abcd'))
target_schema = pa.schema([
pa.field('a', pa.int32()),
pa.field('b', pa.int16()),
pa.field('c', pa.int64()),
pa.field('d', pa.string())
])
with pytest.raises(pa.ArrowInvalid,
match='Floating point value truncated'):
table.cast(target_schema)
casted_table = table.cast(target_schema, safe=False)
assert casted_table.equals(expected_table)
def test_invalid_table_construct():
array = np.array([0, 1], dtype=np.uint8)
u8 = pa.uint8()
arrays = [pa.array(array, type=u8), pa.array(array[1:], type=u8)]
with pytest.raises(pa.lib.ArrowInvalid):
pa.Table.from_arrays(arrays, names=["a1", "a2"])
|
apache-2.0
|
NelisVerhoef/scikit-learn
|
examples/linear_model/plot_ridge_path.py
|
254
|
1655
|
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
dhruvparamhans/zipline
|
zipline/utils/data_source_tables_gen.py
|
40
|
7380
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import getopt
import traceback
import numpy as np
import pandas as pd
import datetime
import logging
import tables
import gzip
import glob
import os
import random
import csv
import time
from six import print_
FORMAT = "%(asctime)-15s -8s %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
OHLCTableDescription = {'sid': tables.StringCol(14, pos=2),
'dt': tables.Int64Col(pos=1),
'open': tables.Float64Col(dflt=np.NaN, pos=3),
'high': tables.Float64Col(dflt=np.NaN, pos=4),
'low': tables.Float64Col(dflt=np.NaN, pos=5),
'close': tables.Float64Col(dflt=np.NaN, pos=6),
"volume": tables.Int64Col(dflt=0, pos=7)}
def process_line(line):
dt = np.datetime64(line["dt"]).astype(np.int64)
sid = line["sid"]
open_p = float(line["open"])
high_p = float(line["high"])
low_p = float(line["low"])
close_p = float(line["close"])
volume = int(line["volume"])
return (dt, sid, open_p, high_p, low_p, close_p, volume)
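# Illustrative sketch (not part of the original script; values are made up):
# process_line turns one csv.DictReader row into the flat tuple stored in the
# HDF5 table.
#   line = {"dt": "2014-01-02T09:30:00", "sid": "TEST", "open": "10.0",
#           "high": "10.2", "low": "9.9", "close": "10.1", "volume": "500"}
#   process_line(line)  # -> (dt converted to int64, 'TEST', 10.0, 10.2, 9.9, 10.1, 500)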
def parse_csv(csv_reader):
previous_date = None
data = []
dtype = [('dt', 'int64'), ('sid', '|S14'), ('open', float),
('high', float), ('low', float), ('close', float),
('volume', int)]
for line in csv_reader:
row = process_line(line)
current_date = line["dt"][:10].replace("-", "")
        if previous_date and previous_date != current_date:
            rows = np.array(data, dtype=dtype).view(np.recarray)
            yield previous_date, rows
            data = []
        data.append(row)
        previous_date = current_date
    if data:
        # flush the rows accumulated for the final date in the file
        rows = np.array(data, dtype=dtype).view(np.recarray)
        yield previous_date, rows
def merge_all_files_into_pytables(file_dir, file_out):
"""
process each file into pytables
"""
start = None
start = datetime.datetime.now()
out_h5 = tables.openFile(file_out,
mode="w",
title="bars",
filters=tables.Filters(complevel=9,
complib='zlib'))
table = None
for file_in in glob.glob(file_dir + "/*.gz"):
gzip_file = gzip.open(file_in)
expected_header = ["dt", "sid", "open", "high", "low", "close",
"volume"]
csv_reader = csv.DictReader(gzip_file)
header = csv_reader.fieldnames
if header != expected_header:
logging.warn("expected header %s\n" % (expected_header))
logging.warn("header_found %s" % (header))
return
for current_date, rows in parse_csv(csv_reader):
table = out_h5.createTable("/TD", "date_" + current_date,
OHLCTableDescription,
expectedrows=len(rows),
createparents=True)
table.append(rows)
table.flush()
if table is not None:
table.flush()
end = datetime.datetime.now()
diff = (end - start).seconds
logging.debug("finished it took %d." % (diff))
def create_fake_csv(file_in):
fields = ["dt", "sid", "open", "high", "low", "close", "volume"]
gzip_file = gzip.open(file_in, "w")
dict_writer = csv.DictWriter(gzip_file, fieldnames=fields)
current_dt = datetime.date.today() - datetime.timedelta(days=2)
current_dt = pd.Timestamp(current_dt).replace(hour=9)
current_dt = current_dt.replace(minute=30)
end_time = pd.Timestamp(datetime.date.today())
end_time = end_time.replace(hour=16)
last_price = 10.0
while current_dt < end_time:
row = {}
row["dt"] = current_dt
row["sid"] = "test"
last_price += random.randint(-20, 100) / 10000.0
row["close"] = last_price
row["open"] = last_price - 0.01
row["low"] = last_price - 0.02
row["high"] = last_price + 0.02
row["volume"] = random.randint(10, 1000) * 10
dict_writer.writerow(row)
current_dt += datetime.timedelta(minutes=1)
if current_dt.hour > 16:
current_dt += datetime.timedelta(days=1)
current_dt = current_dt.replace(hour=9)
current_dt = current_dt.replace(minute=30)
gzip_file.close()
def main(argv=None):
"""
This script cleans minute bars into pytables file
data_source_tables_gen.py
[--tz_in] sets time zone of data only reasonably fast way to use
time.tzset()
[--dir_in] iterates through directory provided of csv files in gzip form
in form:
dt, sid, open, high, low, close, volume
    2012-01-01T12:30:30,1234HT,1,2,3,4.0,1000
[--fake_csv] creates a fake sample csv to iterate through
[--file_out] determines output file
"""
if argv is None:
argv = sys.argv
try:
dir_in = None
file_out = "./all.h5"
fake_csv = None
try:
opts, args = getopt.getopt(argv[1:], "hdft",
["help",
"dir_in=",
"debug",
"tz_in=",
"fake_csv=",
"file_out="])
except getopt.error as msg:
raise Usage(msg)
for opt, value in opts:
if opt in ("--help", "-h"):
print_(main.__doc__)
if opt in ("-d", "--debug"):
logging.basicConfig(format=FORMAT,
level=logging.DEBUG)
if opt in ("-d", "--dir_in"):
dir_in = value
if opt in ("-o", "--file_out"):
file_out = value
if opt in ("--fake_csv"):
fake_csv = value
if opt in ("--tz_in"):
os.environ['TZ'] = value
time.tzset()
try:
if dir_in:
merge_all_files_into_pytables(dir_in, file_out)
if fake_csv:
create_fake_csv(fake_csv)
except Exception:
error = "An unhandled error occured in the"
error += "data_source_tables_gen.py script."
error += "\n\nTraceback:\n"
error += '-' * 70 + "\n"
error += "".join(traceback.format_tb(sys.exc_info()[2]))
error += repr(sys.exc_info()[1]) + "\n"
error += str(sys.exc_info()[1]) + "\n"
error += '-' * 70 + "\n"
print_(error)
except Usage as err:
print_(err.msg)
print_("for help use --help")
return 2
if __name__ == "__main__":
sys.exit(main())
|
apache-2.0
|
jorge2703/scikit-learn
|
examples/linear_model/plot_sgd_separating_hyperplane.py
|
260
|
1219
|
"""
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
    p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
vortex-exoplanet/VIP
|
vip_hci/var/shapes.py
|
2
|
27456
|
#! /usr/bin/env python
"""
Module with various functions to create shapes, annuli and segments.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez'
__all__ = ['dist',
'dist_matrix',
'frame_center',
'get_square',
'get_circle',
'get_ellipse',
'get_annulus_segments',
'get_annular_wedge',
'get_ell_annulus',
'mask_circle',
'create_ringed_spider_mask',
'matrix_scaling',
'prepare_matrix',
'reshape_matrix']
import numpy as np
from skimage.draw import polygon
from skimage.draw import circle
from sklearn.preprocessing import scale
from ..conf.utils_conf import frame_or_shape
def mask_circle(array, radius, fillwith=0, mode='in'):
"""
Mask the pixels inside/outside of a centered circle with ``fillwith``.
Returns a modified copy of ``array``.
Parameters
----------
array : 2d/3d/4d numpy ndarray
Input frame or cube.
radius : int
Radius of the circular aperture.
fillwith : int, float or np.nan, optional
Value to put instead of the masked out pixels.
mode : {'in', 'out'}, optional
When set to 'in' then the pixels inside the radius are set to
``fillwith``. When set to 'out' the pixels outside the circular mask are
set to ``fillwith``.
Returns
-------
array_masked : numpy ndarray
Masked frame or cube.
"""
if not isinstance(fillwith, (int, float)):
raise ValueError('`fillwith` must be integer, float or np.nan')
cy, cx = frame_center(array)
shape = (array.shape[-2],array.shape[-1])
ind = circle(cy, cx, radius, shape=shape)
if mode == 'in':
array_masked = array.copy()
if array.ndim == 2:
array_masked[ind] = fillwith
elif array.ndim == 3:
array_masked[:, ind[1], ind[0]] = fillwith
elif array.ndim == 4:
array_masked[:, :, ind[1], ind[0]] = fillwith
elif mode == 'out':
array_masked = np.full_like(array, fillwith)
if array.ndim == 2:
array_masked[ind] = array[ind]
elif array.ndim == 3:
array_masked[:, ind[1], ind[0]] = array[:, ind[1], ind[0]]
elif array.ndim == 4:
array_masked[:, :, ind[1], ind[0]] = array[:, :, ind[1], ind[0]]
return array_masked
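# Illustrative usage sketch (not part of the original module; `frame` is a
# hypothetical 2d image):
#   frame = np.ones((11, 11))
#   inner_masked = mask_circle(frame, radius=3)              # central disk set to 0
#   outer_masked = mask_circle(frame, radius=3, mode='out')  # outside the disk set to 0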
def create_ringed_spider_mask(im_shape, ann_out, ann_in=0, sp_width=10,
sp_angle=0, nlegs=6):
"""
Mask out information outside the annulus and inside the spiders (zeros).
Parameters
----------
im_shape : tuple of int
Tuple of length two with 2d array shape (Y,X).
ann_out : int
Outer radius of the annulus.
ann_in : int, opt
Inner radius of the annulus.
sp_width : int, opt
Width of the spider arms (6 legs by default).
sp_angle : int, opt
angle of the first spider arm (on the positive horizontal axis) in
counter-clockwise sense.
nlegs: int, opt
Number of legs of the spider.
Returns
-------
mask : numpy ndarray
2d array of zeros and ones.
"""
mask = np.zeros(im_shape)
nbranch = int(nlegs/2)
s = im_shape
r = min(s)/2
theta = np.arctan2(sp_width/2, r)
cy, cx = frame_center(mask)
rr0, cc0 = circle(cy, cx, min(ann_out, cy))
mask[rr0, cc0] = 1
t0 = np.array([theta, np.pi-theta, np.pi+theta, np.pi*2 - theta])
if isinstance(sp_angle, (list,np.ndarray)):
dtheta = [sp_angle[i]-sp_angle[0] for i in range(nbranch)]
else:
sp_angle = [sp_angle]
dtheta = [i*np.pi/nbranch for i in range(nbranch)]
tn = np.zeros([nbranch,4])
xn = np.zeros_like(tn)
yn = np.zeros_like(tn)
for i in range(nbranch):
tn[i] = t0 + np.deg2rad(sp_angle[0] + dtheta[i])
xn[i] = r * np.cos(tn[i]) + s[1]/2
yn[i] = r * np.sin(tn[i]) + s[0]/2
rrn, ccn = polygon(yn[i], xn[i])
mask[rrn, ccn] = 0
rr4, cc4 = circle(cy, cx, ann_in)
mask[rr4, cc4] = 0
return mask
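# Illustrative usage sketch (arbitrary values): a 101x101 mask keeping the annulus
# between radii 10 and 45 px, with 6 spider legs of width 5 px, first leg at 30 deg.
#   spider_mask = create_ringed_spider_mask((101, 101), ann_out=45, ann_in=10,
#                                           sp_width=5, sp_angle=30)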
def dist(yc, xc, y1, x1):
"""
Return the Euclidean distance between two points, or between an array
of positions and a point.
"""
return np.sqrt(np.power(yc-y1,2) + np.power(xc-x1,2))
def dist_matrix(n, cx=None, cy=None):
"""
Create matrix with euclidian distances from a reference point (cx, cy).
Parameters
----------
n : int
output image shape is (n, n)
cx,cy : float
reference point. Defaults to the center.
Returns
-------
im : ndarray with shape (n, n)
Notes
-----
This is a replacement for ANDROMEDA's DISTC.
"""
if cx is None:
cx = (n - 1) / 2
if cy is None:
cy = (n - 1) / 2
yy, xx = np.ogrid[:n, :n]
return np.sqrt((yy-cy)**2 + (xx-cx)**2)
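# Illustrative sketch: the 3x3 distance map centred on the middle pixel,
# i.e. 0 at the centre, 1 on the edge midpoints, sqrt(2) at the corners.
#   dist_matrix(3)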
def frame_center(array, verbose=False):
"""
Return the coordinates y,x of the frame(s) center.
Parameters
----------
array : 2d/3d/4d numpy ndarray
Frame or cube.
verbose : bool optional
If True the center coordinates are printed out.
Returns
-------
cy, cx : float
Coordinates of the center.
"""
if array.ndim == 2:
shape = array.shape
elif array.ndim == 3:
shape = array[0].shape
elif array.ndim == 4:
shape = array[0, 0].shape
else:
raise ValueError('`array` is not a 2d, 3d or 4d array')
cy = shape[0] / 2 - 0.5
cx = shape[1] / 2 - 0.5
if verbose:
print('Center px coordinates at x,y = ({}, {})'.format(cx, cy))
return cy, cx
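# Illustrative sketch: for an even-sized frame the centre falls between pixels.
#   frame_center(np.zeros((4, 4)))   # -> (1.5, 1.5)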
def get_square(array, size, y, x, position=False, force=False, verbose=True):
"""
Return an square subframe from a 2d array or image.
Parameters
----------
array : 2d numpy ndarray
Input frame.
size : int
Size of the subframe.
y : int
Y coordinate of the center of the subframe (obtained with the function
``frame_center``).
x : int
X coordinate of the center of the subframe (obtained with the function
``frame_center``).
position : bool, optional
If set to True return also the coordinates of the bottom-left vertex.
force : bool, optional
Size and the size of the 2d array must be both even or odd. With
``force`` set to True this condition can be avoided.
verbose : bool optional
If True, warning messages might be shown.
Returns
-------
array_out : numpy ndarray
Sub array.
y0, x0 : int
[position=True] Coordinates of the bottom-left vertex.
"""
size_init_y = array.shape[0]
size_init_x = array.shape[1]
size_init = array.shape[0] # "force" cases assume square input frame
if array.ndim != 2:
raise TypeError('Input array is not a 2d array.')
if not isinstance(size, int):
raise TypeError('`Size` must be integer')
if size >= size_init_y and size >= size_init_x: # assuming square frames
msg = "`Size` is equal to or bigger than the initial frame size"
raise ValueError(msg)
if not force:
# Even input size
if size_init % 2 == 0:
# Odd size
if size % 2 != 0:
size += 1
if verbose:
print("`Size` is odd (while input frame size is even). "
"Setting `size` to {} pixels".format(size))
# Odd input size
else:
# Even size
if size % 2 == 0:
size += 1
if verbose:
print("`Size` is even (while input frame size is odd). "
"Setting `size` to {} pixels".format(size))
else:
# Even input size
if size_init % 2 == 0:
# Odd size
if size % 2 != 0 and verbose:
print("WARNING: `size` is odd while input frame size is even. "
"Make sure the center coordinates are set properly")
# Odd input size
else:
# Even size
if size % 2 == 0 and verbose:
print("WARNING: `size` is even while input frame size is odd. "
"Make sure the center coordinates are set properly")
# wing is added to the sides of the subframe center
wing = (size - 1) / 2
y0 = int(y - wing)
y1 = int(y + wing + 1) # +1 cause endpoint is excluded when slicing
x0 = int(x - wing)
x1 = int(x + wing + 1)
if y0 < 0 or x0 < 0 or y1 > size_init_y or x1 > size_init_x:
# assuming square frames
raise RuntimeError('square cannot be obtained with size={}, y={}, x={}'
''.format(size, y, x))
array_out = array[y0: y1, x0: x1].copy()
if position:
return array_out, y0, x0
else:
return array_out
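# Illustrative sketch (hypothetical toy frame): extract the central 3x3 patch of
# a 5x5 image.
#   frame = np.arange(25).reshape(5, 5)
#   patch = get_square(frame, size=3, y=2, x=2, verbose=False)
#   # patch.shape == (3, 3) and patch[1, 1] == frame[2, 2]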
def get_circle(array, radius, cy=None, cx=None, mode="mask"):
"""
Return a centered circular region from a 2d ndarray.
Parameters
----------
array : numpy ndarray
Input 2d array or image.
radius : int
The radius of the circular region.
cy, cx : int, optional
Coordinates of the circle center. If one of them is ``None``, the center
of ``array`` is used.
mode : {'mask', 'val'}, optional
Controls what is returned: array with circular mask applied, or values
of the pixels in the circular region.
Returns
-------
masked : numpy ndarray
[mode="mask"] Input array with the circular mask applied.
values : numpy ndarray
[mode="val"] 1d array with the values of the pixels in the circular
region.
Notes
-----
An alternative implementation would use ``skimage.draw.circle``. ``circle``
    performs better on large ``array``s (e.g. 1000 px, 10000 px), while the
current implementation is faster for small ``array``s (e.g. 100px). See
`test_shapes.py` for benchmark details.
"""
if array.ndim != 2:
raise TypeError('Input array is not a frame or 2d array.')
sy, sx = array.shape
if cy is None or cx is None:
cy, cx = frame_center(array, verbose=False)
# ogrid is a multidim mesh creator (faster than mgrid):
yy, xx = np.ogrid[:sy, :sx]
circle = (yy - cy) ** 2 + (xx - cx) ** 2 # eq of circle. sq dist to center
circle_mask = circle < radius ** 2 # boolean mask
if mode == "mask":
return array * circle_mask
elif mode == "val":
return array[circle_mask]
else:
raise ValueError("mode '{}' unknown!".format(mode))
def get_ellipse(data, a, b, pa, cy=None, cx=None, mode="ind"):
"""
Return a centered elliptical region from a 2d ndarray.
Parameters
----------
data : numpy ndarray or tuple
Input 2d array (image) or tuple with a shape.
a : float
Semi-major axis.
b : float
Semi-minor axis.
pa : deg, float
The PA of the semi-major axis in degrees.
cy, cx : int or None, optional
Coordinates of the circle center. If ``None``, the center is determined
by the ``frame_center`` function.
mode : {'ind', 'val', 'mask', 'bool'}, optional
Controls what is returned: indices of selected pixels, values of
selected pixels, or a boolean mask.
Returns
-------
indices : tuple(y, x)
[mode='ind'] Coordinates of the inner elliptical region.
values : 1d ndarray
[mode='val'] Values of the pixels in the inner elliptical region.
masked : 2d ndarray
[mode='mask'] Input image where the outer region is masked with ``0``.
bool_mask : 2d boolean ndarray
[mode='bool'] A boolean mask where ``True`` is the inner region.
"""
def distance(yc, xc, y1, x1):
return np.sqrt((yc - y1) ** 2 + (xc - x1) ** 2)
# --------------------------------------------------------------------------
array = frame_or_shape(data)
if cy is None or cx is None:
cy, cx = frame_center(array, verbose=False)
# Definition of other parameters of the ellipse
f = np.sqrt(a ** 2 - b ** 2) # dist between center and foci of the ellipse
pa_rad = np.deg2rad(pa)
pos_f1 = (cy + f * np.cos(pa_rad), cx + f * np.sin(pa_rad)) # first focus
pos_f2 = (cy - f * np.cos(pa_rad), cx - f * np.sin(pa_rad)) # second focus
# ogrid is a multidim mesh creator (faster than mgrid):
yy, xx = np.ogrid[:array.shape[0], :array.shape[1]]
ellipse = (distance(yy, xx, pos_f1[0], pos_f1[1]) +
distance(yy, xx, pos_f2[0], pos_f2[1]))
ellipse_mask = ellipse < 2 * a # boolean mask
if mode == "ind":
return np.where(ellipse_mask)
elif mode == "val":
return array[ellipse_mask]
elif mode == "mask":
return array * ellipse_mask
elif mode == "bool":
return ellipse_mask
else:
raise ValueError("mode '{}' unknown!".format(mode))
def get_annulus_segments(data, inner_radius, width, nsegm=1, theta_init=0,
optim_scale_fact=1, mode="ind"):
"""
    Return indices or values in segments of a centered annulus.
The annulus is defined by ``inner_radius <= annulus < inner_radius+width``.
Parameters
----------
data : 2d numpy ndarray or tuple
        Input 2d array (image) or tuple with its shape.
inner_radius : float
The inner radius of the donut region.
width : float
The size of the annulus.
nsegm : int
Number of segments of annulus to be extracted.
theta_init : int
Initial azimuth [degrees] of the first segment, counting from the
positive x-axis counterclockwise.
optim_scale_fact : float
To enlarge the width of the segments, which can then be used as
optimization segments (e.g. in LOCI).
mode : {'ind', 'val', 'mask'}, optional
Controls what is returned: indices of selected pixels, values of
selected pixels, or a boolean mask.
Returns
-------
indices : list of ndarrays
[mode='ind'] Coordinates of pixels for each annulus segment.
values : list of ndarrays
[mode='val'] Pixel values.
masked : list of ndarrays
[mode='mask'] Copy of ``data`` with masked out regions.
Notes
-----
Moving from ``get_annulus`` to ``get_annulus_segments``:
.. code::python
# get_annulus handles one single segment only, so note the ``[0]`` after
the call to get_annulus_segments if you want to work with one single
segment only.
get_annulus(arr, 2, 3, output_indices=True)
# is the same as
get_annulus_segments(arr, 2, 3)[0]
get_annulus(arr, inr, w, output_values=True)
# is the same as
get_annulus_segments(arr, inr, w, mode="val")[0]
get_annulus(arr, inr, w)
# is the same as
get_annulus_segments(arr, inr, w, mode="mask")[0]
# the only difference is the handling of the border values:
# get_annulus_segments is `in <= ann < out`, while get_annulus is
# `in <= ann <= out`. But that should make no difference in practice.
"""
array = frame_or_shape(data)
if not isinstance(nsegm, int):
raise TypeError('`nsegm` must be an integer')
cy, cx = frame_center(array)
azimuth_coverage = np.deg2rad(int(np.ceil(360 / nsegm)))
twopi = 2 * np.pi
yy, xx = np.mgrid[:array.shape[0], :array.shape[1]]
rad = np.sqrt((xx - cx) ** 2 + (yy - cy) ** 2)
phi = np.arctan2(yy - cy, xx - cx)
phirot = phi % twopi
outer_radius = inner_radius + (width*optim_scale_fact)
masks = []
for i in range(nsegm):
phi_start = np.deg2rad(theta_init) + (i * azimuth_coverage)
phi_end = phi_start + azimuth_coverage
if phi_start < twopi and phi_end > twopi:
masks.append((rad >= inner_radius) & (rad < outer_radius) &
(phirot >= phi_start) & (phirot <= twopi) |
(rad >= inner_radius) & (rad < outer_radius) &
(phirot >= 0) & (phirot < phi_end - twopi))
elif phi_start >= twopi and phi_end > twopi:
masks.append((rad >= inner_radius) & (rad < outer_radius) &
(phirot >= phi_start - twopi) &
(phirot < phi_end - twopi))
else:
masks.append((rad >= inner_radius) & (rad < outer_radius) &
(phirot >= phi_start) & (phirot < phi_end))
if mode == "ind":
return [np.where(mask) for mask in masks]
elif mode == "val":
return [array[mask] for mask in masks]
elif mode == "mask":
return [array*mask for mask in masks]
else:
raise ValueError("mode '{}' unknown!".format(mode))
def get_annular_wedge(data, inner_radius, width, wedge=(0,360), mode="ind"):
"""
    Return indices or values in an azimuthal wedge of a centered annulus.
The annulus is defined by ``inner_radius <= annulus < inner_radius+width``.
Parameters
----------
data : 2d numpy ndarray or tuple
        Input 2d array (image) or tuple with its shape.
inner_radius : float
The inner radius of the donut region.
width : float
The size of the annulus.
wedge : tuple of 2 floats
Initial and final azimuths [degrees] of the annular segment, counting
from the positive x-axis counter-clockwise.
mode : {'ind', 'val', 'mask'}, optional
Controls what is returned: indices of selected pixels, values of
selected pixels, or a boolean mask.
Returns
-------
indices : list of ndarrays
[mode='ind'] Coordinates of pixels for each annulus segment.
values : list of ndarrays
[mode='val'] Pixel values.
masked : list of ndarrays
[mode='mask'] Copy of ``data`` with masked out regions.
Notes
-----
Moving from ``get_annulus`` to ``get_annulus_segments``:
.. code::python
# get_annulus handles one single segment only, so note the ``[0]`` after
the call to get_annulus_segments if you want to work with one single
segment only.
get_annulus(arr, 2, 3, output_indices=True)
# is the same as
get_annulus_segments(arr, 2, 3)[0]
get_annulus(arr, inr, w, output_values=True)
# is the same as
get_annulus_segments(arr, inr, w, mode="val")[0]
get_annulus(arr, inr, w)
# is the same as
get_annulus_segments(arr, inr, w, mode="mask")[0]
# the only difference is the handling of the border values:
# get_annulus_segments is `in <= ann < out`, while get_annulus is
# `in <= ann <= out`. But that should make no difference in practice.
"""
array = frame_or_shape(data)
cy, cx = frame_center(array)
azimuth_coverage = np.deg2rad(wedge[1]-wedge[0])
twopi = 2 * np.pi
yy, xx = np.mgrid[:array.shape[0], :array.shape[1]]
rad = np.sqrt((xx - cx) ** 2 + (yy - cy) ** 2)
phi = np.arctan2(yy - cy, xx - cx)
phirot = phi % twopi
outer_radius = inner_radius + width
phi_start = np.deg2rad(wedge[0])
phi_end = phi_start + azimuth_coverage
if phi_start < twopi and phi_end > twopi:
mask = ((rad >= inner_radius) & (rad < outer_radius) &
(phirot >= phi_start) & (phirot <= twopi) |
(rad >= inner_radius) & (rad < outer_radius) &
(phirot >= 0) & (phirot < phi_end - twopi))
elif phi_start >= twopi and phi_end > twopi:
mask = ((rad >= inner_radius) & (rad < outer_radius) &
(phirot >= phi_start - twopi) &
(phirot < phi_end - twopi))
else:
mask = ((rad >= inner_radius) & (rad < outer_radius) &
(phirot >= phi_start) & (phirot < phi_end))
if mode == "ind":
return np.where(mask)
elif mode == "val":
return array[mask]
elif mode == "mask":
return array*mask
else:
raise ValueError("mode '{}' unknown!".format(mode))
def get_ell_annulus(data, a, b, PA, width, cy=None, cx=None, mode="ind"):
"""
    Return a centered elliptical annulus from a 2d ndarray.
    All the other pixels are set to zero.
Parameters
----------
data : numpy ndarray or tuple
Input 2d array (image) or tuple with a shape.
a : float
Semi-major axis.
b : float
Semi-minor axis.
PA : deg, float
The PA of the semi-major axis in degrees.
width : float
        The size of the annulus along the semi-major axis; it is proportionally
        thinner along the semi-minor axis.
cy, cx : int or None, optional
Coordinates of the circle center. If ``None``, the center is determined
by the ``frame_center`` function.
mode : {'ind', 'val', 'mask'}, optional
Controls what is returned: indices of selected pixels, values of
selected pixels, or a boolean mask.
Returns
-------
indices : tuple(y, x)
[mode='ind'] Coordinates of the inner elliptical region.
values : 1d ndarray
[mode='val'] Values of the pixels in the inner elliptical region.
masked : 2d ndarray
[mode='mask'] Input image where the outer region is masked with ``0``.
"""
array = frame_or_shape(data)
hwa = width / 2 # half width for a
hwb = (width * b / a) / 2 # half width for b
big_ellipse = get_ellipse(array, a + hwa, b + hwb, PA, cy=cy, cx=cx,
mode="bool")
small_ellipse = get_ellipse(array, a - hwa, b - hwb, PA, cy=cy, cx=cx,
mode="bool")
ell_ann_mask = big_ellipse ^ small_ellipse
if mode == "ind":
return np.where(ell_ann_mask)
elif mode == "val":
return array[ell_ann_mask]
elif mode == "mask":
return array * ell_ann_mask
elif mode == "bool":
return ell_ann_mask
else:
raise ValueError("mode '{}' unknown!".format(mode))
def matrix_scaling(matrix, scaling):
"""
Scale a matrix using ``sklearn.preprocessing.scale`` function.
Parameters
----------
matrix : 2d numpy ndarray
Input 2d array.
scaling : None or string
Scaling method.
``None``
no scaling is performed on the input data before SVD
``"temp-mean"``
temporal px-wise mean subtraction
``"spat-mean"``
the spatial mean is subtracted
``temp-standard"``
temporal mean centering plus scaling to unit variance
``"spat-standard"``
spatial mean centering plus scaling to unit variance
Returns
-------
matrix : 2d numpy ndarray
2d array with scaled values.
"""
if scaling is None:
pass
elif scaling == 'temp-mean':
matrix = scale(matrix, with_mean=True, with_std=False)
elif scaling == 'spat-mean':
matrix = scale(matrix, with_mean=True, with_std=False, axis=1)
elif scaling == 'temp-standard':
matrix = scale(matrix, with_mean=True, with_std=True)
elif scaling == 'spat-standard':
matrix = scale(matrix, with_mean=True, with_std=True, axis=1)
else:
raise ValueError('Scaling mode not recognized')
return matrix
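# Illustrative sketch (tiny made-up matrix): 'temp-mean' subtracts the temporal
# (column-wise) mean from every pixel.
#   m = np.array([[1., 2.], [3., 4.]])
#   matrix_scaling(m, 'temp-mean')
#   # -> array([[-1., -1.],
#   #           [ 1.,  1.]])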
def prepare_matrix(array, scaling=None, mask_center_px=None, mode='fullfr',
inner_radius=None, outer_radius=None, verbose=True):
"""
Build the matrix for the SVD/PCA and other matrix decompositions.
Center the data and mask the frame's central area if needed.
Parameters
----------
array : 3d numpy ndarray
Input cube.
scaling : {None, "temp-mean", spat-mean", "temp-standard", "spat-standard"},
None or str optional
Pixel-wise scaling mode using ``sklearn.preprocessing.scale`` function.
If set to None, the input matrix is left untouched. Otherwise:
``temp-mean``: temporal px-wise mean is subtracted.
``spat-mean``: spatial mean is subtracted.
``temp-standard``: temporal mean centering plus scaling pixel values
to unit variance.
``spat-standard``: spatial mean centering plus scaling pixel values
to unit variance.
mask_center_px : None or int, optional
[mode='fullfr'] Whether to mask the center of the frames or not.
mode : {'fullfr', 'annular'}, optional
Whether to use the whole frames or a single annulus.
inner_radius : int or float, optional
[mode='annular'] Distance in pixels from the center of the frame to the
inner radius of the annulus.
outer_radius : int or float, optional
[mode='annular'] Distance in pixels from the center of the frame to the
outer radius of the annulus.
verbose : bool, optional
If True prints intermediate info.
Returns
-------
matrix : 2d numpy ndarray
        Output matrix whose rows are vectorized frames from the input cube.
ind : tuple
[mode='annular'] Indices of the annulus as ``(yy, xx)``.
"""
if mode == 'annular':
if inner_radius is None or outer_radius is None:
raise ValueError('`inner_radius` and `outer_radius` must be defined'
' in annular mode')
fr_size = array.shape[1]
annulus_width = int(np.round(outer_radius - inner_radius))
ind = get_annulus_segments((fr_size, fr_size), inner_radius,
annulus_width, nsegm=1)[0]
yy, xx = ind
matrix = array[:, yy, xx]
matrix = matrix_scaling(matrix, scaling)
if verbose:
msg = 'Done vectorizing the cube annulus. Matrix shape: ({}, {})'
print(msg.format(matrix.shape[0], matrix.shape[1]))
return matrix, ind
elif mode == 'fullfr':
if mask_center_px:
array = mask_circle(array, mask_center_px)
nfr = array.shape[0]
matrix = np.reshape(array, (nfr, -1)) # == for i: array[i].flatten()
matrix = matrix_scaling(matrix, scaling)
if verbose:
msg = 'Done vectorizing the frames. Matrix shape: ({}, {})'
print(msg.format(matrix.shape[0], matrix.shape[1]))
return matrix
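# Illustrative usage sketch (hypothetical toy cube): full-frame vectorization of a
# 10-frame cube of 20x20 images.
#   cube = np.random.rand(10, 20, 20)
#   M = prepare_matrix(cube, scaling=None, verbose=False)   # shape (10, 400)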
def reshape_matrix(array, y, x):
"""
Convert a matrix whose rows are vect. frames to a cube with reshaped frames.
Parameters
----------
array : 2d ndarray
Input data of shape ``(nframes, npixels)``. Every row (``array[n]``)
corresponds to one vectorized ("flattened") 2d frame.
y, x : int
desired height and width of the frames. ``y*x = npixels``
Returns
-------
cube : 3d ndarray
Cube of shape ``(nframes, y, x)``.
Examples
--------
.. code:: python
In [1]: vect_frames = np.array([[1, 1, 1, 2, 2, 2], [1, 2, 3, 4, 5, 6]])
In [2]: cube = vip.var.reshape_matrix(vect_frames, 2, 3)
In [3]: cube
Out[3]:
array([[[1, 1, 1],
[2, 2, 2]],
[[1, 2, 3],
[4, 5, 6]]])
In [4]: cube.shape
Out[4]: (2, 2, 3)
"""
return array.reshape(array.shape[0], y, x)
|
mit
|
dpshelio/scikit-image
|
skimage/viewer/tests/test_viewer.py
|
35
|
2165
|
from skimage import data
from skimage.viewer.qt import QtGui, QtCore, has_qt
from skimage.viewer import ImageViewer, CollectionViewer
from skimage.viewer.plugins import OverlayPlugin
from skimage.transform import pyramid_gaussian
from skimage.filters import sobel
from numpy.testing import assert_equal
from numpy.testing.decorators import skipif
from skimage._shared.version_requirements import is_installed
from skimage._shared._warnings import expected_warnings
@skipif(not has_qt)
def test_viewer():
astro = data.astronaut()
coins = data.coins()
view = ImageViewer(astro)
import tempfile
_, filename = tempfile.mkstemp(suffix='.png')
view.show(False)
view.close()
view.save_to_file(filename)
view.open_file(filename)
assert_equal(view.image, astro)
view.image = coins
assert_equal(view.image, coins)
view.save_to_file(filename)
view.open_file(filename)
view.reset_image()
assert_equal(view.image, coins)
def make_key_event(key):
return QtGui.QKeyEvent(QtCore.QEvent.KeyPress, key,
QtCore.Qt.NoModifier)
@skipif(not has_qt)
def test_collection_viewer():
img = data.astronaut()
img_collection = tuple(pyramid_gaussian(img))
view = CollectionViewer(img_collection)
make_key_event(48)
view.update_index('', 2)
assert_equal(view.image, img_collection[2])
view.keyPressEvent(make_key_event(53))
assert_equal(view.image, img_collection[5])
view._format_coord(10, 10)
@skipif(not has_qt)
@skipif(not is_installed('matplotlib', '>=1.2'))
def test_viewer_with_overlay():
img = data.coins()
ov = OverlayPlugin(image_filter=sobel)
viewer = ImageViewer(img)
viewer += ov
import tempfile
_, filename = tempfile.mkstemp(suffix='.png')
ov.color = 3
assert_equal(ov.color, 'yellow')
with expected_warnings(['precision loss']):
viewer.save_to_file(filename)
ov.display_filtered_image(img)
assert_equal(ov.overlay, img)
ov.overlay = None
assert_equal(ov.overlay, None)
ov.overlay = img
assert_equal(ov.overlay, img)
assert_equal(ov.filtered_image, img)
|
bsd-3-clause
|
melgor/autograd
|
examples/fluidsim/wing.py
|
4
|
6137
|
from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import os
from six.moves import range
rows, cols = 40, 60
# Fluid simulation code based on
# "Real-Time Fluid Dynamics for Games" by Jos Stam
# http://www.intpowertechcorp.com/GDC03.pdf
def occlude(f, occlusion):
return f * (1 - occlusion)
def project(vx, vy, occlusion):
"""Project the velocity field to be approximately mass-conserving,
using a few iterations of Gauss-Seidel."""
p = np.zeros(vx.shape)
div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1)
+ np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))
div = make_continuous(div, occlusion)
for k in range(50):
p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1)
+ np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0
p = make_continuous(p, occlusion)
vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))
vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))
vx = occlude(vx, occlusion)
vy = occlude(vy, occlusion)
return vx, vy
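# --- Hedged sketch (added for illustration; not in the original example) ---
# `project` removes divergence from (vx, vy) with a few Gauss-Seidel sweeps.
# The helper below, assuming no occlusion, compares the mean absolute
# divergence before and after projection; it is never called at import time.
def _check_projection():
    xs = np.tile(np.linspace(0.0, 1.0, cols), (rows, 1))
    ys = np.tile(np.linspace(0.0, 1.0, rows)[:, None], (1, cols))
    vx, vy = xs, ys                      # a deliberately divergent field
    occlusion = np.zeros((rows, cols))
    def divergence(vx, vy):
        return -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1)
                       + np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))
    before = np.abs(divergence(vx, vy)).mean()
    vx2, vy2 = project(vx, vy, occlusion)
    after = np.abs(divergence(vx2, vy2)).mean()
    return before, after                 # `after` should be clearly smaller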
def advect(f, vx, vy):
"""Move field f according to x and y velocities (u and v)
using an implicit Euler integrator."""
rows, cols = f.shape
cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
center_xs = (cell_xs - vx).ravel()
center_ys = (cell_ys - vy).ravel()
# Compute indices of source cells.
left_ix = np.floor(center_ys).astype(np.int)
top_ix = np.floor(center_xs).astype(np.int)
rw = center_ys - left_ix # Relative weight of right-hand cells.
bw = center_xs - top_ix # Relative weight of bottom cells.
left_ix = np.mod(left_ix, rows) # Wrap around edges of simulation.
right_ix = np.mod(left_ix + 1, rows)
top_ix = np.mod(top_ix, cols)
bot_ix = np.mod(top_ix + 1, cols)
# A linearly-weighted sum of the 4 surrounding cells.
flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \
+ rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
return np.reshape(flat_f, (rows, cols))
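# --- Hedged sketch (added for illustration; not in the original example) ---
# `advect` traces each cell centre back along the velocity field and
# interpolates bilinearly (a semi-Lagrangian step). With a uniform integer
# velocity the result is essentially a wrapped shift of the field.
def _check_advect():
    f = np.zeros((rows, cols))
    f[5, 7] = 1.0
    vx = np.ones((rows, cols))   # one cell per step in +x
    vy = np.zeros((rows, cols))
    moved = advect(f, vx, vy)
    return moved                 # the spike should now sit at row 5, column 8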
def make_continuous(f, occlusion):
non_occluded = 1 - occlusion
num = np.roll(f, 1, axis=0) * np.roll(non_occluded, 1, axis=0)\
+ np.roll(f, -1, axis=0) * np.roll(non_occluded, -1, axis=0)\
+ np.roll(f, 1, axis=1) * np.roll(non_occluded, 1, axis=1)\
+ np.roll(f, -1, axis=1) * np.roll(non_occluded, -1, axis=1)
den = np.roll(non_occluded, 1, axis=0)\
+ np.roll(non_occluded, -1, axis=0)\
+ np.roll(non_occluded, 1, axis=1)\
+ np.roll(non_occluded, -1, axis=1)
return f * non_occluded + (1 - non_occluded) * num / ( den + 0.001)
def sigmoid(x):
return 0.5*(np.tanh(x) + 1.0) # Output ranges from 0 to 1.
def simulate(vx, vy, num_time_steps, occlusion, ax=None, render=False):
occlusion = sigmoid(occlusion)
# Disallow occlusion outside a certain area.
mask = np.zeros((rows, cols))
mask[10:30, 10:30] = 1.0
occlusion = occlusion * mask
# Initialize smoke bands.
red_smoke = np.zeros((rows, cols))
red_smoke[rows // 4:rows // 2] = 1
blue_smoke = np.zeros((rows, cols))
blue_smoke[rows // 2:3 * rows // 4] = 1
print("Running simulation...")
vx, vy = project(vx, vy, occlusion)
for t in range(num_time_steps):
plot_matrix(ax, red_smoke, occlusion, blue_smoke, t, render)
vx_updated = advect(vx, vx, vy)
vy_updated = advect(vy, vx, vy)
vx, vy = project(vx_updated, vy_updated, occlusion)
red_smoke = advect(red_smoke, vx, vy)
red_smoke = occlude(red_smoke, occlusion)
blue_smoke = advect(blue_smoke, vx, vy)
blue_smoke = occlude(blue_smoke, occlusion)
plot_matrix(ax, red_smoke, occlusion, blue_smoke, num_time_steps, render)
return vx, vy
def plot_matrix(ax, r, g, b, t, render=False):
if ax:
plt.cla()
ax.imshow(np.concatenate((r[...,np.newaxis], g[...,np.newaxis], b[...,np.newaxis]), axis=2))
ax.set_xticks([])
ax.set_yticks([])
plt.draw()
if render:
plt.savefig('step{0:03d}.png'.format(t), bbox_inches='tight')
plt.pause(0.001)
if __name__ == '__main__':
simulation_timesteps = 20
print("Loading initial and target states...")
init_vx = np.ones((rows, cols))
init_vy = np.zeros((rows, cols))
# Initialize the occlusion to be a block.
init_occlusion = -np.ones((rows, cols))
init_occlusion[15:25, 15:25] = 0.0
init_occlusion = init_occlusion.ravel()
def drag(vx): return np.mean(init_vx - vx)
def lift(vy): return np.mean(vy - init_vy)
def objective(params):
cur_occlusion = np.reshape(params, (rows, cols))
final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion)
return -lift(final_vy) / drag(final_vx)
# Specify gradient of objective function using autograd.
objective_with_grad = value_and_grad(objective)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, frameon=False)
def callback(weights):
cur_occlusion = np.reshape(weights, (rows, cols))
simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion, ax)
print("Rendering initial flow...")
callback(init_occlusion)
print("Optimizing initial conditions...")
result = minimize(objective_with_grad, init_occlusion, jac=True, method='CG',
options={'maxiter':50, 'disp':True}, callback=callback)
print("Rendering optimized flow...")
final_occlusion = np.reshape(result.x, (rows, cols))
simulate(init_vx, init_vy, simulation_timesteps, final_occlusion, ax, render=True)
print("Converting frames to an animated GIF...") # Using imagemagick.
os.system("convert -delay 5 -loop 0 step*.png "
"-delay 250 step{0:03d}.png wing.gif".format(simulation_timesteps))
os.system("rm step*.png")
|
mit
|
gerritholl/pyatmlab
|
pyatmlab/db.py
|
2
|
39248
|
#!/usr/bin/env python
"""Describe batches of data: atmospheric db, scattering db, etc.
This module contains a class with functionality to read atmospheric data
from a variety of formats and write it to a variety of formats.
Mostly obtained from PyARTS.
"""
import sys
import abc
import copy
import random
import itertools
import pickle
import pathlib
import logging
import lzma
import numpy
import datetime
now = datetime.datetime.now
import numpy.lib.recfunctions
#import numpy.ma
import scipy.io
import scipy.stats
import matplotlib.mlab # contains PCA class
#import matplotlib.pyplot
#import matplotlib.cm
import progressbar
from . import tools
from . import stats
from . import config
class AtmosphericDatabase:
"""Represents an atmospheric database
Apart from the default constructor, those constructors may be useful:
- :func:`AtmosphericDatabase.from_evans07`
- :func:`AtmosphericDatabase.from_evans12`
Attributes:
- ``data``
- ``instrument``
- ``name``
"""
def __init__(self, **kwargs):
"""Constructor for atmospheric database
Takes only keyword arguments, where each keyword ends up as an
instance attribute, i.e.
AtmosphericDatabase(instrument=pyatmlab.instruments.ici).
"""
for (k, v) in kwargs.items():
setattr(self, k, v)
@classmethod
def from_evans07(cls, dbfile, instrument=None):
"""Read from db in style for Evans study in 2006-2007
:param dbfile: Path to database-file
:param instrument: Optional argument, pass instrument.
"""
with open(dbfile, 'r') as fp:
ncases = int(fp.readline().strip().split()[0])
fp.readline() # ncloudpar ntemplay nrhlay nchan nztab nmutab ntinfo nuinfo
fp.readline() # RH layers tops / bottoms
chan_names = fp.readline().split("=")[0].strip().split()
fp.readline() # ztab
fp.readline() # mutab
fp.readline() # header line
# pre-allocate
n_bt = len(chan_names)
Z = numpy.zeros(ncases, dtype=cls.get_dtype(n_bt))
i = 0
while i<ncases:
line = fp.readline()
(iwp, dme, zmed, *rest) = (float(x) for x in line.strip().split())
fp.readline() # empty line
bt = numpy.array([float(x) for x in fp.readline().strip().split()])
Z["BT"][i] = bt
Z["Dme"][i] = dme
Z["Zmed"][i] = zmed
Z["IWP"][i] = iwp
i += 1
if i%(ncases//5) == 0:
print(now(), "done", "{}/{}".format(i, ncases))
return cls(data=Z, instrument=instrument, name="colorado2007")
@classmethod
def from_evans12(cls, dbfile, instrument=None, profile=False,
radar=False, maxlen=None, ext=False):
"""Read from db in style for Evans study in 2012
:param dbfile: Path to database-file
:param instrument: Optional argument, pass instrument.
:param profile: Include profile information. Defaults to False.
:param radar: Include radar information. Defaults to False.
:param int maxlen: Optional maximum length of db to read.
:param bool ext: Read extended version, with manual additions by
Gerrit 2014-01-16 (i.e. also outputting non-retrieved
quantities used in the RTM)
"""
print(now(), "Reading", dbfile)
fp = open(dbfile, 'r')
# skip and interpret header
fp.readline() # header line
ncases = int(fp.readline().strip().split()[0])
maxlen = maxlen or ncases
fp.readline() # no. of dimensions of independent Gaussian prior space
fp.readline() # no. of elements in observation vector
nchans = int(fp.readline().strip().split()[0])
chan_names = fp.readline().split()
# print(now(), "compare:", self.instrument.channel_string(), chan_names)
# no. values per channel
chan_lengths = numpy.array([int(c)
for c in fp.readline().strip().split()])
fp.readline() # no. of viewing angles
fp.readline() # cos for each viewing angle
fp.readline() # random seed
fp.readline() # no. retrieved quantities
fp.readline() # no. integrated cloud params
n_layer_rh = int(fp.readline().strip().split()[0]) # no. retrieved humidity levels
fp.readline() # height per humidity layer
n_layer_ice = int(fp.readline().strip().split()[0]) # no. retrieved ice cloud layers
(ice_bottom, ice_top) = (float(c) for c in
fp.readline().split('!')[0].strip().split()) # bottom and top height ice cloud region
ice_axis = numpy.linspace(ice_bottom, ice_top, n_layer_ice)
fp.readline() # header line
if ext:
n_aux = int(fp.readline().strip().split()[0]) # no. aux
chan_is_radar = numpy.array(["radar" in x.lower()
for x in chan_names])
# construct dtype
#
# Will have two dtypes:
# - one where all channels are named
# - one with a single large dtype for all radiances
dt = [("IWP", numpy.float32),
("Dme", numpy.float32),
("Zmed", numpy.float32)]
dt.extend([("shape",
[("plateagg", numpy.float32),
("sphragg", numpy.float32),
("snowagg", numpy.float32),
("hail", numpy.float32)])])
if profile:
dt.append(("IWC", numpy.float32, n_layer_ice))
dt.append(("Dme_prof", numpy.float32, n_layer_ice))
dt.append(("RH", numpy.float32, n_layer_rh))
if ext:
n_per_aux = n_aux//4
dt.append(("height", numpy.float32, n_per_aux))
dt.append(("temp", numpy.float32, n_per_aux))
dt.append(("pres", numpy.float32, n_per_aux))
dt.append(("r", numpy.float32, n_per_aux))
dt_alt = copy.copy(dt)
dt.extend([(nm, numpy.float32, ln)
for (nm, ln) in zip(chan_names, chan_lengths)])
dt_alt.append(("BT", numpy.float32,
numpy.count_nonzero(~chan_is_radar)))
dt_alt.extend([(nm, numpy.float32, ln)
for (nm, ln, israd) in zip(chan_names,
chan_lengths,
chan_is_radar)
if israd])
# if radar:
# dt.extend([(nm, numpy.float32, chanlen)
# for (nm, chanlen, israd)
# in zip(chan_names, chan_lengths, chan_is_radar)
# if israd])
# get index in measurement array for each measurement
edges = numpy.r_[0, chan_lengths.cumsum()]
locations = {chan_names[k]: slice(edges[k], edges[k+1])
for k in range(chan_lengths.size)}
# passive = numpy.array([ln
# for ln in chan_lengths
# if not "radar" in nm.lower()]
#n_bt = nchans - sum(["radar" in x.lower() for x in chan_names])
# pre-allocate
data = numpy.empty(maxlen, dtype=dt)
data_alt = data.view(dtype=dt_alt)
# use while, not for-loop, because I read several lines per
# iteration
i = 0
while i<maxlen:
line = fp.readline()
if not line: # empty means EOF
break
# if i%nth != 0: # allow for reading less
# continue
(rvcheck, IWP, Dme, Zmed,
plateagg, sphragg, snowagg, hail,
meltLWP, cldLWP) = (float(x) for x in line.strip().split())
line = fp.readline()
RH = numpy.array([float(x) for x in line.strip().split()])
line = fp.readline()
IWC = numpy.array([float(x) for x in line.strip().split()])
line = fp.readline()
Dmeprof = numpy.array([float(x) for x in line.strip().split()])
line = fp.readline()
measurements = numpy.array([float(x)
for x in line.strip().split()])
if ext:
line = fp.readline()
aux = numpy.array([float(x) for x in
line.strip().split()])
# BT = numpy.array(measurements[~chan_is_radar])
# radar = numpy.array(measurements[chan_is_radar])
# radar_integrated = numpy.array(measurements[9])
# radar_prof = numpy.array(measurements[10:])
#print "measurements:", BT
#print "IWP:", IWP
#print "IWC:", IWC
data["IWP"][i] = IWP
data["Dme"][i] = Dme
data["Zmed"][i] = Zmed
# data["BT"][i] = BT
data["shape"][i]["plateagg"] = plateagg
data["shape"][i]["sphragg"] = sphragg
data["shape"][i]["snowagg"] = snowagg
data["shape"][i]["hail"] = hail
for nm in chan_names:
data[nm][i] = measurements[locations[nm]]
if profile:
data["IWC"][i] = IWC
data["Dme_prof"][i] = Dmeprof
data["RH"][i] = RH
if ext:
data["height"][i] = aux[0*n_per_aux:1*n_per_aux]
data["temp"][i] = aux[1*n_per_aux:2*n_per_aux]
data["pres"][i] = aux[2*n_per_aux:3*n_per_aux]
data["r"][i] = aux[3*n_per_aux:4*n_per_aux]
# if radar:
# for radfield in chan_is_radar.nonzero()[0]:
# nm = chan_names[radfield]
# data[nm][i] = measurements
i += 1
if i%(maxlen//8) == 0:
print(now(), "done", "{}/{}, {}%".format(i, maxlen,
(i/maxlen)*100))
return cls(data=data_alt, instrument=instrument,
name="colorado2012", ice_axis=ice_axis)
@tools.validator
def __getitem__(self, key: str):
"""Get field from data.
:param str key: Field name (from self.data["dtype"]).
:returns: ndarray, view from self.data
"""
return self.data[key]
@tools.validator
def __setitem__(self, key: str, val: numpy.ndarray):
"""Add field to data or set existing field to data.
:param str key: Field name
:param ndarray val: Field value. Must match dimensions of
self.data. Note that this will be added with dtype([key,
val.dtype]).
"""
if key in self.data.dtype.names:
self.data[key] = val
else:
prim = self.data
sec = val.view(dtype=[(key, val.dtype)])
self.data = numpy.lib.recfunctions.merge_arrays(
(prim, sec)).view(dtype=(prim.dtype.descr + sec.dtype.descr))
# calculating different statistics
# def stats_IWP_Dme(self, func, minlen=20, *args, **kwargs):
# """Calculate a statistic per logIWP/Dme
#
# :param func: Statistical function to apply for each bin
# :param minlen: Mask statistic if number of values per bin are less
# than this. Defaults to 20.
#
# All remaining arguments are passed on to
# :func:`tools.filter_array`.
#
# Returns (xbin, ybin, stats)
# """
#
# xbin = numpy.linspace(-1, 4, 20)
# ybin = numpy.linspace(0, 650, 20)
# #pos = self.data["IWP"] > 0
# minlen = 20
# nchans = self.data["BT"].shape[1]
# filler = numpy.empty(nchans)
# filler.fill(numpy.nan)
# filt = tools.filter_array(self.data, IWP=(1e-5, 1e10),
# *args, **kwargs)
# binned_data = tools.bin2d(self.data[filt], "IWP", xbin, "Dme", ybin,
# filter1=numpy.log10)
# binned_stats = (numpy.array([
# [func(b["BT"]) if len(b["BT"])>minlen else filler for b in r]
# for r in binned_data]))
# return (xbin, ybin, numpy.ma.masked_invalid(binned_stats))
#
# def stats3d(self, func, minlen=20):
# """Calculate statistic for 3-D bins per IWP, Dme, Zmed
# """
#
# bin_iwp = numpy.linspace(-1, 4, 14)
# bin_dme = numpy.linspace(0, 650, 15)
# bin_zmed = numpy.linspace(0, 20, 16)
#
# binned = tools.bin3d(self.data,
# "IWP", bin_iwp,
# "Dme", bin_dme,
# "Zmed", bin_zmed,
# filter1=numpy.log10)
#
# nchans = self.data["BT"].shape[1]
# filler = numpy.empty(nchans)
# filler.fill(numpy.nan)
#
# stats3d = numpy.array([[
# [func(e["BT"]) if len(e["BT"])>minlen else filler for e in r]
# for r in c]
# for c in binned])
#
# return (bin_iwp, bin_dme, bin_zmed, stats3d)
#
# def shape_per_iwp(self):
# """Get distribution of shapes as a function of IWP
# """
#
# bins_iwp = numpy.linspace(-1, 4, 14)
# filt = self.data["IWP"] > 0
# binned = tools.bin(numpy.log10(self.data["IWP"][filt]),
# self.data[filt], bins_iwp)
# shape_dist = numpy.empty_like(bins_iwp, dtype = self.data.dtype["shape"])
# for shape in self.data.dtype["shape"].names:
# shape_dist[shape] = [v["shape"][shape].mean() for v in binned]
# return shape_dist
#
# # visualisations
#
# def plot_stat_IWP_Dme(self, statfunc, ch,
# minlen=20, statname=None, ax=None,
# *args, **kwargs):
# """Visualise statistic IWP Dme in a hist2d.
#
# Requires instrument to be defined.
#
# Input:
# - statistic (callable)
# - channel
# - minlen, statname, ax
# - all other passed to tools.filter_array
#
# Optional input:
#
# - minlen
# - statname
# - axes object
#
# Returns:
#
# fig, ax, pcolormesh, colorbar
# """
#
# if statname is None:
# statname = statfunc.__name__
#
# if ax is None:
# fig = matplotlib.pyplot.figure()
# ax = fig.add_subplot(1, 1, 1,
# xlabel="${}^{10}$log IWP [g/m^2]",
# xscale="log",
# ylabel="Dme [µm]",
# title="{2} {0.name} {0.instrument.name} {1:s} GHz".format(
# self,
# self.instrument.channels[ch-1].get_chanstr(full=True),
# statname))
# else:
# fig = ax.figure
#
# (xbin, ybin, stat) = self.stats_IWP_Dme(statfunc, minlen=minlen,
# *args, **kwargs)
# cmap = matplotlib.cm.Spectral_r
# cmap.set_bad(color="white", alpha=None)
# #cmap.set_over(color="black")
# #cmap.set_under(color="cyan")
# pm = ax.pcolormesh(10**xbin, ybin, stat[..., ch-1].T, cmap=cmap)
# #pm.set_clim([0, 15])
# cb = fig.colorbar(pm)
# #cb.set_label("interquantile range [K]")
#
# return (fig, ax, pm, cb)
#
# def plot_lines_stat3d(self, statfunc):
# (bins_iwp, bins_dme, bins_zmed, stat) = self.stats3d(statfunc)
# isfin = numpy.isfinite(stat[..., 0])
# fin_in_iwp = isfin.sum(2).sum(1)
# fin_in_dme = isfin.sum(2).sum(0)
# fin_in_zmed = isfin.sum(1).sum(0)
# for (iwp_i, iwp) in enumerate(bins_iwp):
# if fin_in_iwp[iwp_i] > 10:
# # one zmed-line per dme for this iwp
# plot(bins_dme, stat[iwp_i, ..., ch])
# # one dme-line per zmed for this iwp
# plot(bins_zmed, stat[iwp_i, ..., ch].T)
## for (dme_i, dme) in bins_dme:
## for (zmed_i, zmed) in bins_zmed:
## pass
#
# writing out the results in different ways
def write_evans_obs(self, fn, sel=None):
"""Write as evans-obs file
:param fn: File to write to
:param sel: Selection to write to file, None (default) to write
all
"""
# from iceprofret.txt in the Evans distribution:
#
# Six header lines:
# Number of pixels
# Number of channels, total number of elements
# Channel IDs or names (matches channel IDs in retrieval database)
# number of elements in each channel
# additive uncertainties for each element
# multiplicative uncertainties for each element
# For each line:
# Time(hours) Cosine viewing zenith angle Measurement for each element
if sel is None:
sel = numpy.arange(self.data.size)
with open(fn, 'wt') as fp:
# number of pixels
fp.write("{}\n".format(sel.size))
# no channels / no elements; both equal to size of BT
fp.write("{} {}\n".format(self.data.dtype["BT"].shape[0],
self.data.dtype["BT"].shape[0]))
# names of channels
fp.write("{}\n".format(self.instrument.channel_string(pre="im",
width=True)))
# no. element per channel
fp.write(("1 " * len(self.instrument.channels)).strip())
fp.write("\n")
# additive uncertainties
for chan in self.instrument.channels:
fp.write(str(chan.noise) + " ")
fp.write("\n")
# multiplicative uncertainties (not implemented)
for chan in self.instrument.channels:
fp.write("0 ")
fp.write("\n")
for elem in sel:
# time; n/a, so write fake
fp.write("0.0 ")
# cos(angle); only nadir implemented
fp.write("1.0 ")
# write channel BT's
for chan_bt in self.data[elem]["BT"]:
fp.write(str(chan_bt) + " ")
fp.write("\n")
def write_mat(self, fn, fields=None, sel=None):
"""Write to MAT file.
Use :func:`scipy.io.savemat` to write the database to a
MATLAB-style .mat file.
:param fn: Filename to write to
:param fields: List of fieldnames to write. Must be subset of
``self.data.dtype.names``. Defaults to ``None``, which means
to write all fields.
:param sel: Indices to write. Defaults to ``None``, which means
all scenes.
"""
if fields is None:
fields = list(self.data.dtype.names)
if sel is None:
sel = slice(None)
print(now(), "Writing to {}".format(fn))
scipy.io.savemat(fn, dict(data=self.data[fields][sel],
ice_axis=self.ice_axis),
appendmat=False,
do_compression=True, oned_as="column")
class LookupTable(abc.ABC):
"""Use a lookup table to consider similar measurements
This table is used to represent a large set of measurements by a small
set. It takes as input a particular measurement, and returns as
output a canonical measurements. A measurement is n-dimensional,
consisting of lat, lon, time of day, day of year, partial column,
degrees of freedom.
A use case is when we have a large set of measurements, but only error
estimates for a subset of those.
Implemented using a lookup table based on stats.bin_nd. Bins are
based on training data. If newly presented data does not look like
any pre-trained data, an error is raised.
Binning based on PCAs is currently being implemented.
Attributes::
axdata. Dictionary with keys corresponding to the axes to be
considered in the lookup table; names should correspond to
fields in data. Each value is itself a dictionary with keys::
nsteps: Number of steps in linspace
bins
db
To create an instance, use either .fromData or .fromFile (if
available).
"""
#_loaded = False
axdata = bins = db = None
use_pca = False
def compact_summary(self):
"""Return string with compact summary
Suitable in filename
"""
if self.use_pca:
s = "PCA_{:s}_{:d}_{:.1f}".format(
",".join(self.axdata["PCA"]["fields"]),
self.axdata["PCA"]["npc"],
self.axdata["PCA"]["scale"])
else:
s = "-".join(
["{:s}_{:d}".format(k,v["nsteps"])
for (k, v) in sorted(self.axdata.items())])
return s
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__,
self.compact_summary())
@property
def fields(self):
if self.use_pca:
return self.axdata["PCA"]["fields"]
else:
return self.axdata["fields"]
def get_index_tuple(self, dat, full=False):
"""Get a tuple of indices for use in the lookup table
Returns either the full tuple, or only the elements
corresponding to the number of PCs considered originally.
"""
if self.use_pca:
fields = self.axdata["PCA"]["fields"]
t = tuple(
stats.bin_nd_sparse(self.pca.project(numpy.vstack(
[dat[ax] for ax in fields]).T),
self.bins).squeeze().tolist())
if not full:
t = t[:self.axdata["PCA"]["npc"]]
return t
else:
fields = list(self.axdata.keys())
return tuple(stats.bin_nd_sparse(
numpy.atleast_2d([dat[ax]
for ax in fields]), self.bins).squeeze().tolist())
def get_index_tuples(self, data):
"""Yield tuples of indices for use in the lookup table
"""
# FIXME: can be faster when `dat` is large
for dat in data:
yield (self.get_index_tuple(dat), dat)
def lookup_all(self, data):
# FIXME: can be faster when dat is large
logging.info("Looking up {:d} radiances".format(data.size))
bar = progressbar.ProgressBar(maxval=data.size,
widgets=[progressbar.Bar("=", "[", "]"), " ",
progressbar.Percentage()])
bar.start()
for (i, dat) in enumerate(data):
try:
yield self.lookup(dat)
except KeyError:
logging.error("Not found for no. {:d}. :( "
"Should implement lookaround, enlarge LUT, or make it denser!".format(i))
continue
#yield None
#yield from (self.lookup(dat) for dat in data)
bar.update(i)
bar.finish()
def lookaround(self, dat):
"""Yield all neighbouring datapoints
Look at all neighbouring datapoints. NB there may be up to 3^N of those
(including the point itself), where N is the length of tup! Very slow!
"""
tup = self.get_index_tuple(dat)
manytup = itertools.product(*[range(i-1,i+2) for i in tup])
yield from (t for t in manytup if t in self.db)
def _get_bins(self, data, axdata, pca=False):
bins = []
if pca:
# This means axdata has a single key "PCA" with fields
# “scale”. It also means that `data` is in PCA space, i.e.
# pca.Y.
# rmin = numpy.nanmin(data, 0)
# rmin -= 0.001*abs(rmin)
# rmax = numpy.nanmax(data, 0)
# rmax += 0.001*abs(rmax)
# number of bins per PC
nbins = numpy.ceil(self.pca.fracs*100*axdata["PCA"]["scale"])
bins = [numpy.linspace(data[:, i].min(),
data[:, i].max(),
max(p, 2))
for (i, p) in enumerate(nbins)]
return bins[:axdata["PCA"]["npc"]]
# b = [self._get_bins_from_range(rmi, rma, axdata, "PCA")
# for (rmi, rma) in zip(rmin, rmax)]
# raise NotImplementedError("Not implemented yet!")
else:
for ax in axdata.keys():
if "range" in axdata[ax].keys():
(rmin, rmax) = axdata[ax]["range"]
else:
rmin = numpy.nanmin(data[ax])
rmin -= 0.001*abs(rmin)
rmax = numpy.nanmax(data[ax])
rmax += 0.001*abs(rmax)
b = self._get_bins_from_range(rmin, rmax, axdata, ax, data)
# for case in tools.switch(axdata[ax].get("mode", "linear")):
# if case("linear"):
# b = numpy.linspace(rmin, rmax, axdata[ax]["nsteps"])
# break
# if case("optimal"):
# inrange = (data[ax] >= rmin) & (data[ax] <= rmax)
# b = scipy.stats.scoreatpercentile(data[ax][inrange],
# numpy.linspace(0, 100, axdata[ax]["nsteps"]))
# break
# if case():
# raise ValueError("ax {!s} unknown mode {!s}, I know "
# "'linear' and 'optimal'".format(axdata[ax], axdata[ax]["mode"]))
bins.append(b)
return bins
# end for
# end if
@staticmethod
def _get_bins_from_range(rmin, rmax, axdata, ax, data=None):
"""Small helper for _get_bins.
From extrema and `axdata` description, get either linearly spaced bins
or percentile-based ("optimal") bins; the latter also needs `data`.
"""
for case in tools.switch(axdata[ax].get("mode", "linear")):
if case("linear"):
b = numpy.linspace(rmin, rmax, axdata[ax]["nsteps"])
break
if case("optimal"):
inrange = (data[ax] >= rmin) & (data[ax] <= rmax)
b = scipy.stats.scoreatpercentile(data[ax][inrange],
numpy.linspace(0, 100, axdata[ax]["nsteps"]))
break
if case():
raise ValueError("ax {!s} unknown mode {!s}, I know "
"'linear' and 'optimal'".format(ax, axdata[ax]["mode"]))
return b
@staticmethod
def _make_pca(data, axdata):
fields = axdata["PCA"]["fields"]
valid_range = axdata["PCA"]["valid_range"]
if not all([issubclass(data[x].dtype.type, numpy.floating)
for x in fields]):
logging.warning("Casting all data to float64 for PCA")
data_mat = numpy.vstack([data[x] for x in fields]).T
valid = numpy.all((data_mat > valid_range[0]) &
(data_mat < valid_range[1]), 1)
return matplotlib.mlab.PCA(data_mat[valid, :])
def lookup(self, dat):
tup = self.get_index_tuple(dat)
return self[tup]
@staticmethod
@abc.abstractmethod
def _choose():
...
@classmethod
@abc.abstractmethod
def fromData(cls):
...
@abc.abstractmethod
def __getitem__(self):
...
@abc.abstractmethod
def __setitem__(self):
...
class SmallLookupTable(LookupTable):
"""Lookup table small enough to be in memory
"""
@classmethod
def fromFile(cls, file):
with open(file, 'rb') as fp:
(axdata, bins, db) = pickle.load(fp)
self = cls()
self.axdata = axdata
self.bins = bins
self.db = db
#self._loaded = True
return self
def propose_filename(self):
return "similarity_db_{}".format(self.compact_summary())
def toFile(self, file):
"""Store lookup table to a file
"""
with open(file, 'wb') as fp:
pickle.dump((self.axdata, self.bins, self.db), fp,
protocol=4)
@classmethod
def fromData(cls, data, axdata):
"""Build lookup table from data
``data`` should be a structured ndarray with dtype fields
``axdata`` should be a ``collections.OrderedDict`` where the keys
refer to fields from `data` to use, and the values are
dictionaries with the keys. If regular binning is used (i.e. no
PCA), those keys are:
nsteps (mandatory)
number of steps in binning data
mode
string that can be either "linear" (use linspace between
extremes) or "optimal" (choose bins based on percentiles so
1-D binning would create equal content in each).
Defaults to linear.
range
tuple with (min, max) of range within which to bin data.
Defaults to extremes of data.
It is also possible to bin based on PCA. In this case, ``axdata``
should have a single key "PCA". When binning based on PCA, the
number of bins per PC is proportional to the proportion of
variance along each PC axis (pca.fracs). By default, the number
of bins is the % of variability associated with the axis, i.e. if
the first PC explains 67% of variability and the second 25%, there
will be 67 and 25 bins, respectively. This can be scaled by
setting the key `scale` to something other than one.
fields
Sequence of strings: what fields to use in PCA-based
analysis.
npc
Integer, how many PCs to consider
scale
Float, defaults to 1.0, for scaling the number of bins.
valid_range
Before performing PCA, require that ALL input vectors are
within this range, otherwise discard.
"""
self = cls()
self.use_pca = list(axdata.keys()) == ["PCA"]
self.pca = None
if self.use_pca:
# _make_pca considers axdata["PCA"]["fields"]
self.pca = self._make_pca(data, axdata)
# _get_bins considers axdata["PCA"]["npc"]
# and axdata["PCA"]["scale"]
bins = self._get_bins(self.pca.Y, axdata, pca=True)
binned_indices = stats.bin_nd(
[self.pca.Y[:, i] for i in range(self.pca.Y.shape[1])])
else:
fields = axdata.keys()
bins = self._get_bins(data, axdata, pca=False)
binned_indices = stats.bin_nd(
[data[ax] for ax in fields], bins)
db = {}
# Do something for every bin. `_choose` is implemented in another
# class, it might do a count, it might choose one, or it might
# choose all.
for ii in itertools.product(*(range(i) for i in
binned_indices.shape)):
# ii should be a tuple that can be passed directly
if binned_indices[ii].size > 0:
db[ii] = data[self._choose(binned_indices[ii])]
self.axdata = axdata
self.bins = bins
self.db = db
#self._loaded = True
return self
def __getitem__(self, tup):
return self.db[tup]
def __setitem__(self, tup, val):
self.db[tup] = val
def keys(self):
yield from self.db.keys()
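# --- Hedged usage sketch (added; not part of the original pyatmlab module) ---
# The fromData docstring above describes the expected ``axdata`` layout. The
# helper below sketches how a small similarity table might be built from a
# synthetic structured array; the field names, ranges and step counts are
# made up for illustration, and the function is never called at import time.
def _example_small_lookup_table():
    import collections
    demo = numpy.zeros(1000, dtype=[("lat", "f8"), ("dof", "f8")])
    demo["lat"] = numpy.random.uniform(-90.0, 90.0, demo.size)
    demo["dof"] = numpy.random.uniform(0.0, 5.0, demo.size)
    axdata = collections.OrderedDict(
        [("lat", {"nsteps": 10, "mode": "linear"}),
         ("dof", {"nsteps": 5, "range": (0.0, 5.0)})])
    # SmallSimilarityLookupTable (defined further down) combines this class
    # with the random-representative _choose strategy.
    lut = SmallSimilarityLookupTable.fromData(demo, axdata)
    # Each non-empty bin now holds one representative record;
    # lut.lookup(record) would return that representative for a new record.
    return lut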
class LargeLookupTable(LookupTable):
"""Lookup table too large in memory, mapped to directory
"""
basename = "bucket/{coor:s}/contents.npy.xz"
_db = {}
_maxcache = 1e9 # 1 GB
_N = 0
def propose_dirname(self):
return "large_similarity_db_{}".format(self.compact_summary())
@classmethod
def fromData(cls, data, axdata, use_pca=False):
# docstring copied from SmallLookupTable
self = cls()
self.use_pca = use_pca
if use_pca:
self.pca = self._make_pca(data, axdata)
bins = self._get_bins(self.pca.Y, axdata, pca=True)
else:
bins = self._get_bins(data, axdata, pca=False)
self.axdata = axdata
self.bins = bins
if not self.bucket_dir().is_dir():
self.bucket_dir().mkdir(parents=True)
self.storemeta()
self.addData(data)
return self
fromData.__doc__ = SmallLookupTable.__doc__
def addData(self, data):
"""Add a lot of data
"""
k = set()
bar = progressbar.ProgressBar(maxval=len(data),
widgets=[progressbar.Bar("=", "[", "]"), " ",
progressbar.Percentage()])
bar.start()
for (i, (t, contents)) in enumerate(self.get_index_tuples(data)):
if t in k:
cur = self[t]
# contents = contents[numpy.array([contents[i] not in cur
# for i in range(contents.size)])]
# if contents.size > 0:
if contents not in cur:
self[t] = numpy.hstack((self[t], numpy.atleast_1d(contents)))
else:
self[t] = numpy.atleast_1d(contents)
k.add(t)
bar.update(i+1)
bar.finish()
self.dumpcache() # store and clear
self.storemeta()
@classmethod
def fromDir(cls, arg):
"""Initialise from directory
`arg` can be either a directory (str or pathlib.Path), or a
dictionary of axdata describing such (see fromData docstring).
"""
self = cls()
if isinstance(arg, dict):
self.axdata = arg
self.use_pca = "PCA" in arg
dir = self.bucket_dir()
else:
dir = arg
with (pathlib.Path(dir) / "info.npy").open(mode="rb") as fp:
logging.info("Reading into {!s}".format(pathlib.Path(dir)))
(self.axdata, self.bins) = pickle.load(fp)
if "PCA" in self.axdata:
self.use_pca = True
with (pathlib.Path(dir) / "pca.npy").open(mode="rb") as fp:
self.pca = pickle.load(fp)
return self
def keys(self):
"""Yield keys one by one. Reads from directory, no caching!
"""
for p in self.bucket_dir().iterdir():
if p.name.startswith("bucket_") and p.name.endswith(".npy.xz"):
yield tuple(int(s) for s in p.name[7:-7].split("-"))
def __setitem__(self, tup, data):
self._db[tup] = data
if len(self._db) > self._N: # recalculate size
totsize = sum(v.nbytes for v in self._db.values())
if totsize > self._maxcache:
logging.debug("Size {:,} exceeds max cache {:,}, "
"dumping {:d} keys".format(totsize,
self._maxcache, len(self._db)))
self.dumpcache()
else:
self._N += 10 # i.e. after every 10 new entries, check size
def __getitem__(self, tup):
if tup in self._db: # cached
return self._db[tup]
else:
path = self.bucket_name(tup)
if not path.exists():
raise KeyError("No entry for {!s}".format(tup))
with lzma.open(str(path), mode="rb") as fp:
try:
v = numpy.load(fp)
except Exception as e:
raise type(e)(str(e) + " while reading {!s}".format(
path)).with_traceback(sys.exc_info()[2])
self._db[tup] = v
return v
def dumpcache(self):
sizes = [v.size for v in self._db.values()]
logging.info("Dumping cache for {:,} profiles in {:d} buckets to {!s}".format(
sum(sizes), len(self._db), self.bucket_dir()))
bar = progressbar.ProgressBar(maxval=len(self._db),
widgets=[progressbar.Bar('=', '[', ']'), ' ',
progressbar.Percentage()])
bar.start()
newdirs = 0
counts = numpy.zeros(dtype=numpy.uint32, shape=(max(sizes)+1),)
for (i, (k, v)) in enumerate(self._db.items()):
path = self.bucket_name(k)
if not path.parent.is_dir():
#logging.info("Creating directory {!s}".format(path.parent))
path.parent.mkdir(parents=True)
newdirs += 1
# if v.nbytes > 1e6:
# logging.debug("Storing {:d}/{:d}, {:,} bytes to {!s}".format(
# i, len(self._db), v.nbytes, path))
counts[v.size] += 1
with lzma.open(str(path), mode="wb") as fp:
numpy.save(fp, v)
bar.update(i+1)
bar.finish()
logging.info("Stored cache. Created {:d} new directories. "
"Profiles per bucket histogram: {!s}".format(newdirs, counts))
self.clearcache()
def storemeta(self):
"""Store metadata for database
"""
d = self.bucket_dir()
with (d / "info.npy").open(mode="wb") as fp:
pickle.dump((self.axdata, self.bins), fp, protocol=4)
if self.use_pca:
with (d / "pca.npy").open(mode="wb") as fp:
pickle.dump(self.pca, fp, protocol=4)
def loadmeta(self):
"""Load metadata for database
"""
d = self.bucket_dir()
with (d / "info.npy").open(mode="rb") as fp:
(self.axdata, self.bins) = pickle.load(fp)
def clearcache(self):
self._db.clear()
self._N = 0
def bucket_dir(self):
return (pathlib.Path(config.conf["main"]["lookup_table_dir"]) /
self.propose_dirname())
def bucket_name(self, coor):
"""Return full path to bucket at coor
"""
return (self.bucket_dir() /
self.basename.format(coor="/".join("{:02d}".format(x) for x in coor)))
class SimilarityLookupTable(LookupTable):
def propose_filename(self):
return "tanso_similarity_db_{}".format(self.compact_summary())
@staticmethod
def _choose(data):
"""Choose one of the data to use for building the db
"""
return random.choice(data)
class FullLookupTable(LookupTable):
"""Like a similarity lookup table, but keeps all entries
"""
@staticmethod
def _choose(data):
return data
class CountingLookupTable(LookupTable):
"""Provide counting only, effectively creating a histogram.
"""
@staticmethod
def _choose(data):
return data.size
class SmallSimilarityLookupTable(SmallLookupTable, SimilarityLookupTable):
pass
class LargeSimilarityLookupTable(LargeLookupTable, SimilarityLookupTable):
pass
class SmallFullLookupTable(SmallLookupTable, FullLookupTable):
pass
class LargeFullLookupTable(LargeLookupTable, FullLookupTable):
pass
class SmallCountingLookupTable(SmallLookupTable, CountingLookupTable):
pass
|
bsd-3-clause
|
mxjl620/scikit-learn
|
sklearn/utils/validation.py
|
30
|
24618
|
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
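# --- Hedged example (added for illustration; not part of scikit-learn) ---
# as_float_array, as documented above, upcasts integer input to a floating
# dtype while passing float arrays through. Helper only, never called here.
def _example_as_float_array():
    X_int = np.arange(6).reshape(2, 3)
    X_float = as_float_array(X_int)
    assert X_float.dtype in (np.float32, np.float64)
    return X_float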
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will"
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
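# --- Hedged example (added for illustration; not part of scikit-learn) ---
# A minimal sketch of check_array on a plain nested list: the input becomes a
# 2-d ndarray, and ensure_min_samples rejects inputs that are too small.
# Helper only, never called here.
def _example_check_array():
    X = check_array([[1, 2], [3, 4]], dtype="numeric")
    assert X.ndim == 2 and X.shape == (2, 2)
    try:
        check_array([[1, 2], [3, 4]], ensure_min_samples=3)
    except ValueError:
        pass  # fewer samples than required raises ValueError
    return X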
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
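# --- Hedged example (added for illustration; not part of scikit-learn) ---
# check_X_y validates X and y together: X becomes a 2-d array, y is raveled
# to 1-d, and their lengths must agree. Helper only, never called here.
def _example_check_X_y():
    X, y = check_X_y([[0., 1.], [1., 0.], [2., 2.]], [0, 1, 1])
    assert X.shape == (3, 2) and y.shape == (3,)
    return X, y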
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
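# --- Hedged example (added for illustration; not part of scikit-learn) ---
# check_random_state normalises the accepted seed types into a RandomState
# instance, as documented above. Helper only, never called here.
def _example_check_random_state():
    rng = check_random_state(0)       # int seed -> new RandomState
    same = check_random_state(rng)    # an existing RandomState is returned as-is
    assert rng is same
    return rng.rand(3)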
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
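# --- Hedged example (added for illustration; not part of scikit-learn) ---
# Sketch of the symmetrization described above: a non-symmetric matrix is
# replaced by the average of itself and its transpose; the warning that this
# emits is silenced locally. Helper only, never called here.
def _example_check_symmetric():
    A = np.array([[0., 2.], [0., 0.]])
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        A_sym = check_symmetric(A)
    assert np.allclose(A_sym, [[0., 1.], [1., 0.]])
    return A_sym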
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
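# --- Hedged example (added for illustration; not part of scikit-learn) ---
# check_is_fitted raises NotFittedError while the listed attributes are
# missing and passes silently once they exist. Sketch with a dummy
# estimator-like object; helper only, never called here.
def _example_check_is_fitted():
    class _Dummy(object):
        def fit(self, X, y=None):
            self.coef_ = np.zeros(2)
            return self
    est = _Dummy()
    try:
        check_is_fitted(est, "coef_")
    except NotFittedError:
        pass  # not fitted yet
    check_is_fitted(est.fit(None), "coef_")  # no error after fitting
    return est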
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
|
bsd-3-clause
|
jyeatman/dipy
|
doc/examples/reconst_dsi_metrics.py
|
13
|
4539
|
"""
===============================
Calculate DSI-based scalar maps
===============================
We show how to calculate two DSI-based scalar maps: return to origin
probability (rtop) [Descoteaux2011]_ and mean square displacement (msd)
[Wu2007]_, [Wu2008]_ on your dataset.
First import the necessary modules:
"""
import numpy as np
import matplotlib.pyplot as plt
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi
from dipy.reconst.dsi import DiffusionSpectrumModel
"""
Download and read the data for this tutorial.
"""
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
"""
img contains a nibabel Nifti1Image object (the data) and gtab contains a
GradientTable object (the gradient information, e.g. the b-values). For example,
the b-values can be printed with print(gtab.bvals), as shown right after this block.
Load the raw diffusion data and the affine.
"""
data = img.get_data()
affine = img.get_affine()
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
Instantiate the Model and apply it to the data.
"""
dsmodel = DiffusionSpectrumModel(gtab, qgrid_size=35, filter_width=18.5)
"""
Let's use just one slice of the data.
"""
dataslice = data[30:70, 20:80, data.shape[2] // 2]
"""
Normalize the signal by the b0
"""
dataslice = dataslice / (dataslice[..., 0, None]).astype(float)
"""
Calculate the return to origin probability (rtop) on the signal,
which corresponds to the integral of the signal.
"""
print('Calculating... rtop_signal')
rtop_signal = dsmodel.fit(dataslice).rtop_signal()
"""
Now we calculate the return to origin probability on the propagator,
which corresponds to its central value.
By default the propagator is divided by its sum to obtain a properly normalized pdf.
However, this normalization changes the values of rtop, so in order to compare it
with the rtop previously calculated on the signal we set the normalized parameter to False.
"""
print('Calculating... rtop_pdf')
rtop_pdf = dsmodel.fit(dataslice).rtop_pdf(normalized=False)
"""
In theory these two measures must be equal.
To verify this, we calculate the mean squared error between them.
"""
mse = np.sum((rtop_signal - rtop_pdf) ** 2) / rtop_signal.size
print("mse = %f" % mse)
"""
mse = 0.000000
Leaving the normalized parameter at its default changes the values of the
rtop but not the contrast between the voxels.
"""
print('Calculating... rtop_pdf_norm')
rtop_pdf_norm = dsmodel.fit(dataslice).rtop_pdf()
"""
Let's calculate the mean square displacement on the normalized propagator.
"""
print('Calculating... msd_norm')
msd_norm = dsmodel.fit(dataslice).msd_discrete()
"""
Setting the normalized parameter to False makes it possible to calculate
the mean square displacement on the propagator without normalization.
"""
print('Calculating... msd')
msd = dsmodel.fit(dataslice).msd_discrete(normalized=False)
"""
Show the rtop images and save them in rtop.png.
"""
fig = plt.figure(figsize=(6, 6))
ax1 = fig.add_subplot(2, 2, 1, title='rtop_signal')
ax1.set_axis_off()
ind = ax1.imshow(rtop_signal.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(2, 2, 2, title='rtop_pdf_norm')
ax2.set_axis_off()
ind = ax2.imshow(rtop_pdf_norm.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax3 = fig.add_subplot(2, 2, 3, title='rtop_pdf')
ax3.set_axis_off()
ind = ax3.imshow(rtop_pdf.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
plt.savefig('rtop.png')
"""
.. figure:: rtop.png
:align: center
**Return to origin probability**.
Show the msd images and save them in msd.png.
"""
fig = plt.figure(figsize=(7, 3))
ax1 = fig.add_subplot(1, 2, 1, title='msd_norm')
ax1.set_axis_off()
ind = ax1.imshow(msd_norm.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(1, 2, 2, title='msd')
ax2.set_axis_off()
ind = ax2.imshow(msd.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
plt.savefig('msd.png')
"""
.. figure:: msd.png
:align: center
**Mean square displacement**.
.. [Descoteaux2011] Descoteaux M. et. al , "Multiple q-shell diffusion
propagator imaging", Medical Image Analysis, vol 15,
No. 4, p. 603-621, 2011.
.. [Wu2007] Wu Y. et al., "Hybrid diffusion imaging", NeuroImage, vol 36,
p. 617-629, 2007.
.. [Wu2008] Wu Y. et al., "Computation of Diffusion Function Measures
in q -Space Using Magnetic Resonance Hybrid Diffusion Imaging",
IEEE TRANSACTIONS ON MEDICAL IMAGING, vol. 27, No. 6, p. 858-865,
2008
.. include:: ../links_names.inc
"""
|
bsd-3-clause
|
fbagirov/scikit-learn
|
sklearn/tests/test_random_projection.py
|
142
|
14033
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
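# Hedged note (not part of the original tests): johnson_lindenstrauss_min_dim
# is documented to implement the bound
#   n_components >= 4 * log(n_samples) / (eps ** 2 / 2 - eps ** 3 / 3),
# which is only defined for eps in (0, 1) and n_samples > 0 -- hence the
# ValueError checks above.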
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
    # All random matrices should produce a transformation matrix
    # with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
    # Check some statistical properties of the Gaussian random matrix.
    # Check that the random matrix follows the proper distribution:
    # each element a_ij of A is drawn from
    # a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of the sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution:
        # each element a_ij of A is drawn from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
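        # Hedged note: each indicator event (A == value) is a Bernoulli variable
        # with success probability p, so its variance is p * (1 - p); the checks
        # below use p = 1 - 1/s and p = 1/(2s) respectively.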
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
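        # In symbols, the JL contract checked below is, for every pair u, v:
        #   (1 - eps) * ||u - v||^2 <= ||f(u) - f(v)||^2 <= (1 + eps) * ||u - v||^2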
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
|
bsd-3-clause
|
emon10005/scikit-image
|
doc/examples/plot_canny.py
|
11
|
1633
|
"""
===================
Canny edge detector
===================
The Canny filter is a multi-stage edge detector. It uses a filter based on the
derivative of a Gaussian in order to compute the intensity of the gradients. The
Gaussian reduces the effect of noise present in the image. Then, potential
edges are thinned down to 1-pixel curves by removing non-maximum pixels of the
gradient magnitude. Finally, edge pixels are kept or removed using hysteresis
thresholding on the gradient magnitude.
The Canny filter has three adjustable parameters: the width of the Gaussian (the
noisier the image, the greater the width), and the low and high threshold for
the hysteresis thresholding.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import feature
# Generate noisy image of a square
im = np.zeros((128, 128))
im[32:-32, 32:-32] = 1
im = ndi.rotate(im, 15, mode='constant')
im = ndi.gaussian_filter(im, 4)
im += 0.2 * np.random.random(im.shape)
# Compute the Canny filter for two values of sigma
edges1 = feature.canny(im)
edges2 = feature.canny(im, sigma=3)
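# A hedged sketch (not part of the original example): the low and high
# hysteresis thresholds mentioned in the docstring can also be set explicitly.
# The threshold values below are illustrative only.
edges3 = feature.canny(im, sigma=3, low_threshold=0.05, high_threshold=0.2)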
# display results
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3))
ax1.imshow(im, cmap=plt.cm.jet)
ax1.axis('off')
ax1.set_title('noisy image', fontsize=20)
ax2.imshow(edges1, cmap=plt.cm.gray)
ax2.axis('off')
ax2.set_title(r'Canny filter, $\sigma=1$', fontsize=20)
ax3.imshow(edges2, cmap=plt.cm.gray)
ax3.axis('off')
ax3.set_title(r'Canny filter, $\sigma=3$', fontsize=20)
fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
bottom=0.02, left=0.02, right=0.98)
plt.show()
|
bsd-3-clause
|
yhat/ggplot
|
tests/test_ggplot.py
|
1
|
6995
|
import unittest
import ggplot as gg
import pandas as pd
class TestGgplot(unittest.TestCase):
def test_init_args_order(self):
p = gg.ggplot(gg.mtcars, gg.aes(x='mpg'))
self.assertTrue(isinstance(p.data, pd.DataFrame))
self.assertTrue(isinstance(p._aes, gg.aes))
def test_init_args_backwards_order(self):
p = gg.ggplot(gg.aes(x='mpg'), gg.mtcars)
self.assertTrue(isinstance(p.data, pd.DataFrame))
self.assertTrue(isinstance(p._aes, gg.aes))
# facets
def test_ndim_2facet_grid(self):
p = gg.ggplot(gg.aes(x='price'), gg.diamonds) + gg.facet_grid('cut', 'clarity')
nrow, ncol = p.facets.nrow, p.facets.ncol
self.assertEqual(nrow, 5)
self.assertEqual(ncol, 8)
def test_ndim_2facet_grid_reverse(self):
p = gg.ggplot(gg.aes(x='price'), gg.diamonds) + gg.facet_grid('clarity', 'cut')
nrow, ncol = p.facets.nrow, p.facets.ncol
self.assertEqual(nrow, 8)
self.assertEqual(ncol, 5)
def test_ndim_1_facet_grid_row(self):
p = gg.ggplot(gg.aes(x='price'), gg.diamonds) + gg.facet_grid('clarity')
nrow, ncol = p.facets.nrow, p.facets.ncol
self.assertEqual(nrow, 8)
self.assertEqual(ncol, 1)
def test_ndim_1_facet_grid_col(self):
p = gg.ggplot(gg.aes(x='price'), gg.diamonds) + gg.facet_grid(None, 'clarity')
nrow, ncol = p.facets.nrow, p.facets.ncol
self.assertEqual(nrow, 1)
self.assertEqual(ncol, 8)
def test_ndim_1_facet_wrap(self):
p = gg.ggplot(gg.aes(x='price'), gg.diamonds) + gg.facet_wrap('clarity')
nrow, ncol = p.facets.nrow, p.facets.ncol
self.assertEqual(nrow, 3)
self.assertEqual(ncol, 3)
self.assertEqual(p.facets.ndim, 8)
def test_ndim_1_facet_wrap_subplots(self):
p = gg.ggplot(gg.aes(x='price'), gg.diamonds) + gg.facet_wrap('clarity')
fig, subplots = p.make_facets()
nrow, ncol = subplots.shape
self.assertEqual(nrow, 3)
self.assertEqual(ncol, 3)
def test_ndim_2_facet_wrap(self):
p = gg.ggplot(gg.aes(x='price'), gg.diamonds) + gg.facet_wrap('cut', 'clarity')
nrow, ncol = p.facets.nrow, p.facets.ncol
self.assertEqual(nrow, 7)
self.assertEqual(ncol, 6)
self.assertEqual(p.facets.ndim, 40)
def test_ndim_2_facet_wrap_subplots(self):
p = gg.ggplot(gg.aes(x='price'), gg.diamonds) + gg.facet_wrap('cut', 'clarity')
fig, subplots = p.make_facets()
nrow, ncol = subplots.shape
self.assertEqual(nrow, 7)
self.assertEqual(ncol, 6)
def test_facet_wrap_nrow(self):
p = gg.ggplot(gg.aes(x='price'), gg.diamonds) + gg.facet_wrap('cut', nrow=2)
nrow, ncol = p.facets.nrow, p.facets.ncol
self.assertEqual(nrow, 2)
self.assertEqual(ncol, 3)
def test_facet_wrap_ncol(self):
p = gg.ggplot(gg.aes(x='price'), gg.diamonds) + gg.facet_wrap('cut', ncol=2)
nrow, ncol = p.facets.nrow, p.facets.ncol
self.assertEqual(nrow, 3)
self.assertEqual(ncol, 2)
# groups
def test_groups_1_aes(self):
p = gg.ggplot(gg.aes(x='carat', y='price', color='clarity'), gg.diamonds) + gg.geom_point()
_, groups = p._construct_plot_data()
self.assertEqual(len(groups), 8)
def test_groups_2_aes(self):
p = gg.ggplot(gg.aes(x='carat', y='price', color='clarity', shape='cut'), gg.diamonds) + gg.geom_point()
_, groups = p._construct_plot_data()
self.assertEqual(len(groups), 8*5)
# labels
def test_xlab(self):
p = gg.ggplot(gg.aes(x='mpg'), gg.mtcars) + gg.geom_histogram() + gg.xlab("TEST")
self.assertEqual(p.xlab, "TEST")
def test_ylab(self):
p = gg.ggplot(gg.aes(x='mpg'), gg.mtcars) + gg.geom_histogram() + gg.ylab("TEST")
self.assertEqual(p.ylab, "TEST")
def test_ggtitle(self):
p = gg.ggplot(gg.aes(x='mpg'), gg.mtcars) + gg.geom_histogram() + gg.ggtitle("TEST")
self.assertEqual(p.title, "TEST")
# patsy formula
def test_patsy(self):
p = gg.ggplot(gg.aes(x='mpg + 100'), gg.mtcars)
self.assertEqual((p.data['mpg + 100']==(gg.mtcars.mpg + 100)).sum(), 32)
# scales
def test_scale_x_log_default10(self):
p = gg.ggplot(gg.aes(x='mpg'), gg.mtcars) + gg.scale_x_log()
self.assertEqual(p.scale_x_log, 10)
def test_scale_x_log_base(self):
p = gg.ggplot(gg.aes(x='mpg'), gg.mtcars) + gg.scale_x_log(base=100)
self.assertEqual(p.scale_x_log, 100)
def test_scale_y_log_default10(self):
p = gg.ggplot(gg.aes(x='mpg'), gg.mtcars) + gg.scale_y_log()
self.assertEqual(p.scale_y_log, 10)
def test_scale_y_log_base(self):
p = gg.ggplot(gg.aes(x='mpg'), gg.mtcars) + gg.scale_y_log(base=100)
self.assertEqual(p.scale_y_log, 100)
def test_scale_alpha_identity(self):
df = pd.DataFrame({'x': range(10), 'the-alpha': '+' })
p = gg.ggplot(gg.aes(x='x', alpha='the-alpha'), df) + gg.scale_alpha_identity()
self.assertTrue((p.data['the-alpha']==df['the-alpha']).all())
def test_scale_color_identity(self):
df = pd.DataFrame({'x': range(10), 'the-color': 'blue' })
p = gg.ggplot(gg.aes(x='x', color='the-color'), df) + gg.scale_color_identity()
self.assertTrue((p.data['the-color']==df['the-color']).all())
def test_scale_fill_identity(self):
df = pd.DataFrame({'x': range(10), 'the-fill': '+' })
p = gg.ggplot(gg.aes(x='x', fill='the-fill'), df) + gg.scale_fill_identity()
self.assertTrue((p.data['the-fill']==df['the-fill']).all())
def test_scale_linetype_identity(self):
df = pd.DataFrame({'x': range(10), 'the-linetype': '+' })
p = gg.ggplot(gg.aes(x='x', linetype='the-linetype'), df) + gg.scale_linetype_identity()
self.assertTrue((p.data['the-linetype']==df['the-linetype']).all())
def test_scale_shape_identity(self):
df = pd.DataFrame({'x': range(10), 'the-shape': '+' })
p = gg.ggplot(gg.aes(x='x', shape='the-shape'), df) + gg.scale_shape_identity()
self.assertTrue((p.data['the-shape']==df['the-shape']).all())
def test_scale_size_identity(self):
df = pd.DataFrame({'x': range(10), 'the-size': '+' })
p = gg.ggplot(gg.aes(x='x', size='the-size'), df) + gg.scale_size_identity()
self.assertTrue((p.data['the-size']==df['the-size']).all())
def test_scale_x_reverse(self):
df = pd.DataFrame({'x': range(10), 'the-size': '+' })
p = gg.ggplot(gg.aes(x='x', size='the-size'), df) + gg.scale_x_reverse()
self.assertTrue(p.scale_x_reverse)
def test_scale_y_reverse(self):
df = pd.DataFrame({'x': range(10), 'the-size': '+' })
p = gg.ggplot(gg.aes(x='x', size='the-size'), df) + gg.scale_y_reverse()
self.assertTrue(p.scale_y_reverse)
# TODO legend tests
if __name__ == '__main__':
unittest.main()
|
bsd-2-clause
|
chrysante87/pyterpol
|
pyterpol_test/test_SyntheticGrid/test_grid_interpolation.py
|
1
|
1262
|
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
# create the grid
sygri = pyterpol.SyntheticGrid()
# read properties
sygri.read_list_from_file('b1.dat', columns=['FILENAME', 'TEFF', 'LOGG', 'Z'], family='BSTAR')
sygri.read_list_from_file('p1.dat', columns=['FILENAME', 'TEFF', 'LOGG', 'Z'], family='POLLUX')
# test of dealing with degeneracies - this should return two spectra
sl = sygri.get_all(z=1.0, teff=15000, logg=4.5)
#print sl
# resolve degeneracy - should raise an exception - checked
#sl = sygri.resolve_degeneracy(sl)
## so we set the grid order and now it should return one spectrum - checked
sygri.set_grid_order(['BSTAR', 'POLLUX'])
sl = sygri.resolve_degeneracy(sl)
#print sl['family']
# this should create a list with intensities of individual
# spectra that will be used for interpolation
parlist, vals, keys = sygri.select_and_verify_parameters(teff=15000, logg=2.75, z=1.5, order=2)
for row in parlist:
    print(row)
#spectra = sygri.get_spectra_for_interpolation(parlist, ['logg', 'z', 'teff'])
#for spec in spectra[:10]:
#print spec
#print len(parlist), len(spectra)
#try to interpolate the spectra
#print sygri.interpolate_spectra(parlist, spectra, [3.5, 1.0, 15100])
|
gpl-2.0
|
BiaDarkia/scikit-learn
|
sklearn/covariance/tests/test_robust_covariance.py
|
9
|
5155
|
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import itertools
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
assert_raise_message(ValueError, 'Expected 2D array, got 1D array instead',
fast_mcd, X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
assert_raise_message(ValueError, 'Expected 2D array, got 1D array instead',
mcd.fit, X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_mcd_issue3367():
# Check that MCD completes when the covariance matrix is singular
# i.e. one of the rows and columns are all zeros
rand_gen = np.random.RandomState(0)
# Think of these as the values for X and Y -> 10 values between -5 and 5
data_values = np.linspace(-5, 5, 10).tolist()
# Get the cartesian product of all possible coordinate pairs from above set
data = np.array(list(itertools.product(data_values, data_values)))
    # Add a third column that's all zeros to make our data a set of points
# within a plane, which means that the covariance matrix will be singular
data = np.hstack((data, np.zeros((data.shape[0], 1))))
# The below line of code should raise an exception if the covariance matrix
# is singular. As a further test, since we have points in XYZ, the
    # principal components (Eigenvectors) of these directly relate to the
# geometry of the points. Since it's a plane, we should be able to test
# that the Eigenvector that corresponds to the smallest Eigenvalue is the
# plane normal, specifically [0, 0, 1], since everything is in the XY plane
# (as I've set it up above). To do this one would start by:
#
# evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
# normal = evecs[:, np.argmin(evals)]
#
# After which we need to assert that our `normal` is equal to [0, 0, 1].
# Do note that there is floating point error associated with this, so it's
# best to subtract the two and then compare some small tolerance (e.g.
# 1e-12).
MinCovDet(random_state=rand_gen).fit(data)
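    # Hedged sketch of the eigenvector check described in the comment above
    # (not part of the original test): the eigenvector of the smallest
    # eigenvalue should match the plane normal [0, 0, 1] up to sign.
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
    normal = evecs[:, np.argmin(evals)]
    assert np.allclose(np.abs(normal), [0., 0., 1.], atol=1e-12)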
def test_mcd_support_covariance_is_zero():
# Check that MCD returns a ValueError with informative message when the
# covariance of the support data is equal to 0.
X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1])
X_1 = X_1.reshape(-1, 1)
X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3])
X_2 = X_2.reshape(-1, 1)
msg = ('The covariance matrix of the support data is equal to 0, try to '
'increase support_fraction')
for X in [X_1, X_2]:
assert_raise_message(ValueError, msg, MinCovDet().fit, X)
|
bsd-3-clause
|
ryandougherty/mwa-capstone
|
MWA_Tools/build/matplotlib/lib/mpl_examples/api/legend_demo.py
|
6
|
1083
|
import numpy as np
import matplotlib.pyplot as plt
a = np.arange(0,3,.02)
b = np.arange(0,3,.02)
c = np.exp(a)
d = c[::-1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(a,c,'k--',a,d,'k:',a,c+d,'k')
leg = ax.legend(('Model length', 'Data length', 'Total message length'),
                loc='upper center', shadow=True)
ax.set_ylim([-1,20])
ax.grid(False)
ax.set_xlabel('Model complexity --->')
ax.set_ylabel('Message length --->')
ax.set_title('Minimum Message Length')
ax.set_yticklabels([])
ax.set_xticklabels([])
# set some legend properties. All the code below is optional. The
# defaults are usually sensible but if you need more control, this
# shows you how
# the matplotlib.patches.Rectangle instance surrounding the legend
frame = leg.get_frame()
frame.set_facecolor('0.80') # set the frame face color to light gray
# matplotlib.text.Text instances
for t in leg.get_texts():
t.set_fontsize('small') # the legend text fontsize
# matplotlib.lines.Line2D instances
for l in leg.get_lines():
l.set_linewidth(1.5) # the legend line width
plt.show()
|
gpl-2.0
|
ch3ll0v3k/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
105
|
26588
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
        # all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
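        # Concretely, grad(w + t * vector) ~ grad(w) + t * H(vector) to first
        # order, so after centering d_grad a least-squares fit of d_grad
        # against d_x recovers the Hessian-vector product checked below.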
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use pre-defined folds, since folds generated internally would differ for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight of type dict is
    # provided for a multiclass problem. It can, however, handle
    # binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
|
bsd-3-clause
|
JrtPec/opengrid
|
opengrid/library/tests/test_fluksoapi.py
|
3
|
7958
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 02:37:25 2013
@author: roel
"""
import os, sys
import unittest
import inspect
import numpy as np
import pdb
import datetime as dt
import pandas as pd
import pytz
test_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# add the path to opengrid to sys.path
sys.path.append(os.path.join(test_dir, os.pardir, os.pardir))
from opengrid.library import fluksoapi
class FluksoapiTest(unittest.TestCase):
"""
Class for testing the module fluksoapi
"""
def test_consolidate_single(self):
"""Return abspath if a single file found"""
datafolder = os.path.join(test_dir, 'data')
self.assertRaises(ValueError, fluksoapi.consolidate_sensor, datafolder, 'f81fb35a62f59a987d8eea3ffc845ed0')
csv_expected = os.path.join(datafolder, 'FL12345678_sensorS_FROM_2014-01-07_16-02-00_TO_2014-01-08_16-01-00.csv' )
self.assertEqual(csv_expected,
fluksoapi.consolidate_sensor(datafolder, 'sensorS'))
def test_consolidate_multiple(self):
"""Consolidate and return single filename if more than one file found"""
datafolder = os.path.join(test_dir, 'data')
csv_expected = os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_08-02-00_TO_2014-01-08_16-01-00.csv' )
self.assertEqual(csv_expected, fluksoapi.consolidate_sensor(datafolder, 'sensorD'))
os.remove(csv_expected)
def test_consolidate_raises(self):
"""Raise ValueError if no file found"""
datafolder = os.path.join(test_dir, 'data')
self.assertRaises(ValueError, fluksoapi.consolidate_sensor, datafolder, 'thissensordoesnotexist')
def test_consolidate(self):
"""Consolidating 2 files and checking variable"""
datafolder = os.path.join(test_dir, 'data')
new_csv=fluksoapi.consolidate_sensor(folder = datafolder,
sensor = 'sensorD')
ts1 = fluksoapi.load_file(os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_08-02-00_TO_2014-01-08_08-01-00.csv'))
self.assertTrue(np.isnan(ts1['sensorD'].loc[dt.datetime(2014,1,8,8,0,0, tzinfo=pytz.UTC)]))
ts2 = fluksoapi.load_file(os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_16-02-00_TO_2014-01-08_16-01-00.csv'))
#ts = fluksoapi.load_file(os.path.join(datafolder, 'f81fb35a62f59a987d8eea3ffc845ed0_FROM_2014-01-07_08-02-00_TO_2014-01-08_16-01-00.csv'))
#pdb.set_trace()
ts = fluksoapi.load_file(new_csv)
self.assertEqual(ts.index[0], ts1.index[0])
self.assertEqual(ts.index[-1], ts2.index[-1])
self.assertEqual(ts['sensorD'].loc['2014-01-08 08:00:00'], 1120.0, "Last file should overwrite identical indices")
os.remove(new_csv)
def test_consolidate_with_hidden_file(self):
"""Consolidate should skip hidden file"""
datafolder = os.path.join(test_dir, 'data')
new_csv=fluksoapi.consolidate_sensor(folder = datafolder,
sensor = 'sensorH')
self.assertEqual(new_csv, os.path.join(datafolder, 'FL12345678_sensorH_FROM_2014-01-07_12-02-00_TO_2014-01-08_16-01-00.csv'))
os.remove(new_csv)
def test_consolidate_single_file(self):
"""Consolidating a single file should NOT consolidate but should return the file"""
datafolder = os.path.join(test_dir, 'data')
new_csv=fluksoapi.consolidate_sensor(folder = datafolder,
sensor = 'sensorS')
self.assertEqual(new_csv, os.path.join(datafolder,'FL12345678_sensorS_FROM_2014-01-07_16-02-00_TO_2014-01-08_16-01-00.csv'))
def test_consolidate_day(self):
"""Consolidating 2 files for a single day and checking variable"""
datafolder = os.path.join(test_dir, 'data')
new_csv=fluksoapi.consolidate_sensor(folder = datafolder,
sensor = 'sensorD',
dt_day = dt.datetime(2014,1,7))
ts1 = fluksoapi.load_file(os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_08-02-00_TO_2014-01-08_08-01-00.csv'))
self.assertTrue(np.isnan(ts1['sensorD'].loc[dt.datetime(2014,1,8,8,0,0, tzinfo=pytz.UTC)]))
ts2 = fluksoapi.load_file(os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_16-02-00_TO_2014-01-08_16-01-00.csv'))
ts = fluksoapi.load_file(new_csv)
self.assertEqual(ts.index[0], ts1.index[0])
self.assertEqual(ts.index[-1], dt.datetime(2014,1,8,0,0,0, tzinfo=pytz.UTC))
os.remove(new_csv)
def test_load_file(self):
"""load_file should return a pandas dataframe with localized index (UTC)"""
datafolder = os.path.join(test_dir, 'data')
df = fluksoapi.load_file(os.path.join(datafolder, 'FL12345678_sensorD_FROM_2014-01-07_08-02-00_TO_2014-01-08_08-01-00.csv'))
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual(df.index.tz, pytz.UTC, "the tz is {} instead of UTC".format(df.index.tz))
self.assertListEqual(df.columns.tolist(), ['sensorD'])
def test_parse_date_from_datetime(self):
"""Parsing a datetime into a pandas.Timestamp"""
BXL = pytz.timezone('Europe/Brussels')
dt_ = BXL.localize(dt.datetime(2014,11,23,1,2,3))
epoch = pytz.UTC.localize(dt.datetime(1970,1,1,0,0,0))
epoch_expected = (dt_ - epoch).total_seconds()
pts = fluksoapi._parse_date(dt_)
self.assertEqual(pts.value/1e9, epoch_expected)
def test_parse_date_from_datetime_naive(self):
"""Parsing a naïve datetime into a pandas.Timestamp makes it UTC"""
dt_ = pytz.UTC.localize(dt.datetime(2014,11,23,1,2,3))
epoch = pytz.UTC.localize(dt.datetime(1970,1,1,0,0,0))
epoch_expected = (dt_ - epoch).total_seconds()
pts = fluksoapi._parse_date(dt.datetime(2014,11,23,1,2,3))
self.assertEqual(pts.value/1e9, epoch_expected)
def test_parse_date_from_posix(self):
"""Parsing a float"""
pts = fluksoapi._parse_date(1416778251.460574)
self.assertEqual(1416778251.460574, pts.value/1e9)
def test_parse_date_from_string(self):
"""Parsing some commong types of strings"""
dt_ = pytz.UTC.localize(dt.datetime(2014,11,23,1,2,3))
epoch = pytz.UTC.localize(dt.datetime(1970,1,1,0,0,0))
epoch_expected = (dt_ - epoch).total_seconds()
pts = fluksoapi._parse_date('20141123 01:02:03')
self.assertEqual(pts.value/1e9, epoch_expected)
pts = fluksoapi._parse_date('2014-11-23 01:02:03')
self.assertEqual(pts.value/1e9, epoch_expected)
pts = fluksoapi._parse_date('2014-11-23T010203')
self.assertEqual(pts.value/1e9, epoch_expected)
if __name__ == '__main__':
#http://stackoverflow.com/questions/4005695/changing-order-of-unit-tests-in-python
ln = lambda f: getattr(FluksoapiTest, f).im_func.func_code.co_firstlineno
lncmp = lambda _, a, b: cmp(ln(a), ln(b))
unittest.TestLoader.sortTestMethodsUsing = lncmp
suite1 = unittest.TestLoader().loadTestsFromTestCase(FluksoapiTest)
alltests = unittest.TestSuite([suite1])
#selection = unittest.TestSuite()
#selection.addTest(HouseprintTest('test_get_sensor'))
unittest.TextTestRunner(verbosity=1, failfast=False).run(alltests)
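# Editorial usage sketch (not part of the original test module): the fixtures
# in test_dir/data follow the naming scheme exercised above, e.g.
#     FL12345678_sensorD_FROM_2014-01-07_08-02-00_TO_2014-01-08_08-01-00.csv
# and the calls under test look like
#     new_csv = fluksoapi.consolidate_sensor(folder=datafolder, sensor='sensorD')
#     ts = fluksoapi.load_file(new_csv)   # tz-aware (UTC) pandas DataFrame
# File names and keyword arguments are taken from the tests themselves, not
# verified against the opengrid API documentation.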
|
apache-2.0
|
zrhans/pythonanywhere
|
.virtualenvs/django19/lib/python3.4/site-packages/matplotlib/_pylab_helpers.py
|
8
|
4008
|
"""
Manage figures for pyplot interface.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import sys
import gc
import atexit
def error_msg(msg):
print(msg, file=sys.stderr)
class Gcf(object):
"""
Singleton to manage a set of integer-numbered figures.
This class is never instantiated; it consists of two class
attributes (a list and a dictionary), and a set of static
methods that operate on those attributes, accessing them
directly as class attributes.
Attributes:
*figs*:
dictionary of the form {*num*: *manager*, ...}
*_activeQue*:
list of *managers*, with active one at the end
"""
_activeQue = []
figs = {}
@classmethod
def get_fig_manager(cls, num):
"""
If figure manager *num* exists, make it the active
figure and return the manager; otherwise return *None*.
"""
manager = cls.figs.get(num, None)
if manager is not None:
cls.set_active(manager)
return manager
@classmethod
def destroy(cls, num):
"""
Try to remove all traces of figure *num*.
In the interactive backends, this is bound to the
window "destroy" and "delete" events.
"""
if not cls.has_fignum(num):
return
manager = cls.figs[num]
manager.canvas.mpl_disconnect(manager._cidgcf)
# There must be a good reason for the following careful
# rebuilding of the activeQue; what is it?
oldQue = cls._activeQue[:]
cls._activeQue = []
for f in oldQue:
if f != manager:
cls._activeQue.append(f)
del cls.figs[num]
manager.destroy()
gc.collect(1)
@classmethod
def destroy_fig(cls, fig):
"*fig* is a Figure instance"
num = None
for manager in six.itervalues(cls.figs):
if manager.canvas.figure == fig:
num = manager.num
break
if num is not None:
cls.destroy(num)
@classmethod
def destroy_all(cls):
        # this is needed to ensure that gc is available in corner cases
# where modules are being torn down after install with easy_install
import gc # noqa
for manager in list(cls.figs.values()):
manager.canvas.mpl_disconnect(manager._cidgcf)
manager.destroy()
cls._activeQue = []
cls.figs.clear()
gc.collect(1)
@classmethod
def has_fignum(cls, num):
"""
Return *True* if figure *num* exists.
"""
return num in cls.figs
@classmethod
def get_all_fig_managers(cls):
"""
Return a list of figure managers.
"""
return list(cls.figs.values())
@classmethod
def get_num_fig_managers(cls):
"""
Return the number of figures being managed.
"""
return len(cls.figs)
@classmethod
def get_active(cls):
"""
Return the manager of the active figure, or *None*.
"""
if len(cls._activeQue) == 0:
return None
else:
return cls._activeQue[-1]
@classmethod
def set_active(cls, manager):
"""
Make the figure corresponding to *manager* the active one.
"""
oldQue = cls._activeQue[:]
cls._activeQue = []
for m in oldQue:
if m != manager:
cls._activeQue.append(m)
cls._activeQue.append(manager)
cls.figs[manager.num] = manager
@classmethod
def draw_all(cls, force=False):
"""
Redraw all figures registered with the pyplot
state machine.
"""
for f_mgr in cls.get_all_fig_managers():
if force or f_mgr.canvas.figure.stale:
f_mgr.canvas.draw_idle()
atexit.register(Gcf.destroy_all)
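# Minimal usage sketch (editorial illustration, assuming a pyplot figure has
# been created):
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     manager = Gcf.get_fig_manager(fig.number)  # also makes it the active figure
#     Gcf.get_active() is manager                # -> True
#     Gcf.destroy(fig.number)                    # removes all traces of the figure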
|
apache-2.0
|
dgoldman916/nyu-python
|
class2/data_analysis_final/perihelion_distance_graph.py
|
1
|
2433
|
#!/usr/bin/env python3
import numpy as np
import getdata as gd
import pandas as pd
import plotly.plotly as py
from plotly.graph_objs import *
class PerihelionGraph:
    '''Graph of all asteroids from the asteroid DataFrame, plotted to show
    each asteroid's distance in relation to the Earth and the Sun.'''
def __init__(self):
self.data = gd.main()
self.diameter = self.data.df['Asteroid Diameter (km)']
self.perihelion = self.data.df['Perihelion Distance (au)']
self.yaxis_vals = list(np.ones(len(self.perihelion)))
self.traces = self.get_traces()
self.layout = self.get_layout()
def get_traces(self):
self.asteroids = Scatter(
x = self.perihelion,
y = self.yaxis_vals,
mode = 'markers',
marker = dict(
color = '#ffffff',
size = self.diameter
),
name = 'Asteroids',
text = self.diameter
)
self.sun = Scatter(
x = [1,1,1,1,1],
y = [1],
mode = 'markers',
name = 'Sun',
text = '1.3914 million km'
)
self.earth = Scatter(
x = [0,0,0,0,0],
y = [1],
mode = 'markers',
name = 'Earth',
text = '12,742 km'
)
self.traces = [ self.asteroids, self.sun, self.earth ]
return self.traces
def get_layout(self):
self.layout = Layout(
title = 'Asteroid distances from Earth in Astronomical Units (AU)',
xaxis = dict(title = 'Astronomical Units (AU)'),
height=400,
width = 1000,
plot_bgcolor = '#000000',
)
return self.layout
def show(self):
self.data = Data(self.traces)
self.fig = Figure(data=self.data, layout=self.layout)
py.plot(self.fig, filename = 'asteroids-perihelion-distances-graph')
return self.fig
def main():
perihelionGraph = PerihelionGraph()
perihelionGraph.show()
return perihelionGraph
if __name__ == "__main__":
main()
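# Usage sketch (editorial illustration): main() builds the graph and uploads
# it through the plotly credentials configured for py.plot, e.g.
#     graph = PerihelionGraph()
#     fig = graph.show()   # returns the assembled plotly Figure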
|
mit
|
francesco-mannella/dmp-esn
|
parametric/parametric_dmp/bin/tr_datasets/e_cursive_curves/data/plot.py
|
1
|
2181
|
#!/usr/bin/env python
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
pathname = os.path.dirname(sys.argv[0])
if pathname:
os.chdir(pathname)
n_dim = None
def get_trajectories(pattern):
trs = []
names = glob.glob(pattern)
names.sort()
for fname in names:
t = np.loadtxt(fname)
trs.append(t)
return trs
trains = get_trajectories("trajectories/tl*")
tests = get_trajectories("trajectories/tt*")
train_results = get_trajectories("results/rtl*")
test_results = get_trajectories("results/rtt*")
ltrain = None
ltest = None
lalltrain = []
lalltest = []
idcs = np.arange(len(trains))
theo_train = {
'color': [1,.6,.6],
'lw': 5,
'zorder': 2,
'label': "Training"
}
repr_train = {
'color': [.3,0,0],
'lw': 1.5,
'zorder': 3,
'label': "Training repr."
}
theo_test = {
'color': [.6,.6,1],
'lw': 5,
'zorder': 2,
'label': "Test"
}
repr_test = {
'color': [0,0,.3],
'lw': 1.5,
'zorder': 3,
'label': "Test repr"
}
def common_plot(ax, d, label, color, lw, zorder):
h, = ax.plot(d[:,1]+d[:,7]*8, d[:,2],
color=color, lw=lw, zorder=zorder,
label=label)
return h
def plot_trajectories(ax, ttype, lall, **kargs):
idcs = np.arange(len(ttype))
for d,i in zip(ttype, idcs):
if i == 0:
lplot = common_plot(ax, d, **kargs)
lall.append(lplot)
else:
common_plot(ax, d, **kargs)
fig = plt.figure("DMP Stulp", figsize=(14,5))
ax = fig.add_subplot(211, aspect="equal")
plot_trajectories(ax, trains, lalltrain, **theo_train)
plot_trajectories(ax, train_results, lalltrain, **repr_train)
ax.set_xlim([-0.5,11])
ax.set_ylim([-0.1,1.3])
ax.legend(handles=lalltrain)
ax = fig.add_subplot(212, aspect="equal")
plot_trajectories(ax, tests, lalltest, **theo_test)
plot_trajectories(ax, test_results, lalltest, **repr_test)
ax.set_xlim([-0.5,11])
ax.set_ylim([-0.1,1.3])
ax.legend(handles=lalltest)
plt.tight_layout()
plt.show()
|
gpl-2.0
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/sklearn/linear_model/ransac.py
|
8
|
19390
|
# coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
from ..utils.validation import has_fit_parameter
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
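# Worked example (editorial illustration, not part of the original module):
# with 50 inliers out of 100 samples, min_samples=2 and probability=0.99,
#     inlier_ratio = 0.5, nom = 0.01, denom = 1 - 0.5**2 = 0.75
#     trials = ceil(log(0.01) / log(0.75)) = ceil(16.01) = 17
# so _dynamic_max_trials(50, 100, 2, 0.99) returns 17.0.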
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
max_skips : int, optional
Maximum number of iterations that can be skipped due to finding zero
inliers or invalid data defined by ``is_data_valid`` or invalid models
defined by ``is_model_valid``.
.. versionadded:: 0.19
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if the score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
.. deprecated:: 0.18
``residual_metric`` is deprecated from 0.18 and will be removed in
0.20. Use ``loss`` instead.
loss : string, callable, optional, default "absolute_loss"
String inputs, "absolute_loss" and "squared_loss" are supported which
find the absolute loss and squared loss per sample
respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the i-th value of the array corresponding to the loss
on ``X[i]``.
If the loss on a sample is greater than the ``residual_threshold``,
then this sample is classified as an outlier.
random_state : int, RandomState instance or None, optional, default None
The generator used to initialize the centers. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
n_skips_no_inliers_ : int
Number of iterations skipped due to finding zero inliers.
.. versionadded:: 0.19
n_skips_invalid_data_ : int
Number of iterations skipped due to invalid data defined by
``is_data_valid``.
.. versionadded:: 0.19
n_skips_invalid_model_ : int
Number of iterations skipped due to an invalid model defined by
``is_model_valid``.
.. versionadded:: 0.19
References
----------
.. [1] https://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100, max_skips=np.inf,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
loss='absolute_loss', random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.max_skips = max_skips
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
sample_weight : array-like, shape = [n_samples]
            Individual weights for each sample. Raises an error if
            sample_weight is passed and the base_estimator's fit method
            does not support it.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is not None:
warnings.warn(
"'residual_metric' was deprecated in version 0.18 and "
"will be removed in version 0.20. Use 'loss' instead.",
DeprecationWarning)
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
else:
raise ValueError(
"loss should be 'absolute_loss', 'squared_loss' or a callable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
n_inliers_best = 1
score_best = -np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
self.n_skips_no_inliers_ = 0
self.n_skips_invalid_data_ = 0
self.n_skips_invalid_model_ = 0
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
self.n_trials_ = 0
max_trials = self.max_trials
while self.n_trials_ < max_trials:
self.n_trials_ += 1
if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips:
break
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
self.n_skips_invalid_data_ += 1
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
self.n_skips_invalid_model_ += 1
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
# XXX: Deprecation: Remove this if block in 0.20
if self.residual_metric is not None:
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = self.residual_metric(diff)
else:
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
            # fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
self.n_skips_no_inliers_ += 1
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
max_trials = min(
max_trials,
_dynamic_max_trials(n_inliers_best, n_samples,
min_samples, self.stop_probability))
# break if sufficient number of inliers or score is reached
if n_inliers_best >= self.stop_n_inliers or \
score_best >= self.stop_score:
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
if ((self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips):
raise ValueError(
"RANSAC skipped more iterations than `max_skips` without"
" finding a valid consensus set. Iterations were skipped"
" because each randomly chosen sub-sample failed the"
" passing criteria. See estimator attributes for"
" diagnostics (n_skips*).")
else:
raise ValueError(
"RANSAC could not find a valid consensus set. All"
" `max_trials` iterations were skipped because each"
" randomly chosen sub-sample failed the passing criteria."
" See estimator attributes for diagnostics (n_skips*).")
else:
if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips:
warnings.warn("RANSAC found a valid consensus set but exited"
" early due to skipping more iterations than"
" `max_skips`. See estimator attributes for"
" diagnostics (n_skips*).",
UserWarning)
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
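# Minimal usage sketch (editorial illustration; synthetic data, default
# LinearRegression base estimator):
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.normal(size=(100, 1))
#     y = 3 * X.ravel() + 1
#     y[::10] += 50 * rng.normal(size=10)      # gross outliers
#     ransac = RANSACRegressor(random_state=0).fit(X, y)
#     ransac.inlier_mask_                      # boolean mask of the inliers
#     ransac.predict(np.array([[2.0]]))        # close to 3 * 2 + 1 = 7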
|
mit
|
bloyl/mne-python
|
mne/viz/utils.py
|
4
|
91045
|
# -*- coding: utf-8 -*-
"""Utility functions for plotting M/EEG data."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Mainak Jas <[email protected]>
# Stefan Appelhoff <[email protected]>
# Clemens Brunner <[email protected]>
# Daniel McCloy <[email protected]>
#
# License: Simplified BSD
from collections import defaultdict
from contextlib import contextmanager
from functools import partial
import difflib
import webbrowser
import tempfile
import math
import numpy as np
from copy import deepcopy
from distutils.version import LooseVersion
import warnings
from datetime import datetime
from ..defaults import _handle_default
from ..io import show_fiff, Info
from ..io.constants import FIFF
from ..io.pick import (channel_type, channel_indices_by_type, pick_channels,
_pick_data_channels, _DATA_CH_TYPES_SPLIT,
_DATA_CH_TYPES_ORDER_DEFAULT, _VALID_CHANNEL_TYPES,
pick_info, _picks_by_type, pick_channels_cov,
_contains_ch_type)
from ..io.meas_info import create_info
from ..rank import compute_rank
from ..io.proj import setup_proj
from ..utils import (verbose, get_config, warn, _check_ch_locs, _check_option,
logger, fill_doc, _pl, _check_sphere, _ensure_int)
from ..transforms import apply_trans
_channel_type_prettyprint = {'eeg': "EEG channel", 'grad': "Gradiometer",
'mag': "Magnetometer", 'seeg': "sEEG channel",
'dbs': "DBS channel", 'eog': "EOG channel",
'ecg': "ECG sensor", 'emg': "EMG sensor",
'ecog': "ECoG channel",
'misc': "miscellaneous sensor"}
def _setup_vmin_vmax(data, vmin, vmax, norm=False):
"""Handle vmin and vmax parameters for visualizing topomaps.
For the normal use-case (when `vmin` and `vmax` are None), the parameter
`norm` drives the computation. When norm=False, data is supposed to come
    from a mag and the output tuple (vmin, vmax) is the symmetric range
(-x, x) where x is the max(abs(data)). When norm=True (a.k.a. data is the
L2 norm of a gradiometer pair) the output tuple corresponds to (0, x).
Otherwise, vmin and vmax are callables that drive the operation.
"""
should_warn = False
if vmax is None and vmin is None:
vmax = np.abs(data).max()
vmin = 0. if norm else -vmax
if vmin == 0 and np.min(data) < 0:
should_warn = True
else:
if callable(vmin):
vmin = vmin(data)
elif vmin is None:
vmin = 0. if norm else np.min(data)
if vmin == 0 and np.min(data) < 0:
should_warn = True
if callable(vmax):
vmax = vmax(data)
elif vmax is None:
vmax = np.max(data)
if should_warn:
warn_msg = ("_setup_vmin_vmax output a (min={vmin}, max={vmax})"
" range whereas the minimum of data is {data_min}")
warn_val = {'vmin': vmin, 'vmax': vmax, 'data_min': np.min(data)}
warn(warn_msg.format(**warn_val), UserWarning)
return vmin, vmax
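# Worked example (editorial illustration): for data spanning [-3, 5],
#     _setup_vmin_vmax(data, None, None, norm=False) -> (-5.0, 5.0)
#     _setup_vmin_vmax(data, None, None, norm=True)  -> (0.0, 5.0)
# and only the norm=True call triggers the warning above, because there
# vmin == 0 while min(data) < 0.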
def plt_show(show=True, fig=None, **kwargs):
"""Show a figure while suppressing warnings.
Parameters
----------
show : bool
Show the figure.
fig : instance of Figure | None
If non-None, use fig.show().
**kwargs : dict
Extra arguments for :func:`matplotlib.pyplot.show`.
"""
from matplotlib import get_backend
import matplotlib.pyplot as plt
if show and get_backend() != 'agg':
(fig or plt).show(**kwargs)
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
"""Adjust subplot parameters to give specified padding.
.. note:: For plotting please use this function instead of
``plt.tight_layout``.
Parameters
----------
pad : float
Padding between the figure edge and the edges of subplots, as a
fraction of the font-size.
h_pad : float
Padding height between edges of adjacent subplots.
Defaults to ``pad_inches``.
w_pad : float
Padding width between edges of adjacent subplots.
Defaults to ``pad_inches``.
fig : instance of Figure
Figure to apply changes to.
Notes
-----
This will not force constrained_layout=False if the figure was created
with that method.
"""
import matplotlib.pyplot as plt
fig = plt.gcf() if fig is None else fig
fig.canvas.draw()
constrained = fig.get_constrained_layout()
if constrained:
return # no-op
try: # see https://github.com/matplotlib/matplotlib/issues/2654
with warnings.catch_warnings(record=True) as ws:
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
except Exception:
try:
with warnings.catch_warnings(record=True) as ws:
fig.set_tight_layout(dict(pad=pad, h_pad=h_pad, w_pad=w_pad))
except Exception:
warn('Matplotlib function "tight_layout" is not supported.'
' Skipping subplot adjustment.')
return
for w in ws:
w_msg = str(w.message) if hasattr(w, 'message') else w.get_message()
if not w_msg.startswith('This figure includes Axes'):
warn(w_msg, w.category, 'matplotlib')
def _check_delayed_ssp(container):
"""Handle interactive SSP selection."""
if container.proj is True or\
all(p['active'] for p in container.info['projs']):
raise RuntimeError('Projs are already applied. Please initialize'
' the data with proj set to False.')
elif len(container.info['projs']) < 1:
raise RuntimeError('No projs found in evoked.')
def _validate_if_list_of_axes(axes, obligatory_len=None):
"""Validate whether input is a list/array of axes."""
from matplotlib.axes import Axes
if obligatory_len is not None and not isinstance(obligatory_len, int):
        raise ValueError('obligatory_len must be None or int, got %s '
                         'instead' % type(obligatory_len))
if not isinstance(axes, (list, np.ndarray)):
raise ValueError('axes must be a list or numpy array of matplotlib '
'axes objects, got %s instead.' % type(axes))
if isinstance(axes, np.ndarray) and axes.ndim > 1:
raise ValueError('if input is a numpy array, it must be '
'one-dimensional. The received numpy array has %d '
'dimensions however. Try using ravel or flatten '
'method of the array.' % axes.ndim)
is_correct_type = np.array([isinstance(x, Axes)
for x in axes])
if not np.all(is_correct_type):
first_bad = np.where(np.logical_not(is_correct_type))[0][0]
raise ValueError('axes must be a list or numpy array of matplotlib '
'axes objects while one of the list elements is '
'%s.' % type(axes[first_bad]))
if obligatory_len is not None and not len(axes) == obligatory_len:
raise ValueError('axes must be a list/array of length %d, while the'
' length is %d' % (obligatory_len, len(axes)))
def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
"""Return a colormap similar to that used by mne_analyze.
Parameters
----------
limits : list (or array) of length 3 or 6
Bounds for the colormap, which will be mirrored across zero if length
3, or completely specified (and potentially asymmetric) if length 6.
format : str
Type of colormap to return. If 'matplotlib', will return a
matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
return an RGBA array of shape (256, 4).
Returns
-------
cmap : instance of colormap | array
A teal->blue->gray->red->yellow colormap. See docstring of the 'format'
argument for further details.
Notes
-----
    This will return a colormap that will display correctly for data
that are scaled by the plotting function to span [-fmax, fmax].
""" # noqa: E501
# Ensure limits is an array
limits = np.asarray(limits, dtype='float')
if len(limits) != 3 and len(limits) != 6:
raise ValueError('limits must have 3 or 6 elements')
if len(limits) == 3 and any(limits < 0.):
raise ValueError('if 3 elements, limits must all be non-negative')
if any(np.diff(limits) <= 0):
raise ValueError('limits must be monotonically increasing')
if format == 'matplotlib':
from matplotlib import colors
if len(limits) == 3:
limits = (np.concatenate((-np.flipud(limits), limits)) +
limits[-1]) / (2 * limits[-1])
else:
limits = (limits - np.min(limits)) / np.max(limits -
np.min(limits))
cdict = {'red': ((limits[0], 0.0, 0.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 1.0, 1.0),
(limits[5], 1.0, 1.0)),
'green': ((limits[0], 1.0, 1.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 1.0, 1.0)),
'blue': ((limits[0], 1.0, 1.0),
(limits[1], 1.0, 1.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 0.0, 0.0)),
'alpha': ((limits[0], 1.0, 1.0),
(limits[1], 1.0, 1.0),
(limits[2], 0.0, 0.0),
(limits[3], 0.0, 0.0),
(limits[4], 1.0, 1.0),
(limits[5], 1.0, 1.0)),
}
return colors.LinearSegmentedColormap('mne_analyze', cdict)
elif format == 'mayavi':
if len(limits) == 3:
limits = np.concatenate((-np.flipud(limits), [0], limits)) /\
limits[-1]
else:
limits = np.concatenate((limits[:3], [0], limits[3:]))
limits /= np.max(np.abs(limits))
r = np.array([0, 0, 0, 0, 1, 1, 1])
g = np.array([1, 0, 0, 0, 0, 0, 1])
b = np.array([1, 1, 1, 0, 0, 0, 0])
a = np.array([1, 1, 0, 0, 0, 1, 1])
xp = (np.arange(256) - 128) / 128.0
colormap = np.r_[[np.interp(xp, limits, 255 * c)
for c in [r, g, b, a]]].T
return colormap
else:
raise ValueError('format must be either matplotlib or mayavi')
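# Usage sketch (editorial illustration): both branches above can be exercised
# with the default limits, e.g.
#     cmap = mne_analyze_colormap(limits=[5, 10, 15], format='matplotlib')
#     rgba = mne_analyze_colormap(limits=[5, 10, 15], format='mayavi')
# where `cmap` is a LinearSegmentedColormap and `rgba` is an array of shape
# (256, 4).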
@contextmanager
def _events_off(obj):
obj.eventson = False
try:
yield
finally:
obj.eventson = True
def _toggle_proj(event, params, all_=False):
"""Perform operations when proj boxes clicked."""
# read options if possible
if 'proj_checks' in params:
bools = list(params['proj_checks'].get_status())
if all_:
new_bools = [not all(bools)] * len(bools)
with _events_off(params['proj_checks']):
for bi, (old, new) in enumerate(zip(bools, new_bools)):
if old != new:
params['proj_checks'].set_active(bi)
bools[bi] = new
for bi, (b, p) in enumerate(zip(bools, params['projs'])):
# see if they tried to deactivate an active one
if not b and p['active']:
bools[bi] = True
else:
proj = params.get('apply_proj', True)
bools = [proj] * len(params['projs'])
compute_proj = False
if 'proj_bools' not in params:
compute_proj = True
elif not np.array_equal(bools, params['proj_bools']):
compute_proj = True
# if projectors changed, update plots
if compute_proj is True:
params['plot_update_proj_callback'](params, bools)
def _get_channel_plotting_order(order, ch_types, picks=None):
"""Determine channel plotting order for browse-style Raw/Epochs plots."""
if order is None:
# for backward compat, we swap the first two to keep grad before mag
ch_type_order = list(_DATA_CH_TYPES_ORDER_DEFAULT)
ch_type_order = tuple(['grad', 'mag'] + ch_type_order[2:])
order = [pick_idx for order_type in ch_type_order
for pick_idx, pick_type in enumerate(ch_types)
if order_type == pick_type]
elif not isinstance(order, (np.ndarray, list, tuple)):
raise ValueError('order should be array-like; got '
f'"{order}" ({type(order)}).')
if picks is not None:
order = [ch for ch in order if ch in picks]
return np.asarray(order)
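# Example (editorial illustration): with ch_types = ['mag', 'grad', 'eeg'] and
# order=None, the default type order puts 'grad' before 'mag', so the function
# returns array([1, 0, 2]); additionally passing picks=[0, 2] reduces this to
# array([0, 2]).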
def _make_event_color_dict(event_color, events=None, event_id=None):
"""Make or validate a dict mapping event ids to colors."""
from .misc import _handle_event_colors
if isinstance(event_color, dict): # if event_color is a dict, validate it
event_id = dict() if event_id is None else event_id
event_color = {_ensure_int(event_id.get(key, key), 'event_color key'):
value for key, value in event_color.items()}
default = event_color.pop(-1, None)
default_factory = None if default is None else lambda: default
new_dict = defaultdict(default_factory)
for key, value in event_color.items():
if key < 1:
raise KeyError('event_color keys must be strictly positive, '
f'or -1 (cannot use {key})')
new_dict[key] = value
return new_dict
elif event_color is None: # make a dict from color cycle
uniq_events = set() if events is None else np.unique(events[:, 2])
return _handle_event_colors(event_color, uniq_events, event_id)
else: # if event_color is a MPL color-like thing, use it for all events
return defaultdict(lambda: event_color)
def _prepare_trellis(n_cells, ncols, nrows='auto', title=False, colorbar=False,
size=1.3, sharex=False, sharey=False):
from matplotlib.gridspec import GridSpec
from ._figure import _figure
if n_cells == 1:
nrows = ncols = 1
elif isinstance(ncols, int) and n_cells <= ncols:
nrows, ncols = 1, n_cells
else:
if ncols == 'auto' and nrows == 'auto':
nrows = math.floor(math.sqrt(n_cells))
ncols = math.ceil(n_cells / nrows)
elif ncols == 'auto':
ncols = math.ceil(n_cells / nrows)
elif nrows == 'auto':
nrows = math.ceil(n_cells / ncols)
else:
naxes = ncols * nrows
if naxes < n_cells:
raise ValueError("Cannot plot {} axes in a {} by {} "
"figure.".format(n_cells, nrows, ncols))
if colorbar:
ncols += 1
width = size * ncols
height = (size + max(0, 0.1 * (4 - size))) * nrows + bool(title) * 0.5
height_ratios = None
fig = _figure(toolbar=False, figsize=(width * 1.5, 0.25 + height * 1.5))
gs = GridSpec(nrows, ncols, figure=fig, height_ratios=height_ratios)
axes = []
if colorbar:
# exclude last axis of each row except top row, which is for colorbar
exclude = set(range(2 * ncols - 1, nrows * ncols, ncols))
ax_idxs = sorted(set(range(nrows * ncols)) - exclude)[:n_cells + 1]
else:
ax_idxs = range(n_cells)
for ax_idx in ax_idxs:
subplot_kw = dict()
if ax_idx > 0:
if sharex:
subplot_kw.update(sharex=axes[0])
if sharey:
subplot_kw.update(sharey=axes[0])
axes.append(fig.add_subplot(gs[ax_idx], **subplot_kw))
return fig, axes, ncols, nrows
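# Worked example (editorial illustration): _prepare_trellis(7, ncols='auto')
# picks nrows = floor(sqrt(7)) = 2 and ncols = ceil(7 / 2) = 4, i.e. a 2 x 4
# grid with one unused cell; with colorbar=True an extra column is added and,
# per the comment above, the top-right cell is reserved for the colorbar.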
def _draw_proj_checkbox(event, params, draw_current_state=True):
"""Toggle options (projectors) dialog."""
from matplotlib import widgets
projs = params['projs']
# turn on options dialog
labels = [p['desc'] for p in projs]
actives = ([p['active'] for p in projs] if draw_current_state else
params.get('proj_bools', [params['apply_proj']] * len(projs)))
width = max([4., max([len(p['desc']) for p in projs]) / 6.0 + 0.5])
height = (len(projs) + 1) / 6.0 + 1.5
fig_proj = figure_nobar(figsize=(width, height))
_set_window_title(fig_proj, 'SSP projection vectors')
offset = (1. / 6. / height)
params['fig_proj'] = fig_proj # necessary for proper toggling
ax_temp = fig_proj.add_axes((0, offset, 1, 0.8 - offset), frameon=False)
ax_temp.set_title('Projectors marked with "X" are active')
proj_checks = widgets.CheckButtons(ax_temp, labels=labels, actives=actives)
# make edges around checkbox areas
for rect in proj_checks.rectangles:
rect.set_edgecolor('0.5')
rect.set_linewidth(1.)
# change already-applied projectors to red
for ii, p in enumerate(projs):
if p['active']:
for x in proj_checks.lines[ii]:
x.set_color('#ff0000')
# make minimal size
# pass key presses from option dialog over
proj_checks.on_clicked(partial(_toggle_proj, params=params))
params['proj_checks'] = proj_checks
fig_proj.canvas.mpl_connect('key_press_event', _key_press)
# Toggle all
ax_temp = fig_proj.add_axes((0, 0, 1, offset), frameon=False)
proj_all = widgets.Button(ax_temp, 'Toggle all')
proj_all.on_clicked(partial(_toggle_proj, params=params, all_=True))
params['proj_all'] = proj_all
# this should work for non-test cases
try:
fig_proj.canvas.draw()
plt_show(fig=fig_proj, warn=False)
except Exception:
pass
def _simplify_float(label):
# Heuristic to turn floats to ints where possible (e.g. -500.0 to -500)
if isinstance(label, float) and np.isfinite(label) and \
float(str(label)) != round(label):
label = round(label, 2)
return label
def _get_figsize_from_config():
"""Get default / most recent figure size from config."""
figsize = get_config('MNE_BROWSE_RAW_SIZE')
if figsize is not None:
figsize = figsize.split(',')
figsize = tuple([float(s) for s in figsize])
return figsize
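# Example (editorial illustration): if the MNE config key
# 'MNE_BROWSE_RAW_SIZE' is set to the string "12.0,8.0", this returns the
# tuple (12.0, 8.0); if the key is unset, it returns None.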
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent=' ',
read_limit=np.inf, max_str=30, verbose=None):
"""Compare the contents of two fiff files using diff and show_fiff.
Parameters
----------
fname_1 : str
First file to compare.
fname_2 : str
Second file to compare.
fname_out : str | None
Filename to store the resulting diff. If None, a temporary
file will be created.
show : bool
If True, show the resulting diff in a new tab in a web browser.
indent : str
How to indent the lines.
read_limit : int
Max number of bytes of data to read from a tag. Can be np.inf
to always read all data (helps test read completion).
max_str : int
Max number of characters of string representation to print for
each tag's data.
%(verbose)s
Returns
-------
fname_out : str
The filename used for storing the diff. Could be useful for
when a temporary file is used.
"""
file_1 = show_fiff(fname_1, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
file_2 = show_fiff(fname_2, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
if fname_out is not None:
f = open(fname_out, 'wb')
else:
f = tempfile.NamedTemporaryFile('wb', delete=False, suffix='.html')
fname_out = f.name
with f as fid:
fid.write(diff.encode('utf-8'))
if show is True:
webbrowser.open_new_tab(fname_out)
return fname_out
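# Usage sketch (editorial illustration; the .fif file names are placeholders):
#     html_path = compare_fiff('sample_raw.fif', 'sample_filtered_raw.fif',
#                              show=False)
# writes an HTML diff of the two files' tag structures to a temporary file and
# returns its path; with show=True the diff is also opened in a web browser.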
def figure_nobar(*args, **kwargs):
"""Make matplotlib figure with no toolbar.
Parameters
----------
*args : list
Arguments to pass to :func:`matplotlib.pyplot.figure`.
**kwargs : dict
Keyword arguments to pass to :func:`matplotlib.pyplot.figure`.
Returns
-------
fig : instance of Figure
The figure.
"""
from matplotlib import rcParams, pyplot as plt
old_val = rcParams['toolbar']
try:
rcParams['toolbar'] = 'none'
fig = plt.figure(*args, **kwargs)
# remove button press catchers (for toolbar)
cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
for key in cbs:
fig.canvas.callbacks.disconnect(key)
finally:
rcParams['toolbar'] = old_val
return fig
def _show_help_fig(col1, col2, fig_help, ax, show):
_set_window_title(fig_help, 'Help')
celltext = [[c1, c2] for c1, c2 in zip(col1.strip().split("\n"),
col2.strip().split("\n"))]
table = ax.table(cellText=celltext, loc="center", cellLoc="left")
table.auto_set_font_size(False)
table.set_fontsize(12)
ax.set_axis_off()
for (row, col), cell in table.get_celld().items():
cell.set_edgecolor(None) # remove cell borders
# right justify, following:
# https://stackoverflow.com/questions/48210749/matplotlib-table-assign-different-text-alignments-to-different-columns?rq=1 # noqa: E501
if col == 0:
cell._loc = 'right'
fig_help.canvas.mpl_connect('key_press_event', _key_press)
if show:
# this should work for non-test cases
try:
fig_help.canvas.draw()
plt_show(fig=fig_help, warn=False)
except Exception:
pass
def _show_help(col1, col2, width, height):
fig_help = figure_nobar(figsize=(width, height), dpi=80)
ax = fig_help.add_subplot(111)
_show_help_fig(col1, col2, fig_help, ax, show=True)
def _key_press(event):
"""Handle key press in dialog."""
import matplotlib.pyplot as plt
if event.key == 'escape':
plt.close(event.canvas.figure)
class ClickableImage(object):
"""Display an image so you can click on it and store x/y positions.
Takes as input an image array (can be any array that works with imshow,
    but will work best with images). Displays the image and lets you
    click on it. Stores the x/y coordinates of each click, so you can
superimpose something on top of it.
Upon clicking, the x/y coordinate of the cursor will be stored in
self.coords, which is a list of (x, y) tuples.
Parameters
----------
imdata : ndarray
The image that you wish to click on for 2-d points.
**kwargs : dict
Keyword arguments. Passed to ax.imshow.
Notes
-----
.. versionadded:: 0.9.0
"""
def __init__(self, imdata, **kwargs):
"""Display the image for clicking."""
import matplotlib.pyplot as plt
self.coords = []
self.imdata = imdata
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.ymax = self.imdata.shape[0]
self.xmax = self.imdata.shape[1]
self.im = self.ax.imshow(imdata,
extent=(0, self.xmax, 0, self.ymax),
picker=True, **kwargs)
self.ax.axis('off')
self.fig.canvas.mpl_connect('pick_event', self.onclick)
plt_show(block=True)
def onclick(self, event):
"""Handle Mouse clicks.
Parameters
----------
event : matplotlib.backend_bases.Event
The matplotlib object that we use to get x/y position.
"""
mouseevent = event.mouseevent
self.coords.append((mouseevent.xdata, mouseevent.ydata))
def plot_clicks(self, **kwargs):
"""Plot the x/y positions stored in self.coords.
Parameters
----------
**kwargs : dict
Arguments are passed to imshow in displaying the bg image.
"""
import matplotlib.pyplot as plt
if len(self.coords) == 0:
raise ValueError('No coordinates found, make sure you click '
'on the image that is first shown.')
f, ax = plt.subplots()
ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs)
xlim, ylim = [ax.get_xlim(), ax.get_ylim()]
xcoords, ycoords = zip(*self.coords)
ax.scatter(xcoords, ycoords, c='#ff0000')
ann_text = np.arange(len(self.coords)).astype(str)
for txt, coord in zip(ann_text, self.coords):
ax.annotate(txt, coord, fontsize=20, color='#ff0000')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
plt_show()
def to_layout(self, **kwargs):
"""Turn coordinates into an MNE Layout object.
        Normalizes by the image you used to generate clicks.
Parameters
----------
**kwargs : dict
Arguments are passed to generate_2d_layout.
Returns
-------
layout : instance of Layout
The layout.
"""
from ..channels.layout import generate_2d_layout
coords = np.array(self.coords)
lt = generate_2d_layout(coords, bg_image=self.imdata, **kwargs)
return lt
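# Usage sketch (editorial illustration, assuming `im` is a 2-D image array):
#     click = ClickableImage(im)     # blocks until the figure is closed
#     click.plot_clicks()            # re-plot the clicked (x, y) positions
#     layout = click.to_layout()     # convert the clicks to an MNE Layout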
def _fake_click(fig, ax, point, xform='ax', button=1, kind='press'):
"""Fake a click at a relative point within axes."""
if xform == 'ax':
x, y = ax.transAxes.transform_point(point)
elif xform == 'data':
x, y = ax.transData.transform_point(point)
else:
assert xform == 'pix'
x, y = point
if kind == 'press':
func = partial(fig.canvas.button_press_event, x=x, y=y, button=button)
elif kind == 'release':
func = partial(fig.canvas.button_release_event, x=x, y=y,
button=button)
elif kind == 'motion':
func = partial(fig.canvas.motion_notify_event, x=x, y=y)
func(guiEvent=None)
def add_background_image(fig, im, set_ratios=None):
"""Add a background image to a plot.
Adds the image specified in ``im`` to the
figure ``fig``. This is generally meant to
be done with topo plots, though it could work
for any plot.
.. note:: This modifies the figure and/or axes in place.
Parameters
----------
fig : Figure
The figure you wish to add a bg image to.
im : array, shape (M, N, {3, 4})
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
set_ratios : None | str
Set the aspect ratio of any axes in fig
to the value in set_ratios. Defaults to None,
which does nothing to axes.
Returns
-------
ax_im : instance of Axes
Axes created corresponding to the image you added.
Notes
-----
.. versionadded:: 0.9.0
"""
if im is None:
# Don't do anything and return nothing
return None
if set_ratios is not None:
for ax in fig.axes:
ax.set_aspect(set_ratios)
ax_im = fig.add_axes([0, 0, 1, 1], label='background')
ax_im.imshow(im, aspect='auto')
ax_im.set_zorder(-1)
return ax_im
def _find_peaks(evoked, npeaks):
"""Find peaks from evoked data.
Returns ``npeaks`` biggest peaks as a list of time points.
"""
from scipy.signal import argrelmax
gfp = evoked.data.std(axis=0)
order = len(evoked.times) // 30
if order < 1:
order = 1
peaks = argrelmax(gfp, order=order, axis=0)[0]
if len(peaks) > npeaks:
max_indices = np.argsort(gfp[peaks])[-npeaks:]
peaks = np.sort(peaks[max_indices])
times = evoked.times[peaks]
if len(times) == 0:
times = [evoked.times[gfp.argmax()]]
return times
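# Example (editorial illustration): for an evoked with 300 time points, order
# becomes 300 // 30 = 10, so a GFP peak must exceed its 10 neighbours on each
# side; if more than `npeaks` peaks survive only the largest are kept, and if
# none survive the time of the global GFP maximum is returned.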
def _process_times(inst, use_times, n_peaks=None, few=False):
"""Return a list of times for topomaps."""
if isinstance(use_times, str):
if use_times == 'interactive':
use_times, n_peaks = 'peaks', 1
if use_times == 'peaks':
if n_peaks is None:
n_peaks = min(3 if few else 7, len(inst.times))
use_times = _find_peaks(inst, n_peaks)
elif use_times == 'auto':
if n_peaks is None:
                n_peaks = min(5 if few else 10, len(inst.times))
use_times = np.linspace(inst.times[0], inst.times[-1], n_peaks)
else:
raise ValueError("Got an unrecognized method for `times`. Only "
"'peaks', 'auto' and 'interactive' are supported "
"(or directly passing numbers).")
elif np.isscalar(use_times):
use_times = [use_times]
use_times = np.array(use_times, float)
if use_times.ndim != 1:
raise ValueError('times must be 1D, got %d dimensions'
% use_times.ndim)
if len(use_times) > 25:
        warn('More than 25 topomap plots requested. This might take a while.')
return use_times
@verbose
def plot_sensors(info, kind='topomap', ch_type=None, title=None,
show_names=False, ch_groups=None, to_sphere=True, axes=None,
block=False, show=True, sphere=None, verbose=None):
"""Plot sensors positions.
Parameters
----------
info : instance of Info
Info structure containing the channel locations.
kind : str
Whether to plot the sensors as 3d, topomap or as an interactive
sensor selection dialog. Available options 'topomap', '3d', 'select'.
If 'select', a set of channels can be selected interactively by using
lasso selector or clicking while holding control key. The selected
channels are returned along with the figure instance. Defaults to
'topomap'.
ch_type : None | str
The channel type to plot. Available options 'mag', 'grad', 'eeg',
'seeg', 'dbs', 'ecog', 'all'. If ``'all'``, all the available mag,
grad, eeg, seeg, dbs and ecog channels are plotted. If None (default),
then channels are chosen in the order given above.
title : str | None
Title for the figure. If None (default), equals to
``'Sensor positions (%%s)' %% ch_type``.
show_names : bool | array of str
Whether to display all channel names. If an array, only the channel
names in the array are shown. Defaults to False.
ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None
Channel groups for coloring the sensors. If None (default), default
coloring scheme is used. If 'position', the sensors are divided
into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
array, the channels are divided by picks given in the array.
.. versionadded:: 0.13.0
to_sphere : bool
Whether to project the 3d locations to a sphere. When False, the
        sensor array appears as if viewed from directly above the subject's
        head. Has no effect when kind='3d'. Defaults to True.
.. versionadded:: 0.14.0
axes : instance of Axes | instance of Axes3D | None
Axes to draw the sensors to. If ``kind='3d'``, axes must be an instance
of Axes3D. If None (default), a new axes will be created.
.. versionadded:: 0.13.0
block : bool
Whether to halt program execution until the figure is closed. Defaults
to False.
.. versionadded:: 0.13.0
show : bool
Show figure if True. Defaults to True.
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
fig : instance of Figure
Figure containing the sensor topography.
selection : list
A list of selected channels. Only returned if ``kind=='select'``.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_alignment`.
.. versionadded:: 0.12.0
"""
from .evoked import _rgb
_check_option('kind', kind, ['topomap', '3d', 'select'])
if not isinstance(info, Info):
raise TypeError('info must be an instance of Info not %s' % type(info))
ch_indices = channel_indices_by_type(info)
allowed_types = _DATA_CH_TYPES_SPLIT
if ch_type is None:
for this_type in allowed_types:
if _contains_ch_type(info, this_type):
ch_type = this_type
break
picks = ch_indices[ch_type]
elif ch_type == 'all':
picks = list()
for this_type in allowed_types:
picks += ch_indices[this_type]
elif ch_type in allowed_types:
picks = ch_indices[ch_type]
else:
raise ValueError("ch_type must be one of %s not %s!" % (allowed_types,
ch_type))
if len(picks) == 0:
raise ValueError('Could not find any channels of type %s.' % ch_type)
chs = [info['chs'][pick] for pick in picks]
if not _check_ch_locs(chs):
raise RuntimeError('No valid channel positions found')
dev_head_t = info['dev_head_t']
pos = np.empty((len(chs), 3))
for ci, ch in enumerate(chs):
pos[ci] = ch['loc'][:3]
if ch['coord_frame'] == FIFF.FIFFV_COORD_DEVICE:
if dev_head_t is None:
warn('dev_head_t is None, transforming MEG sensors to head '
'coordinate frame using identity transform')
dev_head_t = np.eye(4)
pos[ci] = apply_trans(dev_head_t, pos[ci])
del dev_head_t
ch_names = np.array([ch['ch_name'] for ch in chs])
bads = [idx for idx, name in enumerate(ch_names) if name in info['bads']]
if ch_groups is None:
def_colors = _handle_default('color')
colors = ['red' if i in bads else def_colors[channel_type(info, pick)]
for i, pick in enumerate(picks)]
else:
if ch_groups in ['position', 'selection']:
# Avoid circular import
from ..channels import (read_vectorview_selection, _SELECTIONS,
_EEG_SELECTIONS, _divide_to_regions)
if ch_groups == 'position':
ch_groups = _divide_to_regions(info, add_stim=False)
ch_groups = list(ch_groups.values())
else:
ch_groups, color_vals = list(), list()
for selection in _SELECTIONS + _EEG_SELECTIONS:
channels = pick_channels(
info['ch_names'],
read_vectorview_selection(selection, info=info))
ch_groups.append(channels)
color_vals = np.ones((len(ch_groups), 4))
for idx, ch_group in enumerate(ch_groups):
color_picks = [np.where(picks == ch)[0][0] for ch in ch_group
if ch in picks]
if len(color_picks) == 0:
continue
x, y, z = pos[color_picks].T
color = np.mean(_rgb(x, y, z), axis=0)
color_vals[idx, :3] = color # mean of spatial color
else:
import matplotlib.pyplot as plt
colors = np.linspace(0, 1, len(ch_groups))
color_vals = [plt.cm.jet(colors[i]) for i in range(len(ch_groups))]
if not isinstance(ch_groups, (np.ndarray, list)):
raise ValueError("ch_groups must be None, 'position', "
"'selection', or an array. Got %s." % ch_groups)
colors = np.zeros((len(picks), 4))
for pick_idx, pick in enumerate(picks):
for ind, value in enumerate(ch_groups):
if pick in value:
colors[pick_idx] = color_vals[ind]
break
title = 'Sensor positions (%s)' % ch_type if title is None else title
fig = _plot_sensors(pos, info, picks, colors, bads, ch_names, title,
show_names, axes, show, kind, block,
to_sphere, sphere)
if kind == 'select':
return fig, fig.lasso.selection
return fig
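# Usage sketch (editorial illustration, assuming `raw` is an mne.io.Raw
# instance):
#     fig = plot_sensors(raw.info, kind='topomap', ch_type='eeg')
#     fig, selection = plot_sensors(raw.info, kind='select', block=True)
# the second form lets channels be lasso-selected and returns their names in
# `selection`.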
def _onpick_sensor(event, fig, ax, pos, ch_names, show_names):
"""Pick a channel in plot_sensors."""
if event.mouseevent.inaxes != ax:
return
if event.mouseevent.key == 'control' and fig.lasso is not None:
for ind in event.ind:
fig.lasso.select_one(ind)
return
if show_names:
return # channel names already visible
ind = event.ind[0] # Just take the first sensor.
ch_name = ch_names[ind]
this_pos = pos[ind]
# XXX: Bug in matplotlib won't allow setting the position of existing
# text item, so we create a new one.
ax.texts.pop(0)
if len(this_pos) == 3:
ax.text(this_pos[0], this_pos[1], this_pos[2], ch_name)
else:
ax.text(this_pos[0], this_pos[1], ch_name)
fig.canvas.draw()
def _close_event(event, fig):
"""Listen for sensor plotter close event."""
if getattr(fig, 'lasso', None) is not None:
fig.lasso.disconnect()
def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names,
ax, show, kind, block, to_sphere, sphere):
"""Plot sensors."""
from matplotlib import rcParams
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 analysis:ignore
from .topomap import _get_pos_outlines, _draw_outlines
sphere = _check_sphere(sphere, info)
edgecolors = np.repeat(rcParams['axes.edgecolor'], len(colors))
edgecolors[bads] = 'red'
axes_was_none = ax is None
if axes_was_none:
subplot_kw = dict()
if kind == '3d':
subplot_kw.update(projection='3d')
fig, ax = plt.subplots(
1, figsize=(max(rcParams['figure.figsize']),) * 2,
subplot_kw=subplot_kw)
else:
fig = ax.get_figure()
if kind == '3d':
ax.text(0, 0, 0, '', zorder=1)
ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2], picker=True, c=colors,
s=75, edgecolor=edgecolors, linewidth=2)
ax.azim = 90
ax.elev = 0
ax.xaxis.set_label_text('x (m)')
ax.yaxis.set_label_text('y (m)')
ax.zaxis.set_label_text('z (m)')
else: # kind in 'select', 'topomap'
ax.text(0, 0, '', zorder=1)
pos, outlines = _get_pos_outlines(info, picks, sphere,
to_sphere=to_sphere)
_draw_outlines(ax, outlines)
pts = ax.scatter(pos[:, 0], pos[:, 1], picker=True, clip_on=False,
c=colors, edgecolors=edgecolors, s=25, lw=2)
if kind == 'select':
fig.lasso = SelectFromCollection(ax, pts, ch_names)
else:
fig.lasso = None
# Equal aspect for 3D looks bad, so only use for 2D
ax.set(aspect='equal')
if axes_was_none: # we'll show the plot title as the window title
fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
ax.axis("off") # remove border around figure
del sphere
connect_picker = True
if show_names:
if isinstance(show_names, (list, np.ndarray)): # only given channels
indices = [list(ch_names).index(name) for name in show_names]
else: # all channels
indices = range(len(pos))
for idx in indices:
this_pos = pos[idx]
if kind == '3d':
ax.text(this_pos[0], this_pos[1], this_pos[2], ch_names[idx])
else:
ax.text(this_pos[0] + 0.0025, this_pos[1], ch_names[idx],
ha='left', va='center')
connect_picker = (kind == 'select')
if connect_picker:
picker = partial(_onpick_sensor, fig=fig, ax=ax, pos=pos,
ch_names=ch_names, show_names=show_names)
fig.canvas.mpl_connect('pick_event', picker)
if axes_was_none:
_set_window_title(fig, title)
closed = partial(_close_event, fig=fig)
fig.canvas.mpl_connect('close_event', closed)
plt_show(show, block=block)
return fig
def _compute_scalings(scalings, inst, remove_dc=False, duration=10):
"""Compute scalings for each channel type automatically.
Parameters
----------
scalings : dict
The scalings for each channel type. If any values are
'auto', this will automatically compute a reasonable
scaling for that channel type. Any values that aren't
'auto' will not be changed.
inst : instance of Raw or Epochs
The data for which you want to compute scalings. If data
is not preloaded, this will read a subset of times / epochs
        up to 100 MB in size in order to compute scalings.
remove_dc : bool
Whether to remove the mean (DC) before calculating the scalings. If
True, the mean will be computed and subtracted for short epochs in
order to compensate not only for global mean offset, but also for slow
drifts in the signals.
duration : float
If remove_dc is True, the mean will be computed and subtracted on
segments of length ``duration`` seconds.
Returns
-------
scalings : dict
A scalings dictionary with updated values
"""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
scalings = _handle_default('scalings_plot_raw', scalings)
if not isinstance(inst, (BaseRaw, BaseEpochs)):
raise ValueError('Must supply either Raw or Epochs')
ch_types = channel_indices_by_type(inst.info)
ch_types = {i_type: i_ixs
for i_type, i_ixs in ch_types.items() if len(i_ixs) != 0}
scalings = deepcopy(scalings)
if inst.preload is False:
if isinstance(inst, BaseRaw):
# Load a window of data from the center up to 100mb in size
n_times = 1e8 // (len(inst.ch_names) * 8)
n_times = np.clip(n_times, 1, inst.n_times)
n_secs = n_times / float(inst.info['sfreq'])
time_middle = np.mean(inst.times)
tmin = np.clip(time_middle - n_secs / 2., inst.times.min(), None)
tmax = np.clip(time_middle + n_secs / 2., None, inst.times.max())
smin, smax = [
int(round(x * inst.info['sfreq'])) for x in (tmin, tmax)]
data = inst._read_segment(smin, smax)
elif isinstance(inst, BaseEpochs):
# Load a random subset of epochs up to 100mb in size
n_epochs = 1e8 // (len(inst.ch_names) * len(inst.times) * 8)
n_epochs = int(np.clip(n_epochs, 1, len(inst)))
ixs_epochs = np.random.choice(range(len(inst)), n_epochs, False)
inst = inst.copy()[ixs_epochs].load_data()
else:
data = inst._data
if isinstance(inst, BaseEpochs):
data = inst._data.swapaxes(0, 1).reshape([len(inst.ch_names), -1])
    # Iterate through ch types and update scaling if 'auto'
for key, value in scalings.items():
if key not in ch_types:
continue
if not (isinstance(value, str) and value == 'auto'):
try:
scalings[key] = float(value)
except Exception:
raise ValueError(
f'scalings must be "auto" or float, got scalings[{key!r}]='
f'{value!r} which could not be converted to float')
continue
this_data = data[ch_types[key]]
if remove_dc and (this_data.shape[1] / inst.info["sfreq"] >= duration):
length = int(duration * inst.info["sfreq"]) # segment length
# truncate data so that we can divide into segments of equal length
this_data = this_data[:, :this_data.shape[1] // length * length]
shape = this_data.shape # original shape
this_data = this_data.T.reshape(-1, length, shape[0]) # segment
this_data -= np.nanmean(this_data, 0) # subtract segment means
this_data = this_data.T.reshape(shape) # reshape into original
this_data = this_data.ravel()
this_data = this_data[np.isfinite(this_data)]
if this_data.size:
iqr = np.diff(np.percentile(this_data, [25, 75]))[0]
else:
iqr = 1.
scalings[key] = iqr
return scalings
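# Rough usage sketch for the helper above (not part of the module API): any
# entry left as 'auto' is replaced by the IQR of that channel type, while
# explicit numbers pass through unchanged. ``raw`` is a hypothetical
# preloaded Raw instance.
#
#     scalings = _compute_scalings({'eeg': 'auto', 'eog': 150e-6}, raw,
#                                  remove_dc=True)
#     # scalings['eeg'] -> IQR of the EEG data, scalings['eog'] -> 150e-6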
def _setup_cmap(cmap, n_axes=1, norm=False):
"""Set color map interactivity."""
if cmap == 'interactive':
cmap = ('Reds' if norm else 'RdBu_r', True)
elif not isinstance(cmap, tuple):
if cmap is None:
cmap = 'Reds' if norm else 'RdBu_r'
cmap = (cmap, False if n_axes > 2 else True)
return cmap
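# Rough sketch of the mapping implemented above: the return value is always a
# ``(colormap, interactive)`` tuple, e.g.
#
#     _setup_cmap('interactive')        # -> ('RdBu_r', True)
#     _setup_cmap(None, n_axes=3)       # -> ('RdBu_r', False)
#     _setup_cmap(None, norm=True)      # -> ('Reds', True)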
def _prepare_joint_axes(n_maps, figsize=None):
"""Prepare axes for topomaps and colorbar in joint plot figure.
Parameters
----------
    n_maps : int
        Number of topomaps to include in the figure.
    figsize : tuple
        Figure size, passed to ``plt.figure``.
Returns
-------
fig : matplotlib.figure.Figure
Figure with initialized axes
main_ax: matplotlib.axes._subplots.AxesSubplot
Axes in which to put the main plot
map_ax: list
List of axes for each topomap
cbar_ax: matplotlib.axes._subplots.AxesSubplot
Axes for colorbar next to topomaps
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
main_ax = fig.add_subplot(212)
ts = n_maps + 2
map_ax = [plt.subplot(4, ts, x + 2 + ts) for x in range(n_maps)]
# Position topomap subplots on the second row, starting on the
# second column
cbar_ax = plt.subplot(4, 5 * (ts + 1), 10 * (ts + 1))
# Position colorbar at the very end of a more finely divided
# second row of subplots
return fig, main_ax, map_ax, cbar_ax
class DraggableColorbar(object):
"""Enable interactive colorbar.
See http://www.ster.kuleuven.be/~pieterd/python/html/plotting/interactive_colorbar.html
""" # noqa: E501
def __init__(self, cbar, mappable):
import matplotlib.pyplot as plt
self.cbar = cbar
self.mappable = mappable
self.press = None
self.cycle = sorted([i for i in dir(plt.cm) if
hasattr(getattr(plt.cm, i), 'N')])
self.cycle += [mappable.get_cmap().name]
self.index = self.cycle.index(mappable.get_cmap().name)
self.lims = (self.cbar.norm.vmin, self.cbar.norm.vmax)
self.connect()
def connect(self):
"""Connect to all the events we need."""
self.cidpress = self.cbar.patch.figure.canvas.mpl_connect(
'button_press_event', self.on_press)
self.cidrelease = self.cbar.patch.figure.canvas.mpl_connect(
'button_release_event', self.on_release)
self.cidmotion = self.cbar.patch.figure.canvas.mpl_connect(
'motion_notify_event', self.on_motion)
self.keypress = self.cbar.patch.figure.canvas.mpl_connect(
'key_press_event', self.key_press)
self.scroll = self.cbar.patch.figure.canvas.mpl_connect(
'scroll_event', self.on_scroll)
def on_press(self, event):
"""Handle button press."""
if event.inaxes != self.cbar.ax:
return
self.press = event.y
def key_press(self, event):
"""Handle key press."""
# print(event.key)
scale = self.cbar.norm.vmax - self.cbar.norm.vmin
perc = 0.03
if event.key == 'down':
self.index += 1
elif event.key == 'up':
self.index -= 1
elif event.key == ' ': # space key resets scale
self.cbar.norm.vmin = self.lims[0]
self.cbar.norm.vmax = self.lims[1]
        elif event.key == '+':  # narrow the color range (increase contrast)
            self.cbar.norm.vmin -= (perc * scale) * -1
            self.cbar.norm.vmax += (perc * scale) * -1
        elif event.key == '-':  # widen the color range (decrease contrast)
            self.cbar.norm.vmin -= (perc * scale) * 1
            self.cbar.norm.vmax += (perc * scale) * 1
        elif event.key == 'pageup':  # shift both color limits down
            self.cbar.norm.vmin -= (perc * scale) * 1
            self.cbar.norm.vmax -= (perc * scale) * 1
        elif event.key == 'pagedown':  # shift both color limits up
            self.cbar.norm.vmin -= (perc * scale) * -1
            self.cbar.norm.vmax -= (perc * scale) * -1
else:
return
if self.index < 0:
self.index = len(self.cycle) - 1
elif self.index >= len(self.cycle):
self.index = 0
cmap = self.cycle[self.index]
self.cbar.mappable.set_cmap(cmap)
self.cbar.draw_all()
self.mappable.set_cmap(cmap)
self._update()
def on_motion(self, event):
"""Handle mouse movements."""
if self.press is None:
return
if event.inaxes != self.cbar.ax:
return
yprev = self.press
dy = event.y - yprev
self.press = event.y
scale = self.cbar.norm.vmax - self.cbar.norm.vmin
perc = 0.03
if event.button == 1:
self.cbar.norm.vmin -= (perc * scale) * np.sign(dy)
self.cbar.norm.vmax -= (perc * scale) * np.sign(dy)
elif event.button == 3:
self.cbar.norm.vmin -= (perc * scale) * np.sign(dy)
self.cbar.norm.vmax += (perc * scale) * np.sign(dy)
self._update()
def on_release(self, event):
"""Handle release."""
self.press = None
self._update()
def on_scroll(self, event):
"""Handle scroll."""
scale = 1.1 if event.step < 0 else 1. / 1.1
self.cbar.norm.vmin *= scale
self.cbar.norm.vmax *= scale
self._update()
def _update(self):
self.cbar.set_ticks(None, update_ticks=True) # use default
self.cbar.draw_all()
self.mappable.set_norm(self.cbar.norm)
self.cbar.patch.figure.canvas.draw()
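# Rough usage sketch (assumes an existing mappable such as an AxesImage;
# ``ax`` and ``data`` are hypothetical):
#
#     im = ax.imshow(data, cmap='RdBu_r')
#     cbar = ax.figure.colorbar(im, ax=ax)
#     cbar = DraggableColorbar(cbar, im)        # keep a reference alive
#
# Left-drag on the colorbar shifts the color limits, right-drag rescales them,
# the up/down keys cycle through colormaps, and the space key resets the
# limits to their initial values.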
class SelectFromCollection(object):
"""Select channels from a matplotlib collection using ``LassoSelector``.
Selected channels are saved in the ``selection`` attribute. This tool
highlights selected points by fading other points out (i.e., reducing their
alpha values).
Parameters
----------
ax : instance of Axes
Axes to interact with.
collection : instance of matplotlib collection
Collection you want to select from.
alpha_other : 0 <= float <= 1
To highlight a selection, this tool sets all selected points to an
alpha value of 1 and non-selected points to ``alpha_other``.
        Defaults to 0.5.
linewidth_other : float
        Linewidth to use for non-selected sensors. Defaults to 0.5.
Notes
-----
This tool selects collection objects based on their *origins*
(i.e., ``offsets``). Emits mpl event 'lasso_event' when selection is ready.
"""
def __init__(self, ax, collection, ch_names, alpha_other=0.5,
linewidth_other=0.5, alpha_selected=1, linewidth_selected=1):
from matplotlib import __version__
if LooseVersion(__version__) < LooseVersion('1.2.1'):
raise ImportError('Interactive selection not possible for '
'matplotlib versions < 1.2.1. Upgrade '
'matplotlib.')
from matplotlib.widgets import LassoSelector
self.canvas = ax.figure.canvas
self.collection = collection
self.ch_names = ch_names
self.alpha_other = alpha_other
self.linewidth_other = linewidth_other
self.alpha_selected = alpha_selected
self.linewidth_selected = linewidth_selected
self.xys = collection.get_offsets()
self.Npts = len(self.xys)
# Ensure that we have separate colors for each object
self.fc = collection.get_facecolors()
self.ec = collection.get_edgecolors()
self.lw = collection.get_linewidths()
if len(self.fc) == 0:
raise ValueError('Collection must have a facecolor')
elif len(self.fc) == 1:
self.fc = np.tile(self.fc, self.Npts).reshape(self.Npts, -1)
self.ec = np.tile(self.ec, self.Npts).reshape(self.Npts, -1)
self.fc[:, -1] = self.alpha_other # deselect in the beginning
self.ec[:, -1] = self.alpha_other
self.lw = np.full(self.Npts, self.linewidth_other)
self.lasso = LassoSelector(ax, onselect=self.on_select,
lineprops=dict(color='red', linewidth=0.5))
self.selection = list()
def on_select(self, verts):
"""Select a subset from the collection."""
from matplotlib.path import Path
if len(verts) <= 3: # Seems to be a good way to exclude single clicks.
return
path = Path(verts)
inds = np.nonzero([path.contains_point(xy) for xy in self.xys])[0]
if self.canvas._key == 'control': # Appending selection.
sels = [np.where(self.ch_names == c)[0][0] for c in self.selection]
inters = set(inds) - set(sels)
inds = list(inters.union(set(sels) - set(inds)))
self.selection[:] = np.array(self.ch_names)[inds].tolist()
self.style_sensors(inds)
self.canvas.callbacks.process('lasso_event')
def select_one(self, ind):
"""Select or deselect one sensor."""
ch_name = self.ch_names[ind]
if ch_name in self.selection:
sel_ind = self.selection.index(ch_name)
self.selection.pop(sel_ind)
else:
self.selection.append(ch_name)
inds = np.in1d(self.ch_names, self.selection).nonzero()[0]
self.style_sensors(inds)
self.canvas.callbacks.process('lasso_event')
def select_many(self, inds):
"""Select many sensors using indices (for predefined selections)."""
self.selection[:] = np.array(self.ch_names)[inds].tolist()
self.style_sensors(inds)
def style_sensors(self, inds):
"""Style selected sensors as "active"."""
# reset
self.fc[:, -1] = self.alpha_other
self.ec[:, -1] = self.alpha_other / 2
self.lw[:] = self.linewidth_other
# style sensors at `inds`
self.fc[inds, -1] = self.alpha_selected
self.ec[inds, -1] = self.alpha_selected
self.lw[inds] = self.linewidth_selected
self.collection.set_facecolors(self.fc)
self.collection.set_edgecolors(self.ec)
self.collection.set_linewidths(self.lw)
self.canvas.draw_idle()
def disconnect(self):
"""Disconnect the lasso selector."""
self.lasso.disconnect_events()
self.fc[:, -1] = self.alpha_selected
self.ec[:, -1] = self.alpha_selected
self.collection.set_facecolors(self.fc)
self.collection.set_edgecolors(self.ec)
self.canvas.draw_idle()
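# Rough usage sketch: hook the lasso up to a scatter plot of sensor positions
# and read back the picked channel names (``ax``, ``pos`` and the names are
# hypothetical).
#
#     pts = ax.scatter(pos[:, 0], pos[:, 1])
#     ax.figure.lasso = SelectFromCollection(
#         ax, pts, ch_names=np.array(['EEG 001', 'EEG 002', 'EEG 003']))
#     # ... after the user draws a lasso around some points ...
#     print(ax.figure.lasso.selection)          # e.g. ['EEG 001', 'EEG 002']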
def _get_color_list(annotations=False):
"""Get the current color list from matplotlib rcParams.
Parameters
----------
    annotations : bool
        If False, the color cycle is returned unchanged. If True, remove a
        reddish color (reserved for annotation spans) from the cycle if it
        is present, and return it separately.
    Returns
    -------
    colors : list | tuple
        The color cycle, or a ``(colors, red)`` tuple if ``annotations`` is
        True, where ``red`` is the reserved reddish color.
"""
from matplotlib import rcParams
color_cycle = rcParams.get('axes.prop_cycle')
if not color_cycle:
# Use deprecated color_cycle to avoid KeyErrors in environments
# with Python 2.7 and Matplotlib < 1.5
# this will already be a list
colors = rcParams.get('axes.color_cycle')
else:
# we were able to use the prop_cycle. Now just convert to list
colors = color_cycle.by_key()['color']
# If we want annotations, red is reserved ... remove if present. This
# checks for the reddish color in MPL dark background style, normal style,
# and MPL "red", and defaults to the last of those if none are present
for red in ('#fa8174', '#d62728', '#ff0000'):
if annotations and red in colors:
colors.remove(red)
break
return (colors, red) if annotations else colors
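# Rough sketch of the two call forms:
#
#     colors = _get_color_list()                   # the full matplotlib cycle
#     colors, red = _get_color_list(annotations=True)
#     # ``red`` is the reserved reddish color (e.g. '#d62728'); if it was in
#     # the cycle it has been removed from ``colors``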
def _merge_annotations(start, stop, description, annotations, current=()):
"""Handle drawn annotations."""
ends = annotations.onset + annotations.duration
idx = np.intersect1d(np.where(ends >= start)[0],
np.where(annotations.onset <= stop)[0])
idx = np.intersect1d(idx,
np.where(annotations.description == description)[0])
new_idx = np.setdiff1d(idx, current) # don't include modified annotation
end = max(np.append((annotations.onset[new_idx] +
annotations.duration[new_idx]), stop))
onset = min(np.append(annotations.onset[new_idx], start))
duration = end - onset
annotations.delete(idx)
annotations.append(onset, duration, description)
def _connection_line(x, fig, sourceax, targetax, y=1.,
y_source_transform="transAxes"):
"""Connect source and target plots with a line.
Connect source and target plots with a line, such as time series
(source) and topolots (target). Primarily used for plot_joint
functions.
"""
from matplotlib.lines import Line2D
trans_fig = fig.transFigure
trans_fig_inv = fig.transFigure.inverted()
xt, yt = trans_fig_inv.transform(targetax.transAxes.transform([.5, 0.]))
xs, _ = trans_fig_inv.transform(sourceax.transData.transform([x, 0.]))
_, ys = trans_fig_inv.transform(getattr(sourceax, y_source_transform
).transform([0., y]))
return Line2D((xt, xs), (yt, ys), transform=trans_fig, color='grey',
linestyle='-', linewidth=1.5, alpha=.66, zorder=1,
clip_on=False)
class DraggableLine(object):
"""Custom matplotlib line for moving around by drag and drop.
Parameters
----------
line : instance of matplotlib Line2D
Line to add interactivity to.
    modify_callback : callable
        Callback called when the line is released, with the old and the new
        x position as arguments.
    drag_callback : callable
        Callback called while the line is being dragged, with the current x
        position as argument.
"""
def __init__(self, line, modify_callback, drag_callback):
self.line = line
self.press = None
self.x0 = line.get_xdata()[0]
self.modify_callback = modify_callback
self.drag_callback = drag_callback
self.cidpress = self.line.figure.canvas.mpl_connect(
'button_press_event', self.on_press)
self.cidrelease = self.line.figure.canvas.mpl_connect(
'button_release_event', self.on_release)
self.cidmotion = self.line.figure.canvas.mpl_connect(
'motion_notify_event', self.on_motion)
def set_x(self, x):
"""Repoisition the line."""
self.line.set_xdata([x, x])
self.x0 = x
def on_press(self, event):
"""Store button press if on top of the line."""
if event.inaxes != self.line.axes or not self.line.contains(event)[0]:
return
x0 = self.line.get_xdata()
y0 = self.line.get_ydata()
self.press = x0, y0, event.xdata, event.ydata
def on_motion(self, event):
"""Move the line on drag."""
if self.press is None:
return
if event.inaxes != self.line.axes:
return
x0, y0, xpress, ypress = self.press
dx = event.xdata - xpress
self.line.set_xdata(x0 + dx)
self.drag_callback((x0 + dx)[0])
self.line.figure.canvas.draw()
def on_release(self, event):
"""Handle release."""
if event.inaxes != self.line.axes or self.press is None:
return
self.press = None
self.line.figure.canvas.draw()
self.modify_callback(self.x0, event.xdata)
self.x0 = event.xdata
def remove(self):
"""Remove the line."""
self.line.figure.canvas.mpl_disconnect(self.cidpress)
self.line.figure.canvas.mpl_disconnect(self.cidrelease)
self.line.figure.canvas.mpl_disconnect(self.cidmotion)
self.line.remove()
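# Rough usage sketch (the callbacks here are placeholders and ``ax`` is
# hypothetical): make a vertical line draggable and report where it ends up.
#
#     vline = ax.axvline(0.1)
#     dline = DraggableLine(vline,
#                           modify_callback=lambda old_x, new_x: print(new_x),
#                           drag_callback=lambda x: None)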
def _setup_ax_spines(axes, vlines, xmin, xmax, ymin, ymax, invert_y=False,
unit=None, truncate_xaxis=True, truncate_yaxis=True,
skip_axlabel=False, hline=True):
# don't show zero line if it coincides with x-axis (even if hline=True)
if hline and ymin != 0.:
axes.spines['top'].set_position('zero')
else:
axes.spines['top'].set_visible(False)
# the axes can become very small with topo plotting. This prevents the
# x-axis from shrinking to length zero if truncate_xaxis=True, by adding
# new ticks that are nice round numbers close to (but less extreme than)
# xmin and xmax
vlines = [] if vlines is None else vlines
xticks = _trim_ticks(axes.get_xticks(), xmin, xmax)
xticks = np.array(sorted(set([x for x in xticks] + vlines)))
if len(xticks) < 2:
def log_fix(tval):
exp = np.log10(np.abs(tval))
return np.sign(tval) * 10 ** (np.fix(exp) - (exp < 0))
xlims = np.array([xmin, xmax])
temp_ticks = log_fix(xlims)
closer_idx = np.argmin(np.abs(xlims - temp_ticks))
further_idx = np.argmax(np.abs(xlims - temp_ticks))
start_stop = [temp_ticks[closer_idx], xlims[further_idx]]
step = np.sign(np.diff(start_stop)) * np.max(np.abs(temp_ticks))
tts = np.arange(*start_stop, step)
        xticks = np.array(sorted(list(xticks) + [tts[0], tts[-1]]))
axes.set_xticks(xticks)
# y-axis is simpler
yticks = _trim_ticks(axes.get_yticks(), ymin, ymax)
axes.set_yticks(yticks)
# truncation case 1: truncate both
if truncate_xaxis and truncate_yaxis:
axes.spines['bottom'].set_bounds(*xticks[[0, -1]])
axes.spines['left'].set_bounds(*yticks[[0, -1]])
# case 2: truncate only x (only right side; connect to y at left)
elif truncate_xaxis:
xbounds = np.array(axes.get_xlim())
xbounds[1] = axes.get_xticks()[-1]
axes.spines['bottom'].set_bounds(*xbounds)
# case 3: truncate only y (only top; connect to x at bottom)
elif truncate_yaxis:
ybounds = np.array(axes.get_ylim())
if invert_y:
ybounds[0] = axes.get_yticks()[0]
else:
ybounds[1] = axes.get_yticks()[-1]
axes.spines['left'].set_bounds(*ybounds)
# handle axis labels
if skip_axlabel:
axes.set_yticklabels([''] * len(yticks))
axes.set_xticklabels([''] * len(xticks))
else:
if unit is not None:
axes.set_ylabel(unit, rotation=90)
axes.set_xlabel('Time (s)')
# plot vertical lines
if vlines:
_ymin, _ymax = axes.get_ylim()
axes.vlines(vlines, _ymax, _ymin, linestyles='--', colors='k',
linewidth=1., zorder=1)
# invert?
if invert_y:
axes.invert_yaxis()
# changes we always make:
axes.tick_params(direction='out')
axes.tick_params(right=False)
axes.spines['right'].set_visible(False)
axes.spines['left'].set_zorder(0)
def _handle_decim(info, decim, lowpass):
"""Handle decim parameter for plotters."""
from ..evoked import _check_decim
from ..utils import _ensure_int
if isinstance(decim, str) and decim == 'auto':
lp = info['sfreq'] if info['lowpass'] is None else info['lowpass']
lp = min(lp, info['sfreq'] if lowpass is None else lowpass)
info['lowpass'] = lp
decim = max(int(info['sfreq'] / (lp * 3) + 1e-6), 1)
decim = _ensure_int(decim, 'decim', must_be='an int or "auto"')
if decim <= 0:
raise ValueError('decim must be "auto" or a positive integer, got %s'
% (decim,))
decim = _check_decim(info, decim, 0)[0]
data_picks = _pick_data_channels(info, exclude=())
return decim, data_picks
def _setup_plot_projector(info, noise_cov, proj=True, use_noise_cov=True,
nave=1):
from ..cov import compute_whitener
projector = np.eye(len(info['ch_names']))
whitened_ch_names = []
if noise_cov is not None and use_noise_cov:
# any channels in noise_cov['bads'] but not in info['bads'] get
# set to nan, which means that they are not plotted.
data_picks = _pick_data_channels(info, with_ref_meg=False, exclude=())
data_names = {info['ch_names'][pick] for pick in data_picks}
# these can be toggled by the user
bad_names = set(info['bads'])
# these can't in standard pipelines be enabled (we always take the
# union), so pretend they're not in cov at all
cov_names = ((set(noise_cov['names']) & set(info['ch_names'])) -
set(noise_cov['bads']))
# Actually compute the whitener only using the difference
whiten_names = cov_names - bad_names
whiten_picks = pick_channels(info['ch_names'], whiten_names)
whiten_info = pick_info(info, whiten_picks)
rank = _triage_rank_sss(whiten_info, [noise_cov])[1][0]
whitener, whitened_ch_names = compute_whitener(
noise_cov, whiten_info, rank=rank, verbose=False)
whitener *= np.sqrt(nave) # proper scaling for Evoked data
assert set(whitened_ch_names) == whiten_names
projector[whiten_picks, whiten_picks[:, np.newaxis]] = whitener
# Now we need to change the set of "whitened" channels to include
# all data channel names so that they are properly italicized.
whitened_ch_names = data_names
# We would need to set "bad_picks" to identity to show the traces
# (but in gray), but here we don't need to because "projector"
# starts out as identity. So all that is left to do is take any
# *good* data channels that are not in the noise cov to be NaN
nan_names = data_names - (bad_names | cov_names)
# XXX conditional necessary because of annoying behavior of
# pick_channels where an empty list means "all"!
if len(nan_names) > 0:
nan_picks = pick_channels(info['ch_names'], nan_names)
projector[nan_picks] = np.nan
elif proj:
projector, _ = setup_proj(info, add_eeg_ref=False, verbose=False)
return projector, whitened_ch_names
def _check_sss(info):
"""Check SSS history in info."""
ch_used = [ch for ch in _DATA_CH_TYPES_SPLIT
if _contains_ch_type(info, ch)]
has_meg = 'mag' in ch_used and 'grad' in ch_used
has_sss = (has_meg and len(info['proc_history']) > 0 and
info['proc_history'][0].get('max_info') is not None)
return ch_used, has_meg, has_sss
def _triage_rank_sss(info, covs, rank=None, scalings=None):
rank = dict() if rank is None else rank
scalings = _handle_default('scalings_cov_rank', scalings)
# Only look at good channels
picks = _pick_data_channels(info, with_ref_meg=False, exclude='bads')
info = pick_info(info, picks)
ch_used, has_meg, has_sss = _check_sss(info)
if has_sss:
if 'mag' in rank or 'grad' in rank:
raise ValueError('When using SSS, pass "meg" to set the rank '
'(separate rank values for "mag" or "grad" are '
'meaningless).')
elif 'meg' in rank:
raise ValueError('When not using SSS, pass separate rank values '
'for "mag" and "grad" (do not use "meg").')
picks_list = _picks_by_type(info, meg_combined=has_sss)
if has_sss:
# reduce ch_used to combined mag grad
ch_used = list(zip(*picks_list))[0]
# order pick list by ch_used (required for compat with plot_evoked)
picks_list = [x for x, y in sorted(zip(picks_list, ch_used))]
n_ch_used = len(ch_used)
# make sure we use the same rank estimates for GFP and whitening
picks_list2 = [k for k in picks_list]
# add meg picks if needed.
if has_meg:
# append ("meg", picks_meg)
picks_list2 += _picks_by_type(info, meg_combined=True)
rank_list = [] # rank dict for each cov
for cov in covs:
# We need to add the covariance projectors, compute the projector,
# and apply it, just like we will do in prepare_noise_cov, otherwise
# we risk the rank estimates being incorrect (i.e., if the projectors
# do not match).
info_proj = info.copy()
info_proj['projs'] += cov['projs']
this_rank = {}
# assemble rank dict for this cov, such that we have meg
for ch_type, this_picks in picks_list2:
# if we have already estimates / values for mag/grad but not
# a value for meg, combine grad and mag.
if ('mag' in this_rank and 'grad' in this_rank and
'meg' not in rank):
this_rank['meg'] = this_rank['mag'] + this_rank['grad']
# and we're done here
break
if rank.get(ch_type) is None:
ch_names = [info['ch_names'][pick] for pick in this_picks]
this_C = pick_channels_cov(cov, ch_names)
this_estimated_rank = compute_rank(
this_C, scalings=scalings, info=info_proj)[ch_type]
this_rank[ch_type] = this_estimated_rank
elif rank.get(ch_type) is not None:
this_rank[ch_type] = rank[ch_type]
rank_list.append(this_rank)
return n_ch_used, rank_list, picks_list, has_sss
def _check_cov(noise_cov, info):
"""Check the noise_cov for whitening and issue an SSS warning."""
from ..cov import read_cov, Covariance
if noise_cov is None:
return None
if isinstance(noise_cov, str):
noise_cov = read_cov(noise_cov)
if not isinstance(noise_cov, Covariance):
raise TypeError('noise_cov must be a str or Covariance, got %s'
% (type(noise_cov),))
if _check_sss(info)[2]: # has_sss
warn('Data have been processed with SSS, which changes the relative '
'scaling of magnetometers and gradiometers when viewing data '
'whitened by a noise covariance')
return noise_cov
def _set_title_multiple_electrodes(title, combine, ch_names, max_chans=6,
all=False, ch_type=None):
"""Prepare a title string for multiple electrodes."""
if title is None:
title = ", ".join(ch_names[:max_chans])
ch_type = _channel_type_prettyprint.get(ch_type, ch_type)
if ch_type is None:
ch_type = "sensor"
if len(ch_names) > 1:
ch_type += "s"
if all is True and isinstance(combine, str):
combine = combine.capitalize()
title = "{} of {} {}".format(
combine, len(ch_names), ch_type)
elif len(ch_names) > max_chans and combine != "gfp":
logger.info("More than {} channels, truncating title ...".format(
max_chans))
title += ", ...\n({} of {} {})".format(
combine, len(ch_names), ch_type,)
return title
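# Rough sketch of the resulting titles:
#
#     _set_title_multiple_electrodes(None, 'mean', ['EEG 001', 'EEG 002'])
#     # -> 'EEG 001, EEG 002'
#     _set_title_multiple_electrodes(None, 'gfp', ['EEG %03d' % i
#                                                  for i in range(1, 9)],
#                                    all=True)
#     # -> 'Gfp of 8 sensors'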
def _check_time_unit(time_unit, times):
if not isinstance(time_unit, str):
raise TypeError('time_unit must be str, got %s' % (type(time_unit),))
if time_unit == 's':
pass
elif time_unit == 'ms':
times = 1e3 * times
else:
raise ValueError("time_unit must be 's' or 'ms', got %r" % time_unit)
return time_unit, times
def _plot_masked_image(ax, data, times, mask=None, yvals=None,
cmap="RdBu_r", vmin=None, vmax=None, ylim=None,
mask_style="both", mask_alpha=.25, mask_cmap="Greys",
yscale="linear"):
"""Plot a potentially masked (evoked, TFR, ...) 2D image."""
from matplotlib import ticker, __version__ as mpl_version
if mask_style is None and mask is not None:
mask_style = "both" # default
draw_mask = mask_style in {"both", "mask"}
draw_contour = mask_style in {"both", "contour"}
if cmap is None:
mask_cmap = cmap
# mask param check and preparation
if draw_mask is None:
if mask is not None:
draw_mask = True
else:
draw_mask = False
if draw_contour is None:
if mask is not None:
draw_contour = True
else:
draw_contour = False
if mask is None:
if draw_mask:
warn("`mask` is None, not masking the plot ...")
draw_mask = False
if draw_contour:
warn("`mask` is None, not adding contour to the plot ...")
draw_contour = False
if draw_mask:
if mask.shape != data.shape:
raise ValueError(
"The mask must have the same shape as the data, "
"i.e., %s, not %s" % (data.shape, mask.shape))
if draw_contour and yscale == "log":
warn("Cannot draw contours with linear yscale yet ...")
if yvals is None: # for e.g. Evoked images
yvals = np.arange(data.shape[0])
# else, if TFR plot, yvals will be freqs
# test yscale
if yscale == 'log' and not yvals[0] > 0:
raise ValueError('Using log scale for frequency axis requires all your'
' frequencies to be positive (you cannot include'
' the DC component (0 Hz) in the TFR).')
if len(yvals) < 2 or yvals[0] == 0:
yscale = 'linear'
elif yscale != 'linear':
ratio = yvals[1:] / yvals[:-1]
if yscale == 'auto':
if yvals[0] > 0 and np.allclose(ratio, ratio[0]):
yscale = 'log'
else:
yscale = 'linear'
# https://github.com/matplotlib/matplotlib/pull/9477
if yscale == "log" and mpl_version == "2.1.0":
warn("With matplotlib version 2.1.0, lines may not show up in "
"`AverageTFR.plot_joint`. Upgrade to a more recent version.")
if yscale == "log": # pcolormesh for log scale
# compute bounds between time samples
time_lims, = centers_to_edges(times)
log_yvals = np.concatenate([[yvals[0] / ratio[0]], yvals,
[yvals[-1] * ratio[0]]])
yval_lims = np.sqrt(log_yvals[:-1] * log_yvals[1:])
# construct a time-yvaluency bounds grid
time_mesh, yval_mesh = np.meshgrid(time_lims, yval_lims)
if mask is not None:
ax.pcolormesh(time_mesh, yval_mesh, data, cmap=mask_cmap,
vmin=vmin, vmax=vmax, alpha=mask_alpha)
im = ax.pcolormesh(time_mesh, yval_mesh,
np.ma.masked_where(~mask, data), cmap=cmap,
vmin=vmin, vmax=vmax, alpha=1)
else:
im = ax.pcolormesh(time_mesh, yval_mesh, data, cmap=cmap,
vmin=vmin, vmax=vmax)
if ylim is None:
ylim = yval_lims[[0, -1]]
if yscale == 'log':
ax.set_yscale('log')
ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
# get rid of minor ticks
ax.yaxis.set_minor_locator(ticker.NullLocator())
tick_vals = yvals[np.unique(np.linspace(
0, len(yvals) - 1, 12).round().astype('int'))]
ax.set_yticks(tick_vals)
else:
# imshow for linear because the y ticks are nicer
# and the masked areas look better
dt = np.median(np.diff(times)) / 2. if len(times) > 1 else 0.1
dy = np.median(np.diff(yvals)) / 2. if len(yvals) > 1 else 0.5
extent = [times[0] - dt, times[-1] + dt,
yvals[0] - dy, yvals[-1] + dy]
im_args = dict(interpolation='nearest', origin='lower',
extent=extent, aspect='auto', vmin=vmin, vmax=vmax)
if draw_mask:
ax.imshow(data, alpha=mask_alpha, cmap=mask_cmap, **im_args)
im = ax.imshow(
np.ma.masked_where(~mask, data), cmap=cmap, **im_args)
else:
ax.imshow(data, cmap=cmap, **im_args) # see #6481
im = ax.imshow(data, cmap=cmap, **im_args)
if draw_contour and np.unique(mask).size == 2:
big_mask = np.kron(mask, np.ones((10, 10)))
ax.contour(big_mask, colors=["k"], extent=extent,
linewidths=[.75], corner_mask=False,
antialiased=False, levels=[.5])
time_lims = [extent[0], extent[1]]
if ylim is None:
ylim = [extent[2], extent[3]]
ax.set_xlim(time_lims[0], time_lims[-1])
ax.set_ylim(ylim)
if (draw_mask or draw_contour) and mask is not None:
if mask.all():
t_end = ", all points masked)"
else:
fraction = 1 - (np.float64(mask.sum()) / np.float64(mask.size))
t_end = ", %0.3g%% of points masked)" % (fraction * 100,)
else:
t_end = ")"
return im, t_end
@fill_doc
def _make_combine_callable(combine):
"""Convert None or string values of ``combine`` into callables.
Params
------
%(combine)s
If callable, the callable must accept one positional input (data of
shape ``(n_epochs, n_channels, n_times)`` or ``(n_evokeds, n_channels,
n_times)``) and return an :class:`array <numpy.ndarray>` of shape
``(n_epochs, n_times)`` or ``(n_evokeds, n_times)``.
"""
if combine is None:
combine = partial(np.squeeze, axis=1)
elif isinstance(combine, str):
combine_dict = {key: partial(getattr(np, key), axis=1)
for key in ('mean', 'median', 'std')}
combine_dict['gfp'] = lambda data: np.sqrt((data ** 2).mean(axis=1))
try:
combine = combine_dict[combine]
except KeyError:
raise ValueError('"combine" must be None, a callable, or one of '
'"mean", "median", "std", or "gfp"; got {}'
''.format(combine))
return combine
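# Rough sketch: the returned callable collapses the channel dimension, e.g.
# for data of shape (n_epochs, n_channels, n_times):
#
#     data = np.random.RandomState(0).randn(5, 3, 100)
#     _make_combine_callable('mean')(data).shape   # (5, 100)
#     _make_combine_callable('gfp')(data).shape    # (5, 100)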
def center_cmap(cmap, vmin, vmax, name="cmap_centered"):
"""Center given colormap (ranging from vmin to vmax) at value 0.
Parameters
----------
cmap : matplotlib.colors.Colormap
The colormap to center around 0.
vmin : float
Minimum value in the data to map to the lower end of the colormap.
vmax : float
Maximum value in the data to map to the upper end of the colormap.
name : str
Name of the new colormap. Defaults to 'cmap_centered'.
Returns
-------
cmap_centered : matplotlib.colors.Colormap
The new colormap centered around 0.
Notes
-----
This function can be used in situations where vmin and vmax are not
symmetric around zero. Normally, this results in the value zero not being
mapped to white anymore in many colormaps. Using this function, the value
zero will be mapped to white even for asymmetric positive and negative
value ranges. Note that this could also be achieved by re-normalizing a
given colormap by subclassing matplotlib.colors.Normalize as described
here:
https://matplotlib.org/users/colormapnorms.html#custom-normalization-two-linear-ranges
""" # noqa: E501
from matplotlib.colors import LinearSegmentedColormap
vzero = abs(vmin) / float(vmax - vmin)
index_old = np.linspace(0, 1, cmap.N)
index_new = np.hstack([np.linspace(0, vzero, cmap.N // 2, endpoint=False),
np.linspace(vzero, 1, cmap.N // 2)])
colors = "red", "green", "blue", "alpha"
cdict = {name: [] for name in colors}
for old, new in zip(index_old, index_new):
for color, name in zip(cmap(old), colors):
cdict[name].append((new, color, color))
return LinearSegmentedColormap(name, cdict)
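# Rough usage sketch: make 0 map to white for an asymmetric range, then pass
# the same vmin/vmax to the plotting call (``ax``/``data`` are hypothetical).
#
#     import matplotlib.pyplot as plt
#     cmap = center_cmap(plt.cm.RdBu_r, vmin=-1., vmax=3.)
#     ax.imshow(data, cmap=cmap, vmin=-1., vmax=3.)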
def _convert_psds(psds, dB, estimate, scaling, unit, ch_names=None,
first_dim='channel'):
"""Convert PSDs to dB (if necessary) and appropriate units.
The following table summarizes the relationship between the value of
parameters ``dB`` and ``estimate``, and the type of plot and corresponding
units.
| dB | estimate | plot | units |
|-------+-------------+------+-------------------|
| True | 'power' | PSD | amp**2/Hz (dB) |
| True | 'amplitude' | ASD | amp/sqrt(Hz) (dB) |
| True | 'auto' | PSD | amp**2/Hz (dB) |
| False | 'power' | PSD | amp**2/Hz |
| False | 'amplitude' | ASD | amp/sqrt(Hz) |
| False | 'auto' | ASD | amp/sqrt(Hz) |
where amp are the units corresponding to the variable, as specified by
``unit``.
"""
_check_option('first_dim', first_dim, ['channel', 'epoch'])
where = np.where(psds.min(1) <= 0)[0]
if len(where) > 0:
# Construct a helpful error message, depending on whether the first
# dimension of `psds` are channels or epochs.
if dB:
bad_value = 'Infinite'
else:
bad_value = 'Zero'
if first_dim == 'channel':
bads = ', '.join(ch_names[ii] for ii in where)
else:
bads = ', '.join(str(ii) for ii in where)
msg = f'{bad_value} value in PSD for {first_dim}{_pl(where)} {bads}.'
if first_dim == 'channel':
msg += '\nThese channels might be dead.'
warn(msg, UserWarning)
if estimate == 'auto':
estimate = 'power' if dB else 'amplitude'
if estimate == 'amplitude':
np.sqrt(psds, out=psds)
psds *= scaling
ylabel = r'$\mathrm{%s/\sqrt{Hz}}$' % unit
else:
psds *= scaling * scaling
if '/' in unit:
unit = '(%s)' % unit
ylabel = r'$\mathrm{%s²/Hz}$' % unit
if dB:
np.log10(np.maximum(psds, np.finfo(float).tiny), out=psds)
psds *= 10
ylabel += r'$\ \mathrm{(dB)}$'
return ylabel
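# Rough usage sketch: convert EEG PSDs to dB in-place and get the matching
# axis label back. ``psds`` is a hypothetical (n_channels, n_freqs) array in
# V**2/Hz and ``raw`` a hypothetical Raw instance; scaling=1e6 expresses the
# result in µV.
#
#     ylabel = _convert_psds(psds, dB=True, estimate='auto', scaling=1e6,
#                            unit='µV', ch_names=raw.ch_names)
#     # psds now holds 10 * log10(power in µV²/Hz)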
def _plot_psd(inst, fig, freqs, psd_list, picks_list, titles_list,
units_list, scalings_list, ax_list, make_label, color, area_mode,
area_alpha, dB, estimate, average, spatial_colors, xscale,
line_alpha, sphere, xlabels_list):
# helper function for plot_raw_psd and plot_epochs_psd
from matplotlib.ticker import ScalarFormatter
from .evoked import _plot_lines
for key, ls in zip(['lowpass', 'highpass', 'line_freq'],
['--', '--', '-.']):
if inst.info[key] is not None:
for ax in ax_list:
ax.axvline(inst.info[key], color='k', linestyle=ls,
alpha=0.25, linewidth=2, zorder=2)
if line_alpha is None:
line_alpha = 1.0 if average else 0.75
line_alpha = float(line_alpha)
ylabels = list()
for ii, (psd, picks, title, ax, scalings, units) in enumerate(zip(
psd_list, picks_list, titles_list, ax_list,
scalings_list, units_list)):
ylabel = _convert_psds(psd, dB, estimate, scalings, units,
[inst.ch_names[pi] for pi in picks])
ylabels.append(ylabel)
del ylabel
if average:
# mean across channels
psd_mean = np.mean(psd, axis=0)
if area_mode == 'std':
# std across channels
psd_std = np.std(psd, axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == 'range':
hyp_limits = (np.min(psd, axis=0),
np.max(psd, axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color, alpha=line_alpha,
linewidth=0.5)
if hyp_limits is not None:
ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
facecolor=color, alpha=area_alpha)
if not average:
picks = np.concatenate(picks_list)
psd_list = np.concatenate(psd_list)
types = np.array(inst.get_channel_types(picks=picks))
# Needed because the data do not match the info anymore.
info = create_info([inst.ch_names[p] for p in picks],
inst.info['sfreq'], types)
info['chs'] = [inst.info['chs'][p] for p in picks]
info['dev_head_t'] = inst.info['dev_head_t']
ch_types_used = list()
for this_type in _VALID_CHANNEL_TYPES:
if this_type in types:
ch_types_used.append(this_type)
assert len(ch_types_used) == len(ax_list)
unit = ''
units = {t: yl for t, yl in zip(ch_types_used, ylabels)}
titles = {c: t for c, t in zip(ch_types_used, titles_list)}
picks = np.arange(len(psd_list))
if not spatial_colors:
spatial_colors = color
_plot_lines(psd_list, info, picks, fig, ax_list, spatial_colors,
unit, units=units, scalings=None, hline=None, gfp=False,
types=types, zorder='std', xlim=(freqs[0], freqs[-1]),
ylim=None, times=freqs, bad_ch_idx=[], titles=titles,
ch_types_used=ch_types_used, selectable=True, psd=True,
line_alpha=line_alpha, nave=None, time_unit='ms',
sphere=sphere)
for ii, (ax, xlabel) in enumerate(zip(ax_list, xlabels_list)):
ax.grid(True, linestyle=':')
if xscale == 'log':
ax.set(xscale='log')
ax.set(xlim=[freqs[1] if freqs[0] == 0 else freqs[0], freqs[-1]])
ax.get_xaxis().set_major_formatter(ScalarFormatter())
else: # xscale == 'linear'
ax.set(xlim=(freqs[0], freqs[-1]))
if make_label:
ax.set(ylabel=ylabels[ii], title=titles_list[ii])
if xlabel:
ax.set_xlabel('Frequency (Hz)')
if make_label:
fig.align_ylabels(axs=ax_list)
return fig
def _trim_ticks(ticks, _min, _max):
"""Remove ticks that are more extreme than the given limits."""
keep = np.where(np.logical_and(ticks >= _min, ticks <= _max))
return ticks[keep]
def _set_window_title(fig, title):
if fig.canvas.manager is not None:
fig.canvas.manager.set_window_title(title)
def _shorten_path_from_middle(fpath, max_len=60, replacement='...'):
"""Truncate a path from the middle by omitting complete path elements."""
from os.path import sep
if len(fpath) > max_len:
pathlist = fpath.split(sep)
# indices starting from middle, alternating sides, omitting final elem:
# range(8) → 3, 4, 2, 5, 1, 6; range(7) → 2, 3, 1, 4, 0, 5
ixs_to_trunc = list(zip(range(len(pathlist) // 2 - 1, -1, -1),
range(len(pathlist) // 2, len(pathlist) - 1)))
ixs_to_trunc = np.array(ixs_to_trunc).flatten()
for ix in ixs_to_trunc:
pathlist[ix] = replacement
truncs = (np.array(pathlist) == replacement).nonzero()[0]
newpath = sep.join(pathlist[:truncs[0]] + pathlist[truncs[-1]:])
if len(newpath) < max_len:
break
return newpath
return fpath
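# Rough sketch of the truncation (POSIX separators assumed):
#
#     _shorten_path_from_middle('/very/long/path/to/some/file.fif', max_len=20)
#     # -> '/.../some/file.fif' (whole path elements are dropped from the
#     #    middle until the string fits)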
def centers_to_edges(*arrays):
"""Convert center points to edges.
Parameters
----------
*arrays : list of ndarray
Each input array should be 1D monotonically increasing,
and will be cast to float.
Returns
-------
arrays : list of ndarray
Given each input of shape (N,), the output will have shape (N+1,).
Examples
--------
>>> x = [0., 0.1, 0.2, 0.3]
>>> y = [20, 30, 40]
>>> centers_to_edges(x, y) # doctest: +SKIP
[array([-0.05, 0.05, 0.15, 0.25, 0.35]), array([15., 25., 35., 45.])]
"""
out = list()
for ai, arr in enumerate(arrays):
arr = np.asarray(arr, dtype=float)
_check_option(f'arrays[{ai}].ndim', arr.ndim, (1,))
if len(arr) > 1:
arr_diff = np.diff(arr) / 2.
else:
arr_diff = [abs(arr[0]) * 0.001] if arr[0] != 0 else [0.001]
out.append(np.concatenate([
[arr[0] - arr_diff[0]],
arr[:-1] + arr_diff,
[arr[-1] + arr_diff[-1]]]))
return out
def _figure_agg(**kwargs):
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
fig = Figure(**kwargs)
FigureCanvasAgg(fig)
return fig
def _ndarray_to_fig(img):
"""Convert to MPL figure, adapted from matplotlib.image.imsave."""
dpi = 100
figsize = np.array(img.shape[:2][::-1]) / dpi
fig = _figure_agg(dpi=dpi, figsize=figsize, frameon=False)
fig.figimage(img, resize=True)
return fig
def _save_ndarray_img(fname, img):
"""Save an image to disk."""
from PIL import Image
Image.fromarray(img).save(fname)
def concatenate_images(images, axis=0, bgcolor='black', centered=True):
"""Concatenate a list of images.
Parameters
----------
images : list of ndarray
The list of images to concatenate.
axis : 0 or 1
The images are concatenated horizontally if 0 and vertically otherwise.
The default orientation is horizontal.
bgcolor : str | list
The color of the background. The name of the color is accepted
(e.g 'red') or a list of RGB values between 0 and 1. Defaults to
'black'.
centered : bool
If True, the images are centered. Defaults to True.
Returns
-------
img : ndarray
The concatenated image.
"""
from matplotlib.colors import colorConverter
if isinstance(bgcolor, str):
bgcolor = colorConverter.to_rgb(bgcolor)
bgcolor = np.asarray(bgcolor) * 255
funcs = [np.sum, np.max]
ret_shape = np.asarray([
funcs[axis]([image.shape[0] for image in images]),
funcs[1 - axis]([image.shape[1] for image in images]),
])
ret = np.zeros((ret_shape[0], ret_shape[1], 3), dtype=np.uint8)
ret[:, :, :] = bgcolor
ptr = np.array([0, 0])
sec = np.array([0 == axis, 1 == axis]).astype(int)
for image in images:
shape = image.shape[:-1]
dec = ptr
dec += ((ret_shape - shape) // 2) * (1 - sec) if centered else 0
ret[dec[0]:dec[0] + shape[0], dec[1]:dec[1] + shape[1], :] = image
ptr += shape * sec
return ret
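# Rough usage sketch: stack two RGB uint8 images of equal width on a white
# background; per the implementation above, ``axis=0`` grows the result along
# the first (row) dimension and centers images along the other dimension.
#
#     a = np.zeros((100, 120, 3), dtype=np.uint8)
#     b = np.zeros((60, 120, 3), dtype=np.uint8)
#     big = concatenate_images([a, b], axis=0, bgcolor='white')
#     # big.shape == (160, 120, 3)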
def _generate_default_filename(ext=".png"):
now = datetime.now()
dt_string = now.strftime("_%Y-%m-%d_%H-%M-%S")
return "MNE" + dt_string + ext
|
bsd-3-clause
|
ben-hopps/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/axes.py
|
69
|
259904
|
from __future__ import division, generators
import math, sys, warnings, datetime, new
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.mlab as mlab
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
iterable = cbook.iterable
is_string_like = cbook.is_string_like
def _process_plot_format(fmt):
"""
    Process a matlab(TM) style color/line style format string. Return a
    (*linestyle*, *marker*, *color*) tuple as a result of the processing;
    entries not present in the format string are returned as defaults.
    Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`:
for all possible styles and color format string.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
return linestyle, marker, color # Yes.
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
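# Rough sketch of the parsing above; the function returns a
# (linestyle, marker, color) tuple:
#
#     _process_plot_format('r--')   # -> ('--', 'None', 'r')
#     _process_plot_format('ko')    # -> ('None', 'o', 'k')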
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers
"""
_process_plot_var_args.defaultColors = clist[:]
rcParams['lines.color'] = clist[0]
class _process_plot_var_args:
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
defaultColors = ['b','g','r','c','m','y','k']
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self._clear_color_cycle()
def _clear_color_cycle(self):
self.colors = _process_plot_var_args.defaultColors[:]
# if the default line color is a color format string, move it up
        # in the queue
try: ind = self.colors.index(rcParams['lines.color'])
except ValueError:
self.firstColor = rcParams['lines.color']
else:
self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def set_color_cycle(self, clist):
self.colors = clist[:]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def _get_next_cycle_color(self):
if self.count==0:
color = self.firstColor
else:
color = self.colors[int(self.count % self.Ncolors)]
self.count += 1
return color
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError, 'There is no line property "%s"'%key
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError, 'There is no patch property "%s"'%key
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_y(self, y):
if self.axes.yaxis is not None:
b = self.axes.yaxis.update_units(y)
if b: return np.arange(len(y)), y, False
if not ma.isMaskedArray(y):
y = np.asarray(y)
if len(y.shape) == 1:
y = y[:,np.newaxis]
nr, nc = y.shape
x = np.arange(nr)
if len(x.shape) == 1:
x = x[:,np.newaxis]
return x,y, True
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
# right now multicol is not supported if either x or y are
# unit enabled but this can be fixed..
if bx or by: return x, y, False
x = ma.asarray(x)
y = ma.asarray(y)
if len(x.shape) == 1:
x = x[:,np.newaxis]
if len(y.shape) == 1:
y = y[:,np.newaxis]
nrx, ncx = x.shape
nry, ncy = y.shape
assert nrx == nry, 'Dimensions of x and y are incompatible'
if ncx == ncy:
return x, y, True
if ncx == 1:
x = np.repeat(x, ncy, axis=1)
if ncy == 1:
y = np.repeat(y, ncx, axis=1)
assert x.shape == y.shape, 'Dimensions of x and y are incompatible'
return x, y, True
def _plot_1_arg(self, y, **kwargs):
assert self.command == 'plot', 'fill needs at least 2 arguments'
ret = []
x, y, multicol = self._xy_from_y(y)
if multicol:
for j in xrange(y.shape[1]):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y[:,j],
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
else:
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
return ret
def _plot_2_args(self, tup2, **kwargs):
ret = []
if is_string_like(tup2[1]):
assert self.command == 'plot', ('fill needs at least 2 non-string '
'arguments')
y, fmt = tup2
x, y, multicol = self._xy_from_y(y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
if multicol:
for j in xrange(y.shape[1]):
makeline(x[:,j], y[:,j])
else:
makeline(x, y)
return ret
else:
x, y = tup2
x, y, multicol = self._xy_from_xy(x, y)
def makeline(x, y):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
facecolor = self._get_next_cycle_color()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _plot_3_args(self, tup3, **kwargs):
ret = []
x, y, fmt = tup3
x, y, multicol = self._xy_from_xy(x, y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
facecolor = color
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0: return
if len(remaining)==1:
for seg in self._plot_1_arg(remaining[0], **kwargs):
yield seg
remaining = []
continue
if len(remaining)==2:
for seg in self._plot_2_args(remaining, **kwargs):
yield seg
remaining = []
continue
if len(remaining)==3:
if not is_string_like(remaining[2]):
raise ValueError, 'third arg must be a format string'
for seg in self._plot_3_args(remaining, **kwargs):
yield seg
remaining=[]
continue
if is_string_like(remaining[2]):
for seg in self._plot_3_args(remaining[:3], **kwargs):
yield seg
remaining=remaining[3:]
else:
for seg in self._plot_2_args(remaining[:2], **kwargs):
yield seg
remaining=remaining[2:]
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' ]
*alpha* float: the alpha transparency
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
# this call may differ for non-sep axes, eg polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def get_window_extent(self, *args, **kwargs):
'''
get the axes bounding box in display space; *args* and
*kwargs* are empty
'''
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale()
def set_figure(self, fig):
"""
        Set the :class:`~matplotlib.axes.Axes` figure
        accepts a :class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.axes.transData, self.axes.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.axes.transAxes, self.axes.transData)
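    # A minimal sketch (not part of the original source) of the transform
    # pipeline set up above; ``ax`` is an assumed existing Axes instance:
    #
    #     # map a point from data coordinates to display (pixel) coordinates
    #     x_disp, y_disp = ax.transData.transform_point((2.0, 3.0))
    #     # map a point given in axes-fraction coordinates (0-1) to display
    #     x_disp, y_disp = ax.transAxes.transform_point((0.5, 0.5))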
def get_xaxis_transform(self):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
line._transformed_path.invalidate()
def get_position(self, original=False):
        'Return a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
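    # A minimal usage sketch (assumption: ``ax`` is an existing Axes);
    # positions are given in figure-fraction coordinates:
    #
    #     ax.set_position([0.1, 0.1, 0.8, 0.8])             # active position
    #     ax.set_position([0.1, 0.1, 0.8, 0.8], 'original') # apply_aspect start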
def reset_position(self):
'Make the original position the active position'
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def _set_artist_props(self, a):
'set the boilerplate props for artists added to axes'
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def cla(self):
'Clear the current axes'
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry(('xlim_changed',
'ylim_changed'))
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False)
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False)
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
else:
self.yaxis.set_scale('linear')
self._autoscaleon = True
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self.legend_ = None
self.collections = [] # collection.Collection instances
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='bottom',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
# the frame draws the border around the axes and we want this
# above. this is a place holder for a more sophisticated
# artist that might just draw a left, bottom frame, or a
# centered frame, etc the axesFrame name is deprecated
self.frame = self.axesFrame = self._gen_axes_patch()
self.frame.set_figure(self.figure)
self.frame.set_facecolor('none')
self.frame.set_edgecolor(rcParams['axes.edgecolor'])
self.frame.set_linewidth(rcParams['axes.linewidth'])
self.frame.set_transform(self.transAxes)
self.frame.set_zorder(2.5)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def clear(self):
'clear the axes'
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
clist is a list of mpl color specifiers.
"""
self._get_lines.set_color_cycle(clist)
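    # Hedged example (not part of the class): set a custom color cycle before
    # plotting on an assumed existing Axes ``ax``; subsequent plot() calls
    # cycle through these colors in order:
    #
    #     ax.set_color_cycle(['r', 'g', 'b'])
    #     ax.plot([1, 2, 3])   # red
    #     ax.plot([2, 3, 4])   # green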
def ishold(self):
'return the HOLD status of the axes'
return self._hold
def hold(self, b=None):
"""
call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples:
* toggle hold:
>>> hold()
* turn hold on:
>>> hold(True)
* turn hold off
>>> hold(False)
When hold is True, subsequent plot commands will be added to
the current axes. When hold is False, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
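    # Hedged usage sketch for the hold state, assuming an existing Axes ``ax``:
    #
    #     ax.hold(True)        # subsequent plot commands add to the axes
    #     ax.plot([1, 2, 3])
    #     ax.plot([3, 2, 1])   # drawn on top of the first line
    #     ax.hold(False)       # the next plot command clears the axes first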
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
========= ============================
value description
========= ============================
'box' change physical size of axes
'datalim' change xlim or ylim
========= ============================
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
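    # A minimal sketch of the aspect-control API, assuming an Axes ``ax``:
    #
    #     ax.set_aspect('equal', adjustable='datalim')      # equal data scaling
    #     ax.set_aspect(2.0, adjustable='box', anchor='C')  # height = 2 * width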
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' ]
"""
if adjustable in ('box', 'datalim'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
                             ', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
def apply_aspect(self, position=None):
'''
Use :meth:`_aspect` and :meth:`_adjustable` to modify the
axes box or the view limits.
'''
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable == 'box':
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
'''
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
'''
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view()
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
try: v[0]
except IndexError:
emit = kwargs.get('emit', True)
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
xmin, xmax = self.set_xlim(xmin, xmax, emit)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
ymin, ymax = self.set_ylim(ymin, ymax, emit)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]])
self.set_ylim([v[2], v[3]])
return v
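    # Hedged examples of the axis() convenience method, assuming an Axes ``ax``:
    #
    #     ax.axis('off')                        # hide axis lines, ticks, labels
    #     ax.axis('equal')                      # equal scaling via data limits
    #     ax.axis([0, 10, -1, 1])               # explicit [xmin, xmax, ymin, ymax]
    #     xmin, xmax, ymin, ymax = ax.axis()    # query the current limits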
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise DeprecationWarning('Use get_children instead')
def get_frame(self):
'Return the axes Rectangle frame'
warnings.warn('use ax.patch instead', DeprecationWarning)
return self.patch
def get_legend(self):
'Return the legend.Legend instance, or None if no legend is defined'
return self.legend_
def get_images(self):
'return a list of Axes images contained by the Axes'
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
'Return a list of lines contained by the Axes'
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
'Return the XAxis instance'
return self.xaxis
def get_xgridlines(self):
'Get the x grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
'Get the xtick lines as a list of Line2D instances'
return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
'Return the YAxis instance'
return self.yaxis
def get_ygridlines(self):
'Get the y grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
'Get the ytick lines as a list of Line2D instances'
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def has_data(self):
'''Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
        needs to be updated, and may not actually be useful for
anything.
'''
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the axes'
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
def add_collection(self, collection, autolim=True):
'''
add a :class:`~matplotlib.collections.Collection` instance
to the axes
'''
label = collection.get_label()
if not label:
collection.set_label('collection%d'%len(self.collections))
self.collections.append(collection)
self._set_artist_props(collection)
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
def add_line(self, line):
'''
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
'''
self._set_artist_props(line)
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d'%len(self.lines))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
def _update_line_limits(self, line):
p = line.get_path()
if p.vertices.size > 0:
self.dataLim.update_from_path(p, self.ignore_existing_data_limits,
updatex=line.x_isdata,
updatey=line.y_isdata)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
"""
self._set_artist_props(p)
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
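    # A minimal sketch of adding a patch directly, assuming an existing Axes
    # ``ax`` (mpatches is the matplotlib.patches module imported above):
    #
    #     rect = mpatches.Rectangle((1, 1), width=2, height=3, facecolor='y')
    #     ax.add_patch(rect)       # clipped to the axes, data limits updated
    #     ax.autoscale_view()      # grow the view limits to show the new patch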
def _update_patch_limits(self, patch):
'update the data limits for patch *p*'
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
if (isinstance(patch, mpatches.Rectangle) and
(patch.get_width()==0 or patch.get_height()==0)):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
transform = (patch.get_data_transform() +
self.transData.inverted())
xys = transform.transform(xys)
self.update_datalim(xys, updatex=patch.x_isdata,
updatey=patch.y_isdata)
def add_table(self, tab):
'''
Add a :class:`~matplotlib.tables.Table` instance to the
list of axes tables
'''
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
def relim(self):
'recompute the data limits based on current artists'
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
'Update the data lim bbox with seq of xy tups or equiv. 2-D array'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
'Update the data lim bbox with seq of xy tups'
        # if no data is set currently, the bbox will ignore its
        # limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
'''
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
'''
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
'look for unit *kwargs* and update the axis instances as necessary'
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
'''
return *True* if the given *mouseevent* (in display coords)
is in the Axes
'''
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied on plot commands
"""
return self._autoscaleon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleon = b
def autoscale_view(self, tight=False, scalex=True, scaley=True):
"""
autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
"""
# if image data only just use the datalim
if not self._autoscaleon: return
if scalex:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
if scaley:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
if (tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)):
if scalex:
self.set_xbound(x0, x1)
if scaley:
self.set_ybound(y0, y1)
return
if scalex:
XL = self.xaxis.get_major_locator().view_limits(x0, x1)
self.set_xbound(XL)
if scaley:
YL = self.yaxis.get_major_locator().view_limits(y0, y1)
self.set_ybound(YL)
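    # Hedged usage sketch: rescale the view after adding artists manually,
    # assuming an existing Axes ``ax`` and a Line2D ``line`` already built:
    #
    #     ax.add_line(line)                 # dataLim is updated, viewLim is not
    #     ax.autoscale_view()               # grow viewLim to the data limits
    #     ax.autoscale_view(scalex=False)   # only rescale the y axis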
#### Drawing
def draw(self, renderer=None, inframe=False):
"Draw everything (plot lines, axes, labels)"
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
self.apply_aspect()
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
artists = []
if len(self.images)<=1 or renderer.option_image_nocomposite():
for im in self.images:
im.draw(renderer)
else:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0)
for im in self.images if im.get_visible()]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
renderer.draw_image(
round(l), round(b), im, self.bbox,
self.patch.get_path(),
self.patch.get_transform())
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.append(self.frame)
dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)
if not a.get_animated() ]
dsu.sort()
for zorder, i, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
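    # A hedged sketch of the fast-redraw pattern draw_artist() is meant for;
    # ``fig``, ``ax``, ``line`` and ``new_y`` are assumed to exist, and
    # canvas.blit is backend dependent (Agg-based backends support it):
    #
    #     fig.canvas.draw()                 # initial draw caches the renderer
    #     line.set_ydata(new_y)
    #     ax.draw_artist(line)              # redraw just this artist
    #     fig.canvas.blit(ax.bbox)          # push the updated region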
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort()
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
        Get whether the axis ticks and gridlines are drawn below most artists
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
def grid(self, b=None, **kwargs):
"""
call signature::
grid(self, b=None, **kwargs)
Set the axes grids on or off; *b* is a boolean
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*
        *kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs): b = True
self.xaxis.grid(b, **kwargs)
self.yaxis.grid(b, **kwargs)
grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd
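    # Minimal grid examples, assuming an existing Axes ``ax``:
    #
    #     ax.grid(True)                                     # turn the grid on
    #     ax.grid(color='r', linestyle=':', linewidth=0.5)  # kwargs imply b=True
    #     ax.grid(False)                                    # turn it off again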
def ticklabel_format(self, **kwargs):
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes.
Optional keyword arguments:
============ =====================================
Keyword Description
============ =====================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10`-m`:sup: to 10`n`:sup:.
Use (0,0) to include all numbers.
*axis* [ 'x' | 'y' | 'both' ]
============ =====================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
axis = kwargs.pop('axis', 'both').lower()
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError, "comma style remains to be added"
elif style == '':
sb = None
else:
raise ValueError, "%s is not a valid style value"
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
'Return the axis background color'
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left)
def xaxis_inverted(self):
'Returns True if the x-axis is inverted.'
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower)
else:
self.set_xlim(lower, upper)
else:
if lower < upper:
self.set_xlim(lower, upper)
else:
self.set_xlim(upper, lower)
def get_xlim(self):
"""
Get the x-axis range [*xmin*, *xmax*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):
"""
call signature::
set_xlim(self, *args, **kwargs)
Set the limits for the xaxis
Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]
Examples::
set_xlim((valmin, valmax))
set_xlim(valmin, valmax)
set_xlim(xmin=1) # xmax unchanged
set_xlim(xmax=1) # xmin unchanged
Keyword arguments:
          *xmin*: scalar
            the min of the xlim
          *xmax*: scalar
            the max of the xlim
*emit*: [ True | False ]
notify observers of lim change
ACCEPTS: len(2) sequence of floats
"""
if xmax is None and iterable(xmin):
xmin,xmax = xmin
self._process_unit_info(xdata=(xmin, xmax))
if xmin is not None:
xmin = self.convert_xunits(xmin)
if xmax is not None:
xmax = self.convert_xunits(xmax)
old_xmin,old_xmax = self.get_xlim()
if xmin is None: xmin = old_xmin
if xmax is None: xmax = old_xmax
xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)
xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)
self.viewLim.intervalx = (xmin, xmax)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return xmin, xmax
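    # Minimal set_xlim examples, assuming an existing Axes ``ax``:
    #
    #     ax.set_xlim(0, 10)          # both limits
    #     ax.set_xlim((0, 10))        # same, passed as a sequence
    #     ax.set_xlim(xmax=5)         # leave xmin unchanged
    #     ax.set_xlim(10, 0)          # reversed limits invert the axis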
def get_xscale(self):
'return the xaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.xaxis.get_scale()
def set_xscale(self, value, **kwargs):
"""
call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
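    # Hedged example of switching the x-axis scale, assuming an Axes ``ax``;
    # accepted names come from mscale.get_scale_names() ('linear', 'log',
    # 'symlog' in standard builds):
    #
    #     ax.set_xscale('log')            # log transform, decade-spaced ticks
    #     ax.set_xscale('log', basex=2)   # extra keyword accepted by the log scale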
def get_xticks(self, minor=False):
'Return the x ticks as a list of locations'
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
        'Get the major xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
        'Get the minor xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_xticklabels.__doc__ = cbook.dedent(
set_xticklabels.__doc__) % martist.kwdocd
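    # A minimal sketch of fixing tick locations and labels together, assuming
    # an existing Axes ``ax``; the rotation kwarg is a Text property:
    #
    #     ax.set_xticks([0, 1, 2, 3])
    #     ax.set_xticklabels(['zero', 'one', 'two', 'three'], rotation=45)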
def invert_yaxis(self):
"Invert the y-axis."
left, right = self.get_ylim()
self.set_ylim(right, left)
def yaxis_inverted(self):
'Returns True if the y-axis is inverted.'
left, right = self.get_ylim()
return right < left
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
left, right = self.get_ylim()
if left < right:
return left, right
else:
return right, left
def set_ybound(self, lower=None, upper=None):
"""Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower)
else:
self.set_ylim(lower, upper)
else:
if lower < upper:
self.set_ylim(lower, upper)
else:
self.set_ylim(upper, lower)
def get_ylim(self):
"""
Get the y-axis range [*ymin*, *ymax*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):
"""
call signature::
set_ylim(self, *args, **kwargs):
Set the limits for the yaxis; v = [ymin, ymax]::
set_ylim((valmin, valmax))
set_ylim(valmin, valmax)
set_ylim(ymin=1) # ymax unchanged
set_ylim(ymax=1) # ymin unchanged
Keyword arguments:
*ymin*: scalar
the min of the ylim
*ymax*: scalar
the max of the ylim
*emit*: [ True | False ]
notify observers of lim change
Returns the current ylimits as a length 2 tuple
ACCEPTS: len(2) sequence of floats
"""
if ymax is None and iterable(ymin):
ymin,ymax = ymin
if ymin is not None:
ymin = self.convert_yunits(ymin)
if ymax is not None:
ymax = self.convert_yunits(ymax)
old_ymin,old_ymax = self.get_ylim()
if ymin is None: ymin = old_ymin
if ymax is None: ymax = old_ymax
ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)
ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)
self.viewLim.intervaly = (ymin, ymax)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return ymin, ymax
def get_yscale(self):
        'return the yaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.yaxis.get_scale()
def set_yscale(self, value, **kwargs):
"""
call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_yticks(self, minor=False):
'Return the y ticks as a list of locations'
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ False | True ]
Sets the minor ticks if True
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
        'Get the major ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
        'Get the minor ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
        'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the ytick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_yticklabels.__doc__ = cbook.dedent(
set_yticklabels.__doc__) % martist.kwdocd
def xaxis_date(self, tz=None):
"""Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
xmin, xmax = self.dataLim.intervalx
if xmin==0.:
# no data has been added - let's set the default datalim.
            # We should probably use a better proxy for whether the
            # datalim has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(xdata=(dmin, dmax))
dmin, dmax = self.convert_xunits([dmin, dmax])
self.viewLim.intervalx = dmin, dmax
self.dataLim.intervalx = dmin, dmax
locator = self.xaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.xaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervalx[0]==0.:
self.viewLim.intervalx = tuple(self.dataLim.intervalx)
locator.refresh()
formatter = self.xaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.xaxis.set_major_formatter(formatter)
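    # Hedged usage sketch for date handling on the x axis; ``fig`` and ``ax``
    # are assumed to exist and ``import datetime`` is assumed at module level:
    #
    #     days = [datetime.date(2008, 1, d) for d in range(1, 11)]
    #     ax.plot_date(mdates.date2num(days), range(10))
    #     ax.xaxis_date()          # install the date locator and formatter
    #     fig.autofmt_xdate()      # optional: rotate the tick labels, if available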
def yaxis_date(self, tz=None):
"""Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
ymin, ymax = self.dataLim.intervaly
if ymin==0.:
# no data has been added - let's set the default datalim.
            # We should probably use a better proxy for whether the
            # datalim has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(ydata=(dmin, dmax))
dmin, dmax = self.convert_yunits([dmin, dmax])
self.viewLim.intervaly = dmin, dmax
self.dataLim.intervaly = dmin, dmax
locator = self.yaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.yaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervaly[0]==0.:
self.viewLim.intervaly = tuple(self.dataLim.intervaly)
locator.refresh()
        formatter = self.yaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.yaxis.set_major_formatter(formatter)
def format_xdata(self, x):
"""
Return *x* string formatted. This function will use the attribute
self.fmt_xdata if it is callable, else will fall back on the xaxis
major formatter
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return y string formatted. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
'return a format string formatting the *x*, *y* coord'
if x is None:
x = '???'
if y is None:
y = '???'
xs = self.format_xdata(x)
ys = self.format_ydata(y)
return 'x=%s, y=%s'%(xs,ys)
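    # Hedged example: customize the coordinate readout shown by the toolbar by
    # assigning the fmt_xdata/fmt_ydata hooks on an assumed existing Axes ``ax``:
    #
    #     ax.fmt_xdata = lambda x: '%.3f s' % x
    #     ax.fmt_ydata = lambda y: '%.1f V' % y
    #     # format_coord(x, y) now reports e.g. 'x=0.250 s, y=3.0 V'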
#### Interactive manipulation
def can_zoom(self):
"""
        Return *True* if this axes supports the zoom box
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ True | False ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = np.power(10.0, (dx, dy))
start = p.trans_inverse.transform_point((p.x, p.y))
lim_points = p.lim.get_points()
result = start + alpha * (lim_points - start)
result = mtransforms.Bbox(result)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
        return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
        c = mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
Register observers to be notified when certain events occur. Register
with callback functions with the following signatures. The function
has the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
        The connection id is returned - you can use this with
disconnect to disconnect from the axes event
"""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
'disconnect from the Axes event.'
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
'return a list of child artists'
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.append(self.frame)
return children
def contains(self,mouseevent):
"""Test whether the mouse event occured in the axes.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def pick(self, *args):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args)>1:
raise DeprecationWarning('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self,args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
            artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
def set_title(self, label, fontdict=None, **kwargs):
"""
call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
                for information on how the *fontdict* override and the optional kwargs work
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'bottom',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
def set_xlabel(self, xlabel, fontdict=None, **kwargs):
"""
call signature::
set_xlabel(xlabel, fontdict=None, **kwargs)
Set the label for the xaxis.
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
                for information on how the *fontdict* override and the optional kwargs work
"""
label = self.xaxis.get_label()
label.set_text(xlabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
def set_ylabel(self, ylabel, fontdict=None, **kwargs):
"""
call signature::
set_ylabel(ylabel, fontdict=None, **kwargs)
Set the label for the yaxis
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
                for information on how the *fontdict* override and the optional kwargs work
"""
label = self.yaxis.get_label()
label.set_text(ylabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd
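    # Minimal labelling examples, assuming an existing Axes ``ax``; the extra
    # kwargs are Text properties:
    #
    #     ax.set_title('response curve', fontsize=14)
    #     ax.set_xlabel('time (s)')
    #     ax.set_ylabel('amplitude', color='r')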
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ False | True ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'bottom',
'horizontalalignment' : 'left',
#'verticalalignment' : 'top',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd
def annotate(self, *args, **kwargs):
"""
call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
        if 'clip_on' in kwargs: a.set_clip_path(self.patch)
self.texts.append(a)
return a
annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd
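    # A minimal usage sketch for annotate() (illustrative only; the data and
    # annotation coordinates are made up):
    #
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     ax.plot([1, 2, 3], [1, 4, 1])
    #     ax.annotate('local max', xy=(2, 4), xytext=(2.4, 4.5),
    #                 arrowprops=dict(facecolor='black', shrink=0.05))
    #     ax.set_ylim(0, 5)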
#### Lines and spans
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Axis Horizontal Line
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange
>>> axhline(y=1)
          * draw a default hline at *y* = .5 that spans the middle half of
the xrange
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
l.x_isdata = False
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Axis Vertical Line
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
        ylim settings, even if you change them, eg. with the
        :meth:`set_ylim` command.  That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange
>>> axvline(x=1)
          * draw a default vline at *x* = .5 that spans the middle half of
the yrange
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
l.y_isdata = False
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd
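    # A minimal usage sketch for axhline/axvline (illustrative only; assumes
    # ``plt`` and default data limits):
    #
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     ax.axhline(linewidth=4, color='r')        # thick red line at y = 0
    #     ax.axhline(y=0.5, xmin=0.25, xmax=0.75)   # spans the middle half in x
    #     ax.axvline(x=1.0, linestyle='--', color='g')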
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
Axis Horizontal Span.
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.x_isdata = False
self.add_patch(p)
return p
axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
Axis Vertical Span.
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
        1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.y_isdata = False
self.add_patch(p)
return p
axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd
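    # A minimal usage sketch for axhspan/axvspan (illustrative only; the span
    # limits are made up):
    #
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     ax.axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)   # gray horizontal band
    #     ax.axvspan(1.25, 1.55, facecolor='g', alpha=0.5)     # green vertical band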
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
can be scalars or ``len(x)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
y = self.convert_yunits( y )
xmin = self.convert_xunits( xmin )
xmax = self.convert_xunits( xmax )
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
        if len(xmin)!=len(y):
            raise ValueError('xmin and y are unequal sized sequences')
        if len(xmax)!=len(y):
            raise ValueError('xmax and y are unequal sized sequences')
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
hlines.__doc__ = cbook.dedent(hlines.__doc__)
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
          vlines(x, ymin, ymax, colors='k', linestyles='solid')
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors*
a line collections color args, either a single color
or a len(*x*) list of colors
*linestyles*
one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
        if len(ymin)!=len(x):
            raise ValueError('ymin and x are unequal sized sequences')
        if len(ymax)!=len(x):
            raise ValueError('ymax and x are unequal sized sequences')
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd
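    # A minimal usage sketch for hlines/vlines (illustrative only; assumes
    # numpy as ``np`` and ``plt``; the data are made up):
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     t = np.arange(0.0, 5.0, 0.1)
    #     s = np.exp(-t)
    #     ax.vlines(t, [0], s)                    # stems from 0 up to s
    #     ax.hlines([0.2, 0.6], 0.0, 5.0, colors='r', linestyles='dashed')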
#### Basic plotting
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
'-' solid line style
'--' dashed line style
'-.' dash-dot line style
':' dotted line style
'.' point marker
',' pixel marker
'o' circle marker
'v' triangle_down marker
'^' triangle_up marker
'<' triangle_left marker
'>' triangle_right marker
'1' tri_down marker
'2' tri_up marker
'3' tri_left marker
'4' tri_right marker
's' square marker
'p' pentagon marker
'*' star marker
'h' hexagon1 marker
'H' hexagon2 marker
'+' plus marker
'x' x marker
'D' diamond marker
'd' thin_diamond marker
'|' vline marker
'_' hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
        legends), linewidth, antialiasing, marker face color, etc.  Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
            plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markercolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12). See
:class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd
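    # A minimal usage sketch for plot() (illustrative only; adapted from the
    # docstring examples above, with made-up data):
    #
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     ax.plot([1, 2, 3], [1, 2, 3], 'go-', label='line 1', linewidth=2)
    #     ax.plot([1, 2, 3], [1, 4, 9], 'rs', label='line 2')
    #     ax.axis([0, 4, 0, 10])
    #     ax.legend()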
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ None | timezone string ]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ True | False ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ False | True ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.ticker.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.ticker.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.ticker.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.ticker.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates`:
for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange`:
for help on creating the required floating point
dates.
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd
def loglog(self, *args, **kwargs):
"""
call signature::
loglog(*args, **kwargs)
Make a plot with log scaling on the *x* and *y* axis.
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
base of the *x*/*y* logarithm
*subsx*/*subsy*: [ None | sequence ]
the location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd
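    # A minimal usage sketch for loglog() (illustrative only; assumes numpy as
    # ``np`` and ``plt``; the data are made up):
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     x = np.logspace(0, 3, 50)
    #     ax.loglog(x, x ** 2.0, 'b-')      # log scale on both axes
    #     # semilogx()/semilogy() below work the same way, switching only one
    #     # axis to a log scale.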
def semilogx(self, *args, **kwargs):
"""
call signature::
semilogx(*args, **kwargs)
Make a plot with log scaling on the *x* axis.
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
base of the *x* logarithm
*subsx*: [ None | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd
def semilogy(self, *args, **kwargs):
"""
call signature::
semilogy(*args, **kwargs)
Make a plot with log scaling on the *y* axis.
:func:`semilogy` supports all the keyword arguments of
        :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ None | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd
def acorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,
maxlags=None, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
        detrended by the *detrend* callable (default: no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`: For documentation on
valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd
def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, maxlags=None, **kwargs):
"""
call signature::
xcorr(x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
        lag.  *x* and *y* are detrended by the *detrend* callable
        (default: no detrending).  *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
            raise ValueError('maxlags must be None or strictly '
'positive < %d'%Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd
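    # A minimal usage sketch for xcorr()/acorr() (illustrative only; assumes
    # numpy as ``np`` and ``plt``; random data stand in for real signals):
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     x, y = np.random.randn(2, 100)
    #     lags, c, linecol, b = ax.xcorr(x, y, usevlines=True,
    #                                    maxlags=50, normed=True)
    #     # ax.acorr(x, usevlines=True, maxlags=25) gives the autocorrelation.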
def legend(self, *args, **kwargs):
"""
call signature::
legend(*args, **kwargs)
Place a legend on the current axes at location *loc*. Labels are a
sequence of strings and *loc* can be a string or an integer specifying
the legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
If label is set to '_nolegend_', the item will not be shown in
legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
        If none of these locations are suitable, loc can be a 2-tuple
giving x,y in axes coords, ie::
loc = 0, 1 # left top
loc = 0.5, 0.5 # center
Keyword arguments:
*isaxes*: [ True | False ]
Indicates that this is an axes legend
*numpoints*: integer
The number of points in the legend line, default is 4
*prop*: [ None | FontProperties ]
A :class:`matplotlib.font_manager.FontProperties`
instance, or *None* to use rc settings.
*pad*: [ None | scalar ]
The fractional whitespace inside the legend border, between 0 and 1.
If *None*, use rc settings.
*markerscale*: [ None | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*shadow*: [ None | False | True ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*labelsep*: [ None | scalar ]
The vertical space between the legend entries. If *None*, use rc
settings.
*handlelen*: [ None | scalar ]
The length of the legend lines. If *None*, use rc settings.
*handletextsep*: [ None | scalar ]
The space between the legend line and legend text. If *None*, use rc
settings.
*axespad*: [ None | scalar ]
The border between the axes and legend edge. If *None*, use rc
settings.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
"""
def get_handles():
handles = self.lines[:]
handles.extend(self.patches)
handles.extend([c for c in self.collections
if isinstance(c, mcoll.LineCollection)])
handles.extend([c for c in self.collections
if isinstance(c, mcoll.RegularPolyCollection)])
return handles
if len(args)==0:
handles = []
labels = []
for handle in get_handles():
label = handle.get_label()
if (label is not None and
label != '' and not label.startswith('_')):
handles.append(handle)
labels.append(label)
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(get_handles(), labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(get_handles(), labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
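    # A minimal usage sketch for legend() (illustrative only; labels and data
    # are made up):
    #
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     ax.plot([1, 2, 3], label='rising')
    #     ax.plot([3, 2, 1], label='falling')
    #     ax.legend(loc='upper center')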
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
'''
call signature::
step(x, y, *args, **kwargs)
Make a step plot. Additional keyword args to :func:`step` are the same
as those for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i]
If 'post', that interval has level y[i+1]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
'''
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
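    # A minimal usage sketch for step() (illustrative only; assumes numpy as
    # ``np`` and ``plt``):
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     x = np.arange(10)
    #     ax.step(x, x ** 2, where='post')   # see the *where* docstring above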
def bar(self, left, height, width=0.8, bottom=None,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False,
**kwargs
):
"""
call signature::
bar(left, height, width=0.8, bottom=0,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
"""
if not self._hold: self.cla()
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
bottom = [1e-100]
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
left = [1e-100]
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
            raise ValueError('invalid orientation: %s' % orientation)
# do not convert to array here as unit info is lost
#left = np.asarray(left)
#height = np.asarray(height)
#width = np.asarray(width)
#bottom = np.asarray(bottom)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) < nbars:
edgecolor *= nbars
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*nbars
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*nbars
# FIXME: convert the following to proper input validation
# raising ValueError; don't use assert for this.
assert len(left)==nbars, "argument 'left' must be %d or scalar" % nbars
assert len(height)==nbars, ("argument 'height' must be %d or scalar" %
nbars)
assert len(width)==nbars, ("argument 'width' must be %d or scalar" %
nbars)
assert len(bottom)==nbars, ("argument 'bottom' must be %d or scalar" %
nbars)
if yerr is not None and len(yerr)!=nbars:
raise ValueError(
"bar() argument 'yerr' must be len(%s) or scalar" % nbars)
if xerr is not None and len(xerr)!=nbars:
raise ValueError(
"bar() argument 'xerr' must be len(%s) or scalar" % nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
xconv = self.xaxis.converter
if xconv is not None:
units = self.xaxis.get_units()
left = xconv.convert( left, units )
width = xconv.convert( width, units )
if self.yaxis is not None:
yconv = self.yaxis.converter
if yconv is not None :
units = self.yaxis.get_units()
bottom = yconv.convert( bottom, units )
height = yconv.convert( height, units )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
            raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label=label
)
label = '_nolegend_'
r.update(kwargs)
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
self.errorbar(
x, y,
yerr=yerr, xerr=xerr,
fmt=None, ecolor=ecolor, capsize=capsize)
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin(width[width!=0]) # filter out the 0 width rects
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin(height[height!=0]) # filter out the 0 height rects
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
return patches
bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd
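    # A minimal usage sketch for bar() (illustrative only; the category
    # positions, heights and error values are made up):
    #
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     groups = [0, 1, 2, 3, 4]
    #     men = [20, 35, 30, 35, 27]
    #     women = [25, 32, 34, 20, 25]
    #     ax.bar(groups, men, width=0.5, color='b', yerr=[2, 3, 4, 1, 2])
    #     ax.bar(groups, women, width=0.5, color='r', bottom=men)   # stacked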
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd
def broken_barh(self, xranges, yrange, **kwargs):
"""
call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, ie::
facecolors = 'black'
or a sequence of arguments for the various bars, ie::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'):
"""
call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
        using *markerfmt*.  A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
`this document`__ for details
:file:`examples/pylab_examples/stem_plot.py`:
for a demo
__ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt)
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [0, thisy], linefmt)
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt)
self.hold(remember_hold)
return markerline, stemlines, baseline
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1):
r"""
call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized.
Keyword arguments:
*explode*: [ None | len(x) sequence ]
If not *None*, is a len(*x*) array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ None | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ None | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ None | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ False | True ]
Draw a shadow beneath the pie.
The pie chart will probably look best if the figure and axes are
square. Eg.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is None, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
radius = 1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
label_alignment = xt > 0 and 'left' or 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None: return slices, texts
else: return slices, texts, autotexts
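    # A minimal usage sketch for pie() (illustrative only; the fractions and
    # labels are made up):
    #
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure(figsize=(8, 8))   # square figure, as advised above
    #     ax = fig.add_subplot(111)
    #     fracs = [15, 30, 45, 10]
    #     explode = (0, 0.05, 0, 0)
    #     ax.pie(fracs, explode=explode, labels=['A', 'B', 'C', 'D'],
    #            autopct='%1.1f%%', shadow=True)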
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, **kwargs):
"""
call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
          *xerr*/*yerr*: [ scalar | length-N array-like | 2xN array-like ]
            If a scalar number, a length-N array-like object, or an Nx1
            array-like object, symmetric errorbars of +/- that value are drawn.
            If a 2xN array-like (two rows), errorbars are drawn at -row1
            and +row2, giving separate lower and upper errors for each point.
*fmt*: '-'
The plot format symbol for *y*. If *fmt* is *None*, just plot the
errorbars with no line symbols. This can be useful for creating a
bar plot with errorbars.
*ecolor*: [ None | mpl color ]
a matplotlib color arg which gives the color the errorbar lines; if
*None*, use the marker color.
*elinewidth*: scalar
the linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
the size of the error bar caps in points
*barsabove*: [ True | False ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
All other keyword arguments are passed on to the plot command for the
markers, so you can add additional key=value pairs to control the
errorbar markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
        and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Return value is a length 3 tuple. The first element is the
:class:`~matplotlib.lines.Line2D` instance for the *y* symbol
lines. The second element is a list of error bar cap lines,
the third element is a list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
"""
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
barcols.append( self.hlines(y, left, right, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(left, y, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(right, y, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
barcols.append( self.vlines(x, lower, upper, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines._get_next_cycle_color()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
return (l0, caplines, barcols)
errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd
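    # A minimal usage sketch for errorbar() with asymmetric errors
    # (illustrative only; assumes numpy as ``np`` and ``plt``):
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     x = np.arange(0.1, 4.0, 0.5)
    #     y = np.exp(-x)
    #     yerr = [0.1 * y, 0.2 * y]        # 2xN sequence: lower row, upper row
    #     ax.errorbar(x, y, yerr=yerr, fmt='o-', ecolor='g', capsize=5)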
def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None):
"""
call signature::
          boxplot(x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
- *notch* = 0 (default) produces a rectangular box plot.
- *notch* = 1 will produce a notched box plot
*sym* (default 'b+') is the default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
- *vert* = 1 (default) makes the boxes vertical.
- *vert* = 0 makes horizontal boxes. This seems goofy, but
that's how Matlab did it.
*whis* (default 1.5) defines the length of the whiskers as
a function of the inner quartile range. They extend to the
most extreme data point within ( ``whis*(75%-25%)`` ) data range.
*positions* (default 1,2,...,n) sets the horizontal positions of
the boxes. The ticks and limits are automatically set to match
the positions.
*widths* is either a scalar or a vector and sets the width of
each box. The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
*x* is an array or a sequence of vectors.
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created.
**Example:**
.. plot:: pyplots/boxplot_demo.py
"""
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
                raise ValueError("input x can have no more than 2 dimensions")
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'regular' plot
if notch == 0:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
# calculate 'notch' plot
else:
notch_max = med + 1.57*iq/np.sqrt(row)
notch_min = med - 1.57*iq/np.sqrt(row)
if notch_max > q3:
notch_max = q3
if notch_min < q1:
notch_min = q1
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, 'r-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if 1 == vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
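    # A minimal usage sketch for boxplot() (illustrative only; assumes numpy
    # as ``np`` and ``plt``; random samples stand in for real data):
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     data = [np.random.normal(0, std, 100) for std in (1, 2, 3)]
    #     ax.boxplot(data, notch=0, sym='g+', vert=1, whis=1.5)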
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D
sequences of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
===== ==============
Value Description
===== ==============
's' square
'o' circle
'^' triangle up
'>' triangle right
'v' triangle down
'<' triangle left
'd' diamond
'p' pentagram
'h' hexagon
'8' octagon
'+' plus
'x' cross
===== ==============
The marker can also be a tuple (*numsides*, *style*,
*angle*), which will create a custom, regular symbol.
*numsides*:
the number of sides
*style*:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
3 a circle (*numsides* and *angle* is ignored)
===== =============================================
*angle*:
the angle of rotation of the symbol
Finally, *marker* can be (*verts*, 0): *verts* is a
sequence of (*x*, *y*) vertices for a custom scatter
symbol. Alternatively, use the kwarg combination
*marker* = *None*, *verts* = *verts*.
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ None | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``. *cmap* is only used if *c*
is an array of floats.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:func:`normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either are None, the min and
max of the color array *C* is used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: 0 <= scalar <= 1
The alpha value for the patches
*linewidths*: [ None | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
'none' to plot faces with no outlines
*facecolors*:
'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
"""
if not self._hold: self.cla()
syms = { # a dict from symbol to (numsides, angle)
's' : (4,math.pi/4.0,0), # square
'o' : (20,3,0), # circle
'^' : (3,0,0), # triangle up
'>' : (3,math.pi/2.0,0), # triangle right
'v' : (3,math.pi,0), # triangle down
'<' : (3,3*math.pi/2.0,0), # triangle left
'd' : (4,0,0), # diamond
'p' : (5,0,0), # pentagram
'h' : (6,0,0), # hexagon
'8' : (8,0,0), # octagon
'+' : (4,0,2), # plus
'x' : (4,math.pi/4.0,2) # cross
}
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
if is_string_like(c) or cbook.is_sequence_of_strings(c):
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
sh = np.shape(c)
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if len(sh) == 1 and sh[0] == len(x):
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if not iterable(s):
scales = (s,)
else:
scales = s
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
DeprecationWarning) #2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if is_string_like(marker):
# the standard way to define symbols using a string character
sym = syms.get(marker)
if sym is None and verts is None:
raise ValueError('Unknown marker symbol to scatter')
numsides, rotation, symstyle = syms[marker]
elif iterable(marker):
# accept marker to be:
# (numsides, style, [angle])
# or
# (verts[], style, [angle])
if len(marker)<2 or len(marker)>3:
raise ValueError('Cannot create markersymbol from marker')
if cbook.is_numlike(marker[0]):
# (numsides, style, [angle])
if len(marker)==2:
numsides, rotation = marker[0], 0.
elif len(marker)==3:
numsides, rotation = marker[0], marker[2]
sym = True
if marker[1] in (1,2):
symstyle = marker[1]
else:
verts = np.asarray(marker[0])
if sym is not None:
if symstyle==0:
collection = mcoll.RegularPolyCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==1:
collection = mcoll.StarPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==2:
collection = mcoll.AsteriskPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==3:
collection = mcoll.CircleCollection(
scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
else:
rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2))
verts /= rescale
collection = mcoll.PolyCollection(
(verts,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
temp_x = x
temp_y = y
minx = np.amin(temp_x)
maxx = np.amax(temp_x)
miny = np.amin(temp_y)
maxy = np.amax(temp_y)
w = maxx-minx
h = maxy-miny
# the pad is a little hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd
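# Usage sketch for scatter (comments only; assumes an existing Axes `ax`).
# The values in `c` are colormapped because they form a 1-D float array of
# the same length as `x` and `y`; the names below are illustrative.
#
#   import numpy as np
#   x = np.random.rand(50)
#   y = np.random.rand(50)
#   sizes = 100 * np.random.rand(50)            # marker areas in points^2
#   coll = ax.scatter(x, y, s=sizes, c=y, marker='o', edgecolors='none')
#   # `coll` is the returned Collection; when a colormap is in use a
#   # colorbar can be drawn from it with fig.colorbar(coll).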
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs):
"""
call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is None
(the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ None | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
*yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ None | Colormap ]
a :class:`matplotlib.cm.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ None | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin*/*vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar
the alpha value for the patches
*linewidths*: [ None | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ None | mpl color | color sequence ]
If 'none', draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collection.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
x = np.log10(x)
if yscale=='log':
y = np.log10(y)
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
x = (x-xmin)/sx
y = (y-ymin)/sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]]+=1
else:
lattice2[ix2[i], iy2[i]]+=1
else:
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals):
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals):
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
polygons = np.zeros((6, n, 2), float)
polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
if C is not None:
# remove accumulation bins with no data
polygons = polygons[:,good_idxs,:]
accum = accum[good_idxs]
polygons = np.transpose(polygons, axes=[1,0,2])
polygons[:,:,0] *= sx
polygons[:,:,1] *= sy
polygons[:,:,0] += px
polygons[:,:,1] += py
if xscale=='log':
polygons[:,:,0] = 10**(polygons[:,:,0])
xmin = 10**xmin
xmax = 10**xmax
self.set_xscale('log')
if yscale=='log':
polygons[:,:,1] = 10**(polygons[:,:,1])
ymin = 10**ymin
ymax = 10**ymax
self.set_yscale('log')
if edgecolors=='none':
edgecolors = 'face'
collection = mcoll.PolyCollection(
polygons,
edgecolors = edgecolors,
linewidths = linewidths,
transOffset = self.transData,
)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
elif bins!=None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd
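# Usage sketch for hexbin (comments only; `ax` is an existing Axes and the
# data below are illustrative). With bins='log' the color scale is
# log10(count + 1), as described in the docstring.
#
#   import numpy as np
#   x = np.random.standard_normal(10000)
#   y = np.random.standard_normal(10000)
#   polys = ax.hexbin(x, y, gridsize=30, bins='log')
#   counts = polys.get_array()      # per-hexagon (log-scaled) values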
def arrow(self, x, y, dx, dy, **kwargs):
"""
call signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*).
Optional kwargs control the arrow properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
"""
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
barbs.__doc__ = cbook.dedent(barbs.__doc__) % {
'barbs_doc': mquiver.Barbs.barbs_doc}
def fill(self, *args, **kwargs):
"""
call signature::
fill(*args, **kwargs)
Plot filled polygons. *args* is a variable length argument,
allowing for multiple *x*, *y* pairs with an optional color
format string; see :func:`~matplotlib.pyplot.plot` for details
on the argument parsing. For example, to plot a polygon with
vertices at *x*, *y* in blue.::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, e.g. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the Polygon properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd
def fill_between(self, x, y1, y2=0, where=None, **kwargs):
"""
call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x*
an N length np array of the x data
*y1*
an N length scalar or np array of the y data
*y2*
an N length scalar or np array of the y data
*where*
if None, default to fill between everywhere. If not None,
it is an N length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs*
keyword args passed on to the :class:`PolyCollection`
kwargs control the Polygon properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between.py
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = np.asarray(self.convert_xunits(x))
y1 = np.asarray(self.convert_yunits(y1))
y2 = np.asarray(self.convert_yunits(y2))
if not cbook.iterable(y1):
y1 = np.ones_like(x)*y1
if not cbook.iterable(y2):
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
where = np.asarray(where)
assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where))
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
theseverts = []
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
X[0] = xslice[0], y2slice[0]
X[N+1] = xslice[-1], y2slice[-1]
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd
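# Usage sketch for fill_between (comments only; names are illustrative).
# The `where` mask restricts the fill to the regions in which y1 >= y2.
#
#   import numpy as np
#   x = np.arange(0.0, 2.0, 0.01)
#   y1 = np.sin(2 * np.pi * x)
#   y2 = 0.5 * np.sin(4 * np.pi * x)
#   coll = ax.fill_between(x, y1, y2, where=y1 >= y2, facecolor='green')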
#### plotting z(x,y): imshow, pcolor and relatives, contour
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=1.0, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* to current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalised.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ None | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos',
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
*norm*: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
*None*, defaults to :func:`normalize`. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ None | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
*origin*: [ None | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ None | scalars (left, right, bottom, top) ]
Data values of the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ None | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties:
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
xmin, xmax, ymin, ymax = im.get_extent()
corners = (xmin, ymin), (xmax, ymax)
self.update_datalim(corners)
if self._autoscaleon:
self.set_xlim((xmin, xmax))
self.set_ylim((ymin, ymax))
self.images.append(im)
return im
imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd
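# Usage sketch for imshow (comments only; `ax` and `Z` are illustrative).
# A float MxN array is treated as luminance data and scaled through
# vmin/vmax (or the norm) before colormapping.
#
#   import numpy as np
#   Z = np.random.rand(10, 10)
#   im = ax.imshow(Z, interpolation='nearest', origin='lower',
#                  extent=(0, 1, 0, 1), vmin=0.0, vmax=1.0)
#   # fig.colorbar(im) attaches a colorbar driven by the AxesImage.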
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
def pcolor(self, *args, **kwargs):
"""
call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
Create a pseudocolor plot of a 2-D array.
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If *None*, use
rc settings.
norm: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If *None*, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collection.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the Matlab(TM) convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`~matplotlib.pyplot.meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to:
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
Matlab :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collection.PolyCollection` properties:
%(PolyCollection)s
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
X2[:,newaxis], Y2[:,newaxis],
X3[:,newaxis], Y3[:,newaxis],
X4[:,newaxis], Y4[:,newaxis],
X1[:,newaxis], Y1[:,newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
#verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4))
C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())
if shading == 'faceted':
edgecolors = (0,0,0,1),
linewidths = (0.25,)
else:
edgecolors = 'face'
linewidths = (1.0,)
kwargs.setdefault('edgecolors', edgecolors)
kwargs.setdefault('antialiaseds', (0,))
kwargs.setdefault('linewidths', linewidths)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd
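# Usage sketch for pcolor (comments only; names are illustrative). X and Y
# give the cell corners, so they have one more row and column than C.
#
#   import numpy as np
#   x = np.arange(6)              # 6 column edges -> 5 columns of cells
#   y = np.arange(4)              # 4 row edges    -> 3 rows of cells
#   X, Y = np.meshgrid(x, y)
#   C = np.random.rand(3, 5)      # one value per quadrilateral
#   coll = ax.pcolor(X, Y, C, edgecolors='k', linewidths=0.5)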
def pcolormesh(self, *args, **kwargs):
"""
call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If None, use
rc settings.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If None, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If None, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collection.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh`
properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`:
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
edgecolors = kwargs.pop('edgecolors', 'None')
antialiased = kwargs.pop('antialiased', False)
X, Y, C = self._pcolorargs('pcolormesh', *args)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
# lower left corner
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
if shading == 'faceted' or edgecolors != 'None':
showedges = 1
else:
showedges = 0
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords, showedges,
antialiased=antialiased) # kwargs are not used
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd
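# Usage sketch for pcolormesh (comments only; names are illustrative).
# Masked values in C are handled through the colormap/norm, unlike pcolor,
# which drops the corresponding quadrilaterals.
#
#   import numpy as np
#   import numpy.ma as ma
#   C = np.random.rand(3, 5)
#   Cm = ma.masked_where(C < 0.2, C)
#   mesh = ax.pcolormesh(Cm)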
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a version of pcolor that
does not draw lines, that provides the fastest
possible rendering with the Agg backend, and that
can handle any quadrilateral grid.
Call signatures::
pcolor(C, **kwargs)
pcolor(xr, yr, C, **kwargs)
pcolor(x, y, C, **kwargs)
pcolor(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``pcolor(C, **kwargs)`` is equivalent to
``pcolor([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ None | Colormap ]
A cm Colormap instance from cm. If None, use rc settings.
*norm*: [ None | Normalize ]
An mcolors.Normalize instance is used to scale luminance data to
0,1. If None, defaults to normalize()
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. If you pass a norm instance, *vmin* and *vmax*
will be ignored.
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a QuadMesh collection in the general
quadrilateral case.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
np.ptp(dy) < 0.01*np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc+1
Ny = nr+1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0)
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.images.append(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.images.append(im)
ret = im
self._set_artist_props(ret)
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
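# Usage sketch for pcolorfast (comments only; names are illustrative).
# Depending on the call signature, the fast image path, the rectangular
# PcolorImage path or the general QuadMesh path is chosen, as implemented
# above.
#
#   import numpy as np
#   C = np.random.rand(50, 40)
#   ret = ax.pcolorfast(C)                    # image style, fastest
#   # ret = ax.pcolorfast([0, 4], [0, 5], C)  # same data over a given range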
def contour(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = False
return mcontour.ContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.ContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = True
return mcontour.ContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.ContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
def table(self, **kwargs):
"""
call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Add a table to the current axes. Returns a
:class:`matplotlib.table.Table` instance. For finer grained
control over tables, use the :class:`~matplotlib.table.Table`
class and add it to the axes with
:meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
"""
return mtable.table(self, **kwargs)
table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd
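# Usage sketch for table (comments only; the cell data are illustrative).
# The returned object is a matplotlib.table.Table instance added to `ax`.
#
#   cell_text = [['1', '2'], ['3', '4']]
#   tab = ax.table(cellText=cell_text, colLabels=['a', 'b'], loc='bottom')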
def twinx(self):
"""
call signature::
ax = twinx()
create a twin of Axes for generating a plot with a shared
x-axis but independent y-axis. The y-axis of self will have
ticks on the left and the returned axes will have ticks on the
right
"""
ax2 = self.figure.add_axes(self.get_position(True), sharex=self,
frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
self.yaxis.tick_left()
return ax2
def twiny(self):
"""
call signature::
ax = twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x-axis. The x-axis of self will have
ticks on the bottom and the returned axes will have ticks on
the top
"""
ax2 = self.figure.add_axes(self.get_position(True), sharey=self,
frameon=False)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
self.xaxis.tick_bottom()
return ax2
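# Usage sketch for twinx (comments only; names are illustrative). The two
# curves share the x-axis but are scaled on independent y-axes.
#
#   import numpy as np
#   t = np.arange(0.01, 10.0, 0.01)
#   ax.plot(t, np.exp(t), 'b-')
#   ax2 = ax.twinx()              # ticks on the right for the twin axes
#   ax2.plot(t, np.sin(2 * np.pi * t), 'r.')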
def get_shared_x_axes(self):
'Return a copy of the shared axes Grouper object for x axes'
return self._shared_x_axes
def get_shared_y_axes(self):
'Return a copy of the shared axes Grouper object for y axes'
return self._shared_y_axes
#### Data analysis
def hist(self, x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs):
"""
call signature::
hist(x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs)
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. *x* are the data to be binned. *x* can be an array,
a 2D array with multiple data in its columns, or a list of
arrays with data of different length. Note that if *bins*
is an integer, *bins* + 1 bin edges
will be returned, compatible with the semantics of
:func:`numpy.histogram` with the *new* = True argument.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling is
set off (*autoscale_on* is set to *False*) and the xaxis limits
are set to encompass the full specified bin range.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print np.sum(pdf * np.diff(bins))
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
kwargs are used to update the properties of the hist
:class:`~matplotlib.patches.Rectangle` instances:
%(Rectangle)s
You can use labels for your histogram, and only the first
:class:`~matplotlib.patches.Rectangle` gets the label (the
others get the magic string '_nolegend_'). This will make the
histograms work in the intuitive way for bar charts::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
**Example:**
.. plot:: mpl_examples/pylab_examples/histogram_demo.py
"""
if not self._hold: self.cla()
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in numpy !!!
if kwargs.get('width') is not None:
raise DeprecationWarning(
'hist now uses the rwidth to give relative width '
'and not absolute width')
try:
# make sure a copy is created: don't use asarray
x = np.transpose(np.array(x))
if len(x.shape)==1:
x.shape = (1,x.shape[0])
elif len(x.shape)==2 and x.shape[1]<x.shape[0]:
warnings.warn('2D hist should be nsamples x nvariables; '
'this looks transposed')
except ValueError:
# multiple hist with data of different length
if iterable(x[0]) and not is_string_like(x[0]):
tx = []
for i in xrange(len(x)):
tx.append( np.array(x[i]) )
x = tx
else:
raise ValueError('Cannot use the provided data to create a histogram')
# Check whether bins or range are given explicitly. In that
# case do not autoscale axes.
binsgiven = (cbook.iterable(bins) or range != None)
# check the version of the numpy
if np.__version__ < "1.3": # version 1.1 and 1.2
hist_kwargs = dict(range=range,
normed=bool(normed), new=True)
else: # version 1.3 and later, drop new=True
hist_kwargs = dict(range=range,
normed=bool(normed))
n = []
for i in xrange(len(x)):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, **hist_kwargs)
n.append(m)
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None,None,-1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
stacked = False
if rwidth is not None: dr = min(1., max(0., rwidth))
elif len(n)>1: dr = 0.8
else: dr = 1.0
if histtype=='bar':
width = dr*totwidth/len(n)
dw = width
if len(n)>1:
boffset = -0.5*dr*totwidth*(1.-1./len(n))
else:
boffset = 0.0
elif histtype=='barstacked':
width = dr*totwidth
boffset, dw = 0.0, 0.0
stacked = True
else:
raise ValueError('invalid histtype: %s' % histtype)
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
elif align != 'left' and align != 'center':
raise ValueError('invalid align: %s' % align)
if orientation == 'horizontal':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.barh(bins[:-1]+boffset, m, height=width,
left=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
elif orientation == 'vertical':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.bar(bins[:-1]+boffset, m, width=width,
bottom=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
else:
raise ValueError('invalid orientation: %s' % orientation)
elif histtype.startswith('step'):
x = np.zeros( 2*len(bins), np.float )
y = np.zeros( 2*len(bins), np.float )
x[0::2], x[1::2] = bins, bins
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
elif align != 'mid' and align != 'edge':
raise ValueError('invalid align: %s' % align)
if log:
y[0],y[-1] = 1e-100, 1e-100
if orientation == 'horizontal':
self.set_xscale('log')
elif orientation == 'vertical':
self.set_yscale('log')
fill = False
if histtype == 'stepfilled':
fill = True
elif histtype != 'step':
raise ValueError('invalid histtype: %s' % histtype)
for m in n:
y[1:-1:2], y[2::2] = m, m
if orientation == 'horizontal':
x,y = y,x
elif orientation != 'vertical':
raise ValueError('invalid orientation: %s' % orientation)
color = self._get_lines._get_next_cycle_color()
if fill:
patches.append( self.fill(x, y,
closed=False, facecolor=color) )
else:
patches.append( self.fill(x, y,
closed=False, edgecolor=color, fill=False) )
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin, xmax = 0, self.dataLim.intervalx[1]
for m in n:
xmin = np.amin(m[m!=0]) # filter out the 0 height bins
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin, ymax = 0, self.dataLim.intervaly[1]
for m in n:
ymin = np.amin(m[m!=0]) # filter out the 0 height bins
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
else:
raise ValueError('invalid histtype: %s' % histtype)
label = kwargs.pop('label', '')
for patch in patches:
for p in patch:
p.update(kwargs)
p.set_label(label)
label = '_nolegend_'
if binsgiven:
self.set_autoscale_on(False)
if orientation == 'vertical':
self.autoscale_view(scalex=False, scaley=True)
XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_xbound(XL)
else:
self.autoscale_view(scalex=True, scaley=False)
YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_ybound(YL)
if len(n)==1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd
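# Usage sketch for hist (comments only; the data are illustrative). With
# normed=True the returned n is a probability density rather than raw
# counts.
#
#   import numpy as np
#   x = np.random.randn(1000)
#   n, bins, patches = ax.hist(x, bins=50, normed=True,
#                              histtype='stepfilled', alpha=0.75)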
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The power spectral density by Welch's average periodogram
method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute *Pxx*, with a
scaling to correct for power loss due to windowing. *Fs* is the
sampling frequency.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*Pxx*, *freqs*).
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
"""
if not self._hold: self.cla()
pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,
sides, scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
self.plot(freqs, 10*np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxx, freqs
psd_doc_dict = dict()
psd_doc_dict.update(martist.kwdocd)
psd_doc_dict.update(mlab.kwdocd)
psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD'])
psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict
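# Usage sketch for psd (comments only; the noisy sine signal is
# illustrative).
#
#   import numpy as np
#   dt = 0.01
#   t = np.arange(0, 10, dt)
#   s = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(len(t))
#   Pxx, freqs = ax.psd(s, NFFT=256, Fs=1.0 / dt)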
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum
(complex valued), and :math:`10\log_{10}|P_{xy}|` is
plotted.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso:
:meth:`psd`
For a description of the optional parameters.
"""
if not self._hold: self.cla()
pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxy, freqs
csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold: self.cla()
cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict
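# Usage sketch for cohere (comments only; the signals are illustrative).
# Two noisy signals sharing a common component give a coherence near one
# at that frequency.
#
#   import numpy as np
#   dt = 0.01
#   t = np.arange(0, 30, dt)
#   common = np.sin(2 * np.pi * 10 * t)
#   s1 = common + np.random.randn(len(t))
#   s2 = common + np.random.randn(len(t))
#   Cxy, f = ax.cohere(s1, s2, NFFT=256, Fs=1.0 / dt)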
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None):
"""
call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.cm.Colormap` instance; if *None* use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`mlab.specgram`
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
- *Pxx* is a len(freqs) x len(times) array of power
- *im* is a :class:`matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
if not self._hold: self.cla()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent)
self.axis('auto')
return Pxx, freqs, bins, im
specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict
del psd_doc_dict #So that this does not become an Axes attribute
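# Illustrative usage sketch (added commentary, not part of the original
# source). For a signal ``x`` sampled at ``fs`` Hz, a spectrogram with 50%
# segment overlap and a colorbar (``fig`` is the enclosing Figure, assumed):
#
#     Pxx, freqs, bins, im = ax.specgram(x, NFFT=256, Fs=fs, noverlap=128)
#     fig.colorbar(im, ax=ax)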
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs):
"""
call signature::
spy(Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs)
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
If *precision* is 0, any non-zero value will be plotted;
else, values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a
special case: if *precision* is 'present', any value present in
the array will be plotted, even if it is identically zero.
The array will be plotted as it would be printed, with
the first index (row) increasing down and the second
index (column) increasing to the right.
By default aspect is 'equal', so that each array element
occupies a square space; set the aspect kwarg to 'auto'
to allow the plot to fill the plot box, or to any scalar
number to specify the aspect ratio of an array element
directly.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
.. seealso::
:func:`~matplotlib.pyplot.imshow`
For controlling colors, e.g. cyan background and red marks,
use::
cmap = mcolors.ListedColormap(['c','r'])
If *marker* or *markersize* is not *None*, useful kwargs include:
* *marker*
* *markersize*
* *color*
Useful values for *marker* include:
* 's' square (default)
* 'o' circle
* '.' point
* ',' pixel
.. seealso::
:func:`~matplotlib.pyplot.plot`
"""
if precision is None:
precision = 0
warnings.warn("Use precision=0 instead of None", DeprecationWarning)
# deprecated 2008/10/03
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z)>precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin='upper', **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z)>precision
y, x = np.nonzero(nonzero)
if marker is None: marker = 's'
if markersize is None: markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc-0.5)
self.set_ylim(ymin=nr-0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
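# Illustrative usage sketch (added commentary, not part of the original
# source), showing both plotting styles described above:
#
#     import scipy.sparse as sp
#     M = sp.rand(50, 50, density=0.05)    # random sparse matrix
#     ax.spy(M, markersize=4)              # marker style (required for sparse)
#     ax.spy(M.toarray(), precision=0.1)   # image style on the dense array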
def matshow(self, Z, **kwargs):
'''
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed,
with the first row at the top. Row and column numbering
is zero-based.
Argument:
*Z* anything that can be interpreted as a 2-D array
kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.
:meth:`matshow` sets defaults for *extent*, *origin*,
*interpolation*, and *aspect*; use care in overriding the
*extent* and *origin* kwargs, because they interact. (Also,
if you want to change them, you probably should be using
imshow directly in your own version of matshow.)
Returns: an :class:`matplotlib.image.AxesImage` instance.
'''
Z = np.asarray(Z)
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
kw = {'extent': extent,
'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
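# Illustrative usage sketch (added commentary, not part of the original
# source); ``matshow`` is typically reached through the pyplot wrapper:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     A = np.random.rand(5, 8)
#     plt.matshow(A)   # row 0 of A is drawn at the top, as documented above
#     plt.colorbar()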
class SubplotBase:
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args)==1:
s = str(args[0])
if len(s) != 3:
raise ValueError('Argument to subplot must be a 3-digit integer')
rows, cols, num = map(int, s)
elif len(args)==3:
rows, cols, num = args
else:
raise ValueError( 'Illegal argument to subplot')
total = rows*cols
num -= 1 # convert from matlab to python indexing
# ie num in range(0,total)
if num >= total:
raise ValueError( 'Subplot number exceeds total subplots')
self._rows = rows
self._cols = cols
self._num = num
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def get_geometry(self):
'get the subplot geometry, eg 2,2,3'
return self._rows, self._cols, self._num+1
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
'change subplot geometry, eg. from 1,1,1 to 2,2,3'
self._rows = numrows
self._cols = numcols
self._num = num-1
self.update_params()
self.set_position(self.figbox)
def update_params(self):
'update the subplot position from fig.subplotpars'
rows = self._rows
cols = self._cols
num = self._num
pars = self.figure.subplotpars
left = pars.left
right = pars.right
bottom = pars.bottom
top = pars.top
wspace = pars.wspace
hspace = pars.hspace
totWidth = right-left
totHeight = top-bottom
figH = totHeight/(rows + hspace*(rows-1))
sepH = hspace*figH
figW = totWidth/(cols + wspace*(cols-1))
sepW = wspace*figW
rowNum, colNum = divmod(num, cols)
figBottom = top - (rowNum+1)*figH - rowNum*sepH
figLeft = left + colNum*(figW + sepW)
self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
figW, figH)
self.rowNum = rowNum
self.colNum = colNum
self.numRows = rows
self.numCols = cols
if 0:
# debug dump of the computed geometry (kept disabled, as in the original)
print 'rcn', rows, cols, num
print 'lbrt', left, bottom, right, top
print 'figBottom', figBottom
print 'figLeft', figLeft
print 'figW', figW
print 'figH', figH
print 'self.rowNum', self.rowNum
print 'self.colNum', self.colNum
print 'self.numRows', self.numRows
print 'self.numCols', self.numCols
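# Worked example of the geometry above (added commentary, not part of the
# original source). With the classic default subplotpars (left=0.125,
# right=0.9, bottom=0.1, top=0.9, wspace=0.2, hspace=0.2) and a 2x2 grid:
#   figH = 0.8 / (2 + 0.2) ~= 0.364, sepH = 0.2 * figH ~= 0.073
#   figW = 0.775 / (2 + 0.2) ~= 0.352
# so subplot number 3 (num=2 -> rowNum=1, colNum=0) gets figLeft = 0.125 and
# figBottom = 0.9 - 2*figH - sepH = 0.1, i.e. it sits flush with the bottom
# margin, as expected.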
def is_first_col(self):
return self.colNum==0
def is_first_row(self):
return self.rowNum==0
def is_last_row(self):
return self.rowNum==self.numRows-1
def is_last_col(self):
return self.colNum==self.numCols-1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubclassBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = new.classobj("%sSubplot" % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes)
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
|
agpl-3.0
|
wzbozon/scikit-learn
|
examples/datasets/plot_iris_dataset.py
|
283
|
1928
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray.
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
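# A possible extension (added, not part of the original example): report how
# much of the total variance the three plotted PCA components capture.
pca = PCA(n_components=3).fit(iris.data)
print("Explained variance ratio: %s" % str(pca.explained_variance_ratio_))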
|
bsd-3-clause
|
sjperkins/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator.py
|
6
|
55704
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
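# Illustrative sketch (added commentary, not part of the original source) of
# what _model_fn_args returns; ``my_model_fn`` is a hypothetical user function:
#
#     def my_model_fn(features, labels, mode, params):
#         ...
#     _model_fn_args(my_model_fn)
#     # -> ('features', 'labels', 'mode', 'params')
#
#     import functools
#     _model_fn_args(functools.partial(my_model_fn, params={'lr': 0.1}))
#     # -> ('features', 'labels', 'mode')   # bound keyword is excluded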
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
A dict mapping the friendly names given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
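# Illustrative sketch (added commentary, not part of the original source) of
# the preferred MetricSpec-based specification described above; the
# 'classes' prediction key is an assumption about the model's output dict:
#
#     metrics = {
#         'accuracy': metric_spec.MetricSpec(
#             metric_fn=metrics_lib.streaming_accuracy,
#             prediction_key='classes'),
#     }
#     eval_ops = _make_metrics_ops(metrics, features, labels, predictions)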
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
value.simple_value = int(dictionary[key])
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, np.int32 or int.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use `Estimator`.
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
logging.info('Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
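# Illustrative sketch (added commentary, not part of the original source) of
# the input_fn-based calling convention that fit() expects once the
# deprecated x/y/batch_size path is gone; assumes `import tensorflow as tf`
# and in-memory numpy arrays train_x, train_y in user code:
#
#     def train_input_fn():
#         features = {'x': tf.constant(train_x)}
#         labels = tf.constant(train_y)
#         return features, labels
#
#     estimator.fit(input_fn=train_input_fn, steps=1000)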
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This can implement
either iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time, or when the model is taking a long
time to converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and `batch_size` must be `None`.
batch_size: Override default batch size. If set, `input_fn` must be
`None`.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
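# Illustrative sketch (added commentary, not part of the original source):
# with as_iterable=True (the future default) predictions are consumed lazily,
# one example at a time:
#
#     for pred in estimator.predict(input_fn=predict_input_fn):
#         process(pred)   # predict_input_fn / process are user-provided
#
# whereas passing in-memory arrays via `x` with as_iterable=False routes
# through SKCompat.predict and returns numpy arrays for the whole input.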
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires that `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend(hooks)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
* `mode`: Optional. Specifies if this training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in the `params` parameter. This allows
Estimators to be configured from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
else:
saver_for_restore = saver.Saver(sharded=True)
with tf_session.Session('') as session:
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
lookup_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
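# --- Illustrative usage (a sketch, not part of the original module) ---
# How the SKCompat wrapper above could be driven with in-memory numpy data.
# `my_model_fn`, the import path and the array names are assumptions made for
# illustration only and are not defined in this file.
#
#   import numpy as np
#   from tensorflow.contrib.learn import Estimator
#
#   est = SKCompat(Estimator(model_fn=my_model_fn, model_dir='/tmp/my_model'))
#   est.fit(x_train, y_train, batch_size=128, steps=1000)
#   print(est.score(x_test, y_test))
#   predictions = est.predict(x_test)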
|
apache-2.0
|
jwplayer/jwalk
|
jwalk/io.py
|
1
|
1641
|
# -*- coding: utf-8 -*-
"""Load and save data."""
import logging
import numpy as np
import scipy.sparse as sps
try:
import pandas as pd
except ImportError:
PANDAS_INSTALLED = False
else:
PANDAS_INSTALLED = True
__all__ = ['load_edges', 'load_graph', 'save_graph']
logger = logging.getLogger(__name__)
def load_edges(fpath, delimiter=None, has_header=False):
"""Load edges in CSV format as numpy ndarray of strings.
Args:
fpath (str): edges file
delimiter (str): alternative argument name for sep (default=None)
has_header (bool): True if has header row
Returns:
np.ndarray: array of edges
"""
if PANDAS_INSTALLED:
header = 'infer' if has_header else None
df = pd.read_csv(fpath, delimiter=delimiter, header=header)
edges = df.values
else:
logger.warning("Pandas not installed. Using numpy to load csv, which "
"is slower.")
header = 1 if has_header else 0
edges = np.genfromtxt(fpath, delimiter=delimiter, skip_header=header,
dtype=object)
return edges.astype('str')
def save_graph(filename, csr_matrix, labels=None):
np.savez(filename,
data=csr_matrix.data,
indices=csr_matrix.indices,
indptr=csr_matrix.indptr,
shape=csr_matrix.shape,
labels=labels)
return filename
def load_graph(filename):
loader = np.load(filename)
sp = sps.csr_matrix((loader['data'], loader['indices'], loader['indptr']),
shape=loader['shape'])
return sp, loader['labels']
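# --- Illustrative usage (a sketch, not part of the original module) ---
# Round-trips a tiny edge list through load_edges and a small CSR adjacency
# matrix through save_graph/load_graph defined above. The temporary CSV and
# the 'tiny_graph.npz' file name exist only for this example.
if __name__ == '__main__':
    import os
    import tempfile
    # load_edges: write a two-edge CSV and read it back as an array of strings
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False)
    tmp.write('a,b\nb,c\n')
    tmp.close()
    edges = load_edges(tmp.name, delimiter=',')
    os.unlink(tmp.name)
    print(edges)
    # save_graph / load_graph: round-trip a small symmetric adjacency matrix
    adj = sps.csr_matrix(np.array([[0, 1, 0],
                                   [1, 0, 1],
                                   [0, 1, 0]]))
    path = save_graph('tiny_graph.npz', adj, labels=np.array(['a', 'b', 'c']))
    graph, labels = load_graph(path)
    assert (graph != adj).nnz == 0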
|
apache-2.0
|
lightsighter/CudaDMA
|
src/examples/saxpy_strided/replot.py
|
2
|
2616
|
#!/usr/bin/python
###########################################################################
# Copyright 2010 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###########################################################################
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
#from scipy.interpolate import griddata
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
### Globals
VERBOSE = False
SEED = 0
def main():
do_compute = False
double_buffer = False
use_opts = False
xs = []
ys = []
zs = []
min_cnt = 1
max_cnt = 64
xticks = [1,2,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60,64]
min_sz = 32*4
max_sz = 2*32*16*4
yticks = [32*4] + range(2*32*4*1, 2*32*16*4+1, 2*32*4*1)
#xticks = range(1,5)
#yticks = [32*16*1, 32*16*2, 32*16*3, 32*16*4]
import pickle
f = open('data.pkl', 'r')
data = pickle.load(f)
xs = data[0]
ys = data[1]
zs = data[2]
fig = plt.figure()
print xs, ys, zs
ax = fig.add_subplot(111)
xi = np.linspace(min_sz, max_sz, 1028)
yi = np.linspace(min_cnt,max_cnt,1028)
zi = griddata(np.array(xs),np.array(ys),np.array(zs),xi,yi)
#zi = griddata((x, y), z, (xi[None,:], yi[:,None]), method='cubic')
xi, yi = np.meshgrid(xi, yi)
CS = plt.contourf(xi,yi,zi,[0,25,50,75,100,125,150],cmap=plt.cm.bone,extend='min')
CS = plt.contourf(xi,yi,zi,[0,25,50,75,100,125,150],cmap=plt.cm.bone,extend='min')
#CS = plt.contourf(xi,yi,zi,cmap=plt.cm.bone,extend='min')
#CS = plt.contourf(xi,yi,zi,cmap=plt.cm.bone,extend='min')
#plt.clim(vmin=-1)
plt.colorbar()
plt.scatter(xs,ys,marker='o',c='w',s=5,zorder=10, edgecolors=None)
lbl_sz = 12
ax.set_xlabel('Size of each element (bytes)', size=lbl_sz)
ax.set_ylabel('Number of elements', size=lbl_sz)
from matplotlib.ticker import MultipleLocator
xMajorLocator = MultipleLocator(512)
ax.xaxis.set_major_locator(xMajorLocator)
yMajorLocator = MultipleLocator(8)
ax.yaxis.set_major_locator(yMajorLocator)
plt.show()
if __name__ == "__main__":
main()
|
apache-2.0
|
2prime/DeepLab
|
RegressionTest/2dtest.py
|
1
|
1195
|
import torch
import torchvision
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision import transforms
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from random import random
from matplotlib import pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from test2D import f,n,TestNet,test_func,block
testnet = torch.load('2dmodel_.pkl')
testnet.eval()
def map_func(func):
return lambda x:list(map(lambda x:func([[x[0],x[1]]]),x))
n1=32
data_test = [[(i/n,j/n) for j in range(n1)]for i in range(n1)]
data_plot = list(map(lambda x:list(map(lambda y:max(testnet.forward(Variable(torch.Tensor([[y[0],y[1]]]))).data.numpy()[0]),x)),data_test))
fig = plt.figure()
ax = Axes3D(fig)
X = np.arange(0, 1, 1/n1)
Y = np.arange(0, 1, 1/n1)
X, Y = np.meshgrid(X, Y)
# See help(function) for details on the specific plotting methods, e.g. help(ax.plot_surface)
ax.plot_surface(X, Y, data_plot, rstride=1, cstride=1, cmap='rainbow')
for i in range(n):
ax.scatter(f(i/n),i/n,test_func(i/n))
ax.scatter( f(i / n),i / n, alpha=0.4,marker='+')
plt.show()
|
mit
|
ainafp/nilearn
|
plot_haxby_mass_univariate.py
|
1
|
6640
|
"""
Massively univariate analysis of face vs house recognition
==========================================================
A permuted Ordinary Least Squares algorithm is run at each voxel in
order to determine whether or not it behaves differently under a "face
viewing" condition and a "house viewing" condition.
We consider the mean image per session and per condition.
Otherwise, the observations cannot be exchanged at random because
a time dependence exists between observations within the same session
(see [1] for more detailed explanations).
The example shows the small differences that exist between
Bonferroni-corrected p-values and family-wise corrected p-values obtained
from a permutation test combined with a max-type procedure [2].
Bonferroni correction is a bit conservative, as revealed by the presence of
a few false negatives.
References
----------
[1] Winkler, A. M. et al. (2014).
Permutation inference for the general linear model. Neuroimage.
[2] Anderson, M. J. & Robinson, J. (2001).
Permutation tests for linear models.
Australian & New Zealand Journal of Statistics, 43(1), 75-88.
(http://avesbiodiv.mncn.csic.es/estadistica/permut2.pdf)
"""
# Author: Virgile Fritsch, <[email protected]>, Feb. 2014
import numpy as np
import nibabel
from nilearn import datasets
from nilearn.input_data import NiftiMasker
from nilearn.mass_univariate import permuted_ols
### Load Haxby dataset ########################################################
dataset_files = datasets.fetch_haxby_simple()
### Mask data #################################################################
mask_img = nibabel.load(dataset_files.mask)
nifti_masker = NiftiMasker(
mask=dataset_files.mask,
memory='nilearn_cache', memory_level=1) # cache options
fmri_masked = nifti_masker.fit_transform(dataset_files.func)
### Restrict to faces and houses ##############################################
conditions_encoded, sessions = np.loadtxt(
dataset_files.session_target).astype("int").T
conditions = np.recfromtxt(dataset_files.conditions_target)['f0']
condition_mask = np.logical_or(conditions == 'face', conditions == 'house')
conditions_encoded = conditions_encoded[condition_mask]
fmri_masked = fmri_masked[condition_mask]
# We consider the mean image per session and per condition.
# Otherwise, the observations cannot be exchanged at random because
# a time dependence exists between observations within the same session.
n_sessions = np.unique(sessions).size
grouped_fmri_masked = np.empty((2 * n_sessions, # two conditions per session
fmri_masked.shape[1]))
grouped_conditions_encoded = np.empty((2 * n_sessions, 1))
for s in range(n_sessions):
session_mask = sessions[condition_mask] == s
session_house_mask = np.logical_and(session_mask,
conditions[condition_mask] == 'house')
session_face_mask = np.logical_and(session_mask,
conditions[condition_mask] == 'face')
grouped_fmri_masked[2 * s] = fmri_masked[session_house_mask].mean(0)
grouped_fmri_masked[2 * s + 1] = fmri_masked[session_face_mask].mean(0)
grouped_conditions_encoded[2 * s] = conditions_encoded[
session_house_mask][0]
grouped_conditions_encoded[2 * s + 1] = conditions_encoded[
session_face_mask][0]
### Perform massively univariate analysis with permuted OLS ###################
# We use a two-sided t-test to compute p-values, but we keep trace of the
# effect sign to add it back at the end and thus observe the signed effect
neg_log_pvals, t_scores_original_data, _ = permuted_ols(
grouped_conditions_encoded, grouped_fmri_masked,
# + intercept as a covariate by default
n_perm=10000, two_sided_test=True,
n_jobs=1) # can be changed to use more CPUs
signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
signed_neg_log_pvals).get_data()
### scikit-learn F-scores for comparison ######################################
# The F-test does not allow observing the effect sign (it is a pure two-sided test)
from nilearn._utils.fixes import f_regression
_, pvals_bonferroni = f_regression(
grouped_fmri_masked,
grouped_conditions_encoded) # f_regression implicitly adds intercept
pvals_bonferroni *= fmri_masked.shape[1]
pvals_bonferroni[np.isnan(pvals_bonferroni)] = 1
pvals_bonferroni[pvals_bonferroni > 1] = 1
neg_log_pvals_bonferroni = -np.log10(pvals_bonferroni)
neg_log_pvals_bonferroni_unmasked = nifti_masker.inverse_transform(
neg_log_pvals_bonferroni).get_data()
### Visualization #############################################################
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
# Use the fmri mean image as a surrogate of anatomical data
from nilearn import image
mean_fmri = image.mean_img(dataset_files.func).get_data()
# Various plotting parameters
picked_slice = 27 # plotted slice
vmin = -np.log10(0.1) # 10% corrected
vmax = min(np.amax(neg_log_pvals), np.amax(neg_log_pvals_bonferroni))
grid = ImageGrid(plt.figure(), 111, nrows_ncols=(1, 2), direction="row",
axes_pad=0.05, add_all=True, label_mode="1",
share_all=True, cbar_location="right", cbar_mode="single",
cbar_size="7%", cbar_pad="1%")
# Plot thresholded p-values map corresponding to F-scores
ax = grid[0]
p_ma = np.ma.masked_less(neg_log_pvals_bonferroni_unmasked, vmin)
ax.imshow(np.rot90(mean_fmri[..., picked_slice]), interpolation='nearest',
cmap=plt.cm.gray)
ax.imshow(np.rot90(p_ma[..., picked_slice]), interpolation='nearest',
cmap=plt.cm.RdBu_r, vmin=-vmax, vmax=vmax)
ax.set_title(r'Negative $\log_{10}$ p-values'
'\n(Parametric two-sided F-test'
'\n+ Bonferroni correction)'
'\n%d detections' % (~p_ma.mask[..., picked_slice]).sum())
ax.axis('off')
# Plot permutation p-values map
ax = grid[1]
p_ma = np.ma.masked_inside(signed_neg_log_pvals_unmasked, -vmin, vmin)[..., 0]
ax.imshow(np.rot90(mean_fmri[..., picked_slice]), interpolation='nearest',
cmap=plt.cm.gray)
im = ax.imshow(np.rot90(p_ma[..., picked_slice]), interpolation='nearest',
cmap=plt.cm.RdBu_r, vmin=-vmax, vmax=vmax)
ax.set_title(r'Negative $\log_{10}$ p-values'
'\n(Non-parametric two-sided test'
'\n+ max-type correction)'
'\n%d detections' % (~p_ma.mask[..., picked_slice]).sum())
ax.axis('off')
# plot colorbar
colorbar = grid[1].cax.colorbar(im)
plt.subplots_adjust(0., 0.03, 1., 0.83)
plt.show()
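# --- Illustrative aside (a sketch, not part of the original example) ---
# Bonferroni correction, as applied to pvals_bonferroni above, simply
# multiplies each uncorrected p-value by the number of tests and clips the
# result at 1. The single p-value below is made up for illustration.
n_tests = fmri_masked.shape[1]
example_p = 1e-5
print('Bonferroni-corrected p-value for p=%g over %d tests: %g'
      % (example_p, n_tests, min(example_p * n_tests, 1.)))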
|
bsd-3-clause
|
lenovor/BDA_py_demos
|
demos_ch10/demo10_2.py
|
19
|
1606
|
"""Bayesian data analysis
Chapter 10, demo 2
Importance sampling example
"""
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2, markeredgewidth=0)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
plt.rc('patch', facecolor='#bfe2ff')
# fake interesting distribution
x = np.linspace(-3, 3, 200)
r = np.array([ 1.1 , 1.3 , -0.1 , -0.7 , 0.2 , -0.4 , 0.06, -1.7 ,
1.7 , 0.3 , 0.7 , 1.6 , -2.06, -0.74, 0.2 , 0.5 ])
# Estimate the density (named q, to emphasize that it does not need to be
# normalized). Parameter bw_method=0.48 is used to mimic the outcome of the
# kernelp function in Matlab.
q_func = stats.gaussian_kde(r, bw_method=0.48)
q = q_func.evaluate(x)
# importance sampling example
g = stats.norm.pdf(x)
w = q/g
r = np.random.randn(100)
r = r[np.abs(r) < 3] # remove samples out of the grid
wr = q_func.evaluate(r)/stats.norm.pdf(r)
# plot
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(10,8))
axes[0].plot(x, q, label=r'$q(\theta|y)$')
axes[0].plot(x, g, label=r'$g(\theta)$')
axes[0].set_yticks(())
axes[0].set_title('target and proposal distributions')
axes[0].legend()
axes[1].plot(x, w, label=r'$q(\theta|y)/g(\theta)$')
axes[1].set_title('samples and importance weights')
axes[1].vlines(r, 0, wr, color='#377eb8', alpha=0.4)
axes[1].set_ylim((0,axes[1].get_ylim()[1]))
axes[1].legend()
plt.show()
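# --- Illustrative extension (a sketch, not part of the original demo) ---
# The importance weights computed above can be turned into a self-normalized
# importance sampling estimate of a posterior expectation, here E[theta],
# using the samples r and their weights wr.
is_estimate = np.sum(wr * r) / np.sum(wr)
print('self-normalized importance sampling estimate of E[theta]: %.3f'
      % is_estimate)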
|
gpl-3.0
|
alexvmarch/atomic
|
exatomic/widgets/traits.py
|
2
|
5116
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Universe trait functions
#########################
"""
##########
# traits #
##########
import re
import numpy as np
import pandas as pd
from exatomic.base import sym2radius, sym2color
def atom_traits(df, atomcolors=None, atomradii=None, atomlabels=None):
"""
Get atom table traits. Atomic size (using the covalent radius) and atom
colors (using the common `Jmol`_ color scheme) are packed as dicts and
obtained from the static data in exa.
.. _Jmol: http://jmol.sourceforge.net/jscolors/
"""
atomlabels = pd.Series() if atomlabels is None else pd.Series(atomlabels)
atomcolors = pd.Series() if atomcolors is None else pd.Series(atomcolors)
atomradii = pd.Series() if atomradii is None else pd.Series(atomradii)
traits = {}
cols = ['x', 'y', 'z']
grps = df.groupby('frame')
for col in cols:
ncol = 'atom_' + col
traits[ncol] = grps.apply(
lambda y: y[col].to_json(
orient='values', double_precision=3)
).to_json(orient="values").replace('"', '')
syms = grps.apply(lambda g: g['symbol'].cat.codes.values)
symmap = {i: v for i, v in enumerate(df['symbol'].cat.categories)
if v in df.unique_atoms}
unq = df['symbol'].astype(str).unique()
# radii = {k: sym2radius[k][1] for k in unq}
cov_radii = {k: sym2radius[k][0] for k in unq}
van_radii = {k: sym2radius[k][1] for k in unq}
colors = {k: sym2color[k] for k in unq}
labels = symmap
colors.update(atomcolors)
# radii.update(atomradii)
cov_radii.update(atomradii)
van_radii.update(atomradii)
labels.update(atomlabels)
traits['atom_s'] = syms.to_json(orient='values')
traits['atom_cr'] = {i: cov_radii[v] for i, v in symmap.items()}
traits['atom_vr'] = {i: van_radii[v] for i, v in symmap.items()}
traits['atom_c'] = {i: colors[v] for i, v in symmap.items()}
traits['atom_l'] = labels
return traits
def field_traits(df):
"""Get field table traits."""
df['frame'] = df['frame'].astype(int)
df['nx'] = df['nx'].astype(int)
df['ny'] = df['ny'].astype(int)
df['nz'] = df['nz'].astype(int)
grps = df.groupby('frame')
fps = grps.apply(lambda x: x[['ox', 'oy', 'oz',
'nx', 'ny', 'nz',
'fx', 'fy', 'fz']].T.to_dict()).to_dict()
try: idxs = list(map(list, grps.groups.values()))
except: idxs = [list(grp.index) for i, grp in grps]
return {'field_v': [f.to_json(orient='values',
double_precision=5) for f in df.field_values],
'field_i': idxs,
'field_p': fps}
#def two_traits(df, lbls):
def two_traits(uni):
"""Get two table traitlets."""
if not hasattr(uni, "atom_two"):
raise AttributeError("for the catcher")
if "frame" not in uni.atom_two.columns:
uni.atom_two['frame'] = uni.atom_two['atom0'].map(uni.atom['frame'])
lbls = uni.atom.get_atom_labels()
df = uni.atom_two
bonded = df.loc[df['bond'] == True, ['atom0', 'atom1', 'frame']]
lbl0 = bonded['atom0'].map(lbls)
lbl1 = bonded['atom1'].map(lbls)
lbl = pd.concat((lbl0, lbl1), axis=1)
lbl['frame'] = bonded['frame']
bond_grps = lbl.groupby('frame')
frames = df['frame'].unique().astype(np.int64)
b0 = np.empty((len(frames), ), dtype='O')
b1 = b0.copy()
for i, frame in enumerate(frames):
try:
b0[i] = bond_grps.get_group(frame)['atom0'].astype(np.int64).values
b1[i] = bond_grps.get_group(frame)['atom1'].astype(np.int64).values
except Exception:
b0[i] = []
b1[i] = []
b0 = pd.Series(b0).to_json(orient='values')
b1 = pd.Series(b1).to_json(orient='values')
del uni.atom_two['frame']
return {'two_b0': b0, 'two_b1': b1}
def frame_traits(uni):
"""Get frame table traits."""
# ASSUME SIMPLE CUBIC CELL this is a hack for now.
if 'xi' in uni.frame.columns:
return {'frame__a': uni.frame['xi'].max()}
return {}
def tensor_traits(uni):
grps = uni.tensor.groupby('frame')
try: idxs = list(map(list, grps.groups.values()))
except: idxs = [list(grp.index) for i, grp in grps]
return {'tensor_d': grps.apply(lambda x: x.T.to_dict()).to_dict(), 'tensor_i': idxs}
def uni_traits(uni, atomcolors=None, atomradii=None, atomlabels=None):
"""Get Universe traits."""
unargs = {}
fields, tensors = [], None
if hasattr(uni, 'frame'):
unargs.update(frame_traits(uni))
if hasattr(uni, 'atom'):
unargs.update(atom_traits(uni.atom, atomcolors, atomradii, atomlabels))
if hasattr(uni, 'atom_two'):
unargs.update(two_traits(uni))
if hasattr(uni, 'field'):
unargs.update(field_traits(uni.field))
fields = ['null'] + unargs['field_i'][0]
if hasattr(uni, 'tensor'):
unargs.update(tensor_traits(uni))
tensors = unargs['tensor_i'][0]
return unargs, fields, tensors
|
apache-2.0
|
larray-project/larray-editor
|
larray_editor/arraywidget.py
|
1
|
47244
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2012 Pierre Raybaut
# Copyright © 2015-2016 Gaëtan de Menten
# Licensed under the terms of the MIT License
# based on
# github.com/spyder-ide/spyder/blob/master/spyderlib/widgets/arrayeditor.py
"""
Array Editor Dialog based on Qt
"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Note that the canonical way to implement filters in a TableView would
# be to use a QSortFilterProxyModel. In this case, we would need to reimplement
# its filterAcceptsColumn and filterAcceptsRow methods. The problem is that
# it does not seem to be really designed for very large arrays and it would
# probably be too slow on those (I have read quite a few people complaining
# about speed issues with those) possibly because it supposes you have the whole
# array in your model. It would also probably not play well with the
# partial/progressive load we have currently implemented.
# TODO:
# * drag & drop to reorder axes
# http://zetcode.com/gui/pyqt4/dragdrop/
# http://stackoverflow.com/questions/10264040/
# how-to-drag-and-drop-into-a-qtablewidget-pyqt
# http://stackoverflow.com/questions/3458542/multiple-drag-and-drop-in-pyqt4
# http://ux.stackexchange.com/questions/34158/
# how-to-make-it-obvious-that-you-can-drag-things-that-you-normally-cant
# * keep header columns & rows visible ("frozen")
# http://doc.qt.io/qt-5/qtwidgets-itemviews-frozencolumn-example.html
# * document default icons situation (limitations)
# * document paint speed experiments
# * filter on headers. In fact this is not a good idea, because that prevents
# selecting whole columns, which is handy. So a separate row for headers,
# like in Excel seems better.
# * tooltip on header with current filter
# * selection change -> select headers too
# * nicer error on plot with more than one row/column
# OR
# * plotting a subset should probably (to think) go via LArray/pandas objects
# so that I have the headers info in the plots (and do not have to deal with
# them manually)
# > ideally, I would like to keep this generic (not LArray-specific)
# ? automatic change digits on resize column
# => different format per column, which is problematic UI-wise
# * keyboard shortcut for filter each dim
# * tab in a filter combo, brings up next filter combo
# * view/edit DataFrames too
# * view/edit LArray over Pandas (ie sparse)
# * resubmit editor back for inclusion in Spyder
# ? custom delegates for each type (spinner for int, checkbox for bool, ...)
# ? "light" headers (do not repeat the same header several times (on the screen)
# it would be nicer but I am not sure it is a good idea because with many
# dimensions, you can no longer see the current label for the first
# dimension(s) if you scroll down a bit. This is solvable if, instead
# of only the first line ever corresponding to the label displaying it,
# I could make it so that it is the first line displayable on the screen
# which gets it. It would be a bit less nice because of strange artifacts
# when scrolling, but would be more useful. The beauty problem could be
# solved later too via fading or something like that, but probably not
# worth it for a while.
import math
import numpy as np
from qtpy.QtCore import Qt, QPoint, QItemSelection, QItemSelectionModel, Signal, QSize
from qtpy.QtGui import QDoubleValidator, QIntValidator, QKeySequence, QFontMetrics, QCursor, QPixmap, QPainter, QIcon
from qtpy.QtWidgets import (QApplication, QTableView, QItemDelegate, QLineEdit, QCheckBox,
QMessageBox, QMenu, QLabel, QSpinBox, QWidget, QToolTip, QShortcut, QScrollBar,
QHBoxLayout, QVBoxLayout, QGridLayout, QSizePolicy, QFrame, QComboBox)
from larray_editor.utils import (keybinding, create_action, clear_layout, get_font, is_number, is_float, _, ima, LinearGradient)
from larray_editor.arrayadapter import get_adapter
from larray_editor.arraymodel import LabelsArrayModel, AxesArrayModel, DataArrayModel
from larray_editor.combo import FilterComboBox
# XXX: define Enum instead ?
TOP, BOTTOM = 0, 1
LEFT, RIGHT = 0, 1
class AbstractView(QTableView):
"""Abstract view class"""
def __init__(self, parent, model, hpos, vpos):
QTableView.__init__(self, parent)
# set model
self.setModel(model)
# set position
if not (hpos == LEFT or hpos == RIGHT):
raise TypeError("Value of hpos must be {} or {}".format(LEFT, RIGHT))
self.hpos = hpos
if not (vpos == TOP or vpos == BOTTOM):
raise TypeError("Value of vpos must be {} or {}".format(TOP, BOTTOM))
self.vpos = vpos
# set selection mode
self.setSelectionMode(QTableView.ContiguousSelection)
# prepare headers + cells size
self.horizontalHeader().setFrameStyle(QFrame.NoFrame)
self.verticalHeader().setFrameStyle(QFrame.NoFrame)
self.set_default_size()
# hide horizontal/vertical headers
if hpos == RIGHT:
self.verticalHeader().hide()
if vpos == BOTTOM:
self.horizontalHeader().hide()
# to fetch more rows/columns when required
self.horizontalScrollBar().valueChanged.connect(self.on_horizontal_scroll_changed)
self.verticalScrollBar().valueChanged.connect(self.on_vertical_scroll_changed)
# Hide scrollbars
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
# update geometry
if not (hpos == RIGHT and vpos == BOTTOM):
self.model().modelReset.connect(self.updateGeometry)
self.horizontalHeader().sectionResized.connect(self.updateGeometry)
self.verticalHeader().sectionResized.connect(self.updateGeometry)
def set_default_size(self):
# make the grid a bit more compact
self.horizontalHeader().setDefaultSectionSize(64)
self.verticalHeader().setDefaultSectionSize(20)
if self.vpos == TOP:
self.horizontalHeader().setFixedHeight(10)
if self.hpos == LEFT:
self.verticalHeader().setFixedWidth(10)
def on_vertical_scroll_changed(self, value):
if value == self.verticalScrollBar().maximum():
self.model().fetch_more_rows()
def on_horizontal_scroll_changed(self, value):
if value == self.horizontalScrollBar().maximum():
self.model().fetch_more_columns()
def updateSectionHeight(self, logicalIndex, oldSize, newSize):
self.setRowHeight(logicalIndex, newSize)
def updateSectionWidth(self, logicalIndex, oldSize, newSize):
self.setColumnWidth(logicalIndex, newSize)
def autofit_columns(self):
"""Resize cells to contents"""
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
# Spyder loads more columns before resizing, but since it does not
# load all columns anyway, I do not see the point
# self.model().fetch_more_columns()
self.resizeColumnsToContents()
QApplication.restoreOverrideCursor()
def updateGeometry(self):
# Set maximum height
if self.vpos == TOP:
maximum_height = self.horizontalHeader().height() + \
sum(self.rowHeight(r) for r in range(self.model().rowCount()))
self.setFixedHeight(maximum_height)
# Set maximum width
if self.hpos == LEFT:
maximum_width = self.verticalHeader().width() + \
sum(self.columnWidth(c) for c in range(self.model().columnCount()))
self.setFixedWidth(maximum_width)
# update geometry
super(AbstractView, self).updateGeometry()
class AxesView(AbstractView):
""""Axes view class"""
allSelected = Signal()
def __init__(self, parent, model):
# check model
if not isinstance(model, AxesArrayModel):
raise TypeError("Expected model of type {}. Received {} instead"
.format(AxesArrayModel.__name__, type(model).__name__))
AbstractView.__init__(self, parent, model, LEFT, TOP)
def selectAll(self):
self.allSelected.emit()
class LabelsView(AbstractView):
""""Labels view class"""
def __init__(self, parent, model, hpos, vpos):
# check model
if not isinstance(model, LabelsArrayModel):
raise TypeError("Expected model of type {}. Received {} instead"
.format(LabelsArrayModel.__name__, type(model).__name__))
AbstractView.__init__(self, parent, model, hpos, vpos)
class ArrayDelegate(QItemDelegate):
"""Array Editor Item Delegate"""
def __init__(self, dtype, parent=None, font=None, minvalue=None, maxvalue=None):
QItemDelegate.__init__(self, parent)
self.dtype = dtype
if font is None:
font = get_font('arrayeditor')
self.font = font
self.minvalue = minvalue
self.maxvalue = maxvalue
# We must keep a count instead of the "current" one, because when
# switching from one cell to the next, the new editor is created
# before the old one is destroyed, which means it would be set to None
# when the old one is destroyed.
self.editor_count = 0
def createEditor(self, parent, option, index):
"""Create editor widget"""
model = index.model()
# TODO: dtype should be taken from the model instead (or even from the actual value?)
value = model.get_value(index)
if self.dtype.name == "bool":
# toggle value
value = not value
model.setData(index, value)
return
elif value is not np.ma.masked:
minvalue, maxvalue = self.minvalue, self.maxvalue
if minvalue is not None and maxvalue is not None:
msg = "value must be between %s and %s" % (minvalue, maxvalue)
elif minvalue is not None:
msg = "value must be >= %s" % minvalue
elif maxvalue is not None:
msg = "value must be <= %s" % maxvalue
else:
msg = None
# Not using a QSpinBox for integer inputs because I could not find
# a way to prevent the spinbox/editor from closing if the value is
# invalid. Using the builtin minimum/maximum of the spinbox works
# but that provides no message so it is less clear.
editor = QLineEdit(parent)
if is_number(self.dtype):
validator = QDoubleValidator(editor) if is_float(self.dtype) \
else QIntValidator(editor)
if minvalue is not None:
validator.setBottom(minvalue)
if maxvalue is not None:
validator.setTop(maxvalue)
editor.setValidator(validator)
def on_editor_text_edited():
if not editor.hasAcceptableInput():
QToolTip.showText(editor.mapToGlobal(QPoint()), msg)
else:
QToolTip.hideText()
if msg is not None:
editor.textEdited.connect(on_editor_text_edited)
editor.setFont(self.font)
editor.setAlignment(Qt.AlignRight)
editor.destroyed.connect(self.on_editor_destroyed)
self.editor_count += 1
return editor
def on_editor_destroyed(self):
self.editor_count -= 1
assert self.editor_count >= 0
def setEditorData(self, editor, index):
"""Set editor widget's data"""
text = index.model().data(index, Qt.DisplayRole)
editor.setText(text)
class DataView(AbstractView):
"""Data array view class"""
signal_copy = Signal()
signal_excel = Signal()
signal_paste = Signal()
signal_plot = Signal()
def __init__(self, parent, model):
# check model
if not isinstance(model, DataArrayModel):
raise TypeError("Expected model of type {}. Received {} instead"
.format(DataArrayModel.__name__, type(model).__name__))
AbstractView.__init__(self, parent, model, RIGHT, BOTTOM)
self.context_menu = self.setup_context_menu()
# TODO: find a cleaner way to do this
# For some reason the shortcuts in the context menu are not available if the widget does not have the focus,
# EVEN when using action.setShortcutContext(Qt.ApplicationShortcut) (or Qt.WindowShortcut) so we redefine them
# here. I was also unable to get the function an action.triggered is connected to, so I couldn't do this via
# a loop on self.context_menu.actions.
shortcuts = [
(keybinding('Copy'), self.parent().copy),
(QKeySequence("Ctrl+E"), self.parent().to_excel),
(keybinding('Paste'), self.parent().paste),
(keybinding('Print'), self.parent().plot)
]
for key_seq, target in shortcuts:
shortcut = QShortcut(key_seq, self)
shortcut.activated.connect(target)
def set_dtype(self, dtype):
model = self.model()
delegate = ArrayDelegate(dtype, self, minvalue=model.minvalue, maxvalue=model.maxvalue)
self.setItemDelegate(delegate)
def selectNewRow(self, row_index):
# if not MultiSelection mode activated, selectRow will unselect previously
# selected rows (unless SHIFT or CTRL key is pressed)
# this produces a selection with multiple QItemSelectionRange. We could merge them here, but it is
# easier to handle in _selection_bounds
self.setSelectionMode(QTableView.MultiSelection)
self.selectRow(row_index)
self.setSelectionMode(QTableView.ContiguousSelection)
def selectNewColumn(self, column_index):
# if not MultiSelection mode activated, selectColumn will unselect previously
# selected columns (unless SHIFT or CTRL key is pressed)
# this produces a selection with multiple QItemSelectionRange. We could merge them here, but it is
# easier to handle in _selection_bounds
self.setSelectionMode(QTableView.MultiSelection)
self.selectColumn(column_index)
self.setSelectionMode(QTableView.ContiguousSelection)
def setup_context_menu(self):
"""Setup context menu"""
self.copy_action = create_action(self, _('Copy'),
shortcut=keybinding('Copy'),
icon=ima.icon('edit-copy'),
triggered=lambda: self.signal_copy.emit())
self.excel_action = create_action(self, _('Copy to Excel'),
shortcut="Ctrl+E",
# icon=ima.icon('edit-copy'),
triggered=lambda: self.signal_excel.emit())
self.paste_action = create_action(self, _('Paste'),
shortcut=keybinding('Paste'),
icon=ima.icon('edit-paste'),
triggered=lambda: self.signal_paste.emit())
self.plot_action = create_action(self, _('Plot'),
shortcut=keybinding('Print'),
# icon=ima.icon('editcopy'),
triggered=lambda: self.signal_plot.emit())
menu = QMenu(self)
menu.addActions([self.copy_action, self.excel_action, self.plot_action, self.paste_action])
return menu
def contextMenuEvent(self, event):
"""Reimplement Qt method"""
self.context_menu.popup(event.globalPos())
event.accept()
def keyPressEvent(self, event):
"""Reimplement Qt method"""
# comparing with the keysequence and not with event directly as we
# did before because that only seems to work for shortcut
# defined using QKeySequence.StandardKey, which is not the case for
# Ctrl + E
keyseq = QKeySequence(event.modifiers() | event.key())
if keyseq == QKeySequence.Copy:
self.copy()
elif keyseq == QKeySequence.Paste:
self.paste()
elif keyseq == QKeySequence.Print:
self.parent().plot()
elif keyseq == "Ctrl+E":
self.to_excel()
# allow to start editing cells by pressing Enter
elif event.key() == Qt.Key_Return and not self.model().readonly:
index = self.currentIndex()
if self.itemDelegate(index).editor_count == 0:
self.edit(index)
else:
QTableView.keyPressEvent(self, event)
def _selection_bounds(self, none_selects_all=True):
"""
Parameters
----------
none_selects_all : bool, optional
If True (default) and selection is empty, returns all data.
Returns
-------
tuple
selection bounds. end bound is exclusive
"""
model = self.model()
selection_model = self.selectionModel()
assert isinstance(selection_model, QItemSelectionModel)
selection = selection_model.selection()
assert isinstance(selection, QItemSelection)
if not selection:
if none_selects_all:
return 0, model.total_rows, 0, model.total_cols
else:
return None
# merge potentially multiple selections into one big rect
row_min = min(srange.top() for srange in selection)
row_max = max(srange.bottom() for srange in selection)
col_min = min(srange.left() for srange in selection)
col_max = max(srange.right() for srange in selection)
# if not all rows/columns have been loaded
if row_min == 0 and row_max == self.model().rows_loaded - 1:
row_max = self.model().total_rows - 1
if col_min == 0 and col_max == self.model().cols_loaded - 1:
col_max = self.model().total_cols - 1
return row_min, row_max + 1, col_min, col_max + 1
def ndigits(value):
"""
number of integer digits
>>> ndigits(1)
1
>>> ndigits(99)
2
>>> ndigits(-99.1)
3
"""
negative = value < 0
value = abs(value)
log10 = math.log10(value) if value > 0 else 0
if log10 == np.inf:
int_digits = 308
else:
# max(1, ...) because there is at least one integer digit.
# explicit conversion to int for Python2.x
int_digits = max(1, int(math.floor(log10)) + 1)
# one digit for sign if negative
return int_digits + negative
class ScrollBar(QScrollBar):
"""
A specialised scrollbar.
"""
def __init__(self, parent, data_scrollbar):
super(ScrollBar, self).__init__(data_scrollbar.orientation(), parent)
self.setMinimum(data_scrollbar.minimum())
self.setMaximum(data_scrollbar.maximum())
self.setSingleStep(data_scrollbar.singleStep())
self.setPageStep(data_scrollbar.pageStep())
data_scrollbar.valueChanged.connect(self.setValue)
self.valueChanged.connect(data_scrollbar.setValue)
data_scrollbar.rangeChanged.connect(self.setRange)
self.rangeChanged.connect(data_scrollbar.setRange)
available_gradients = [
('white', None),
# Hue, Saturation, Value, Alpha-channel
('red-blue', LinearGradient([(0, [0.99, 0.7, 1.0, 0.6]), (1, [0.66, 0.7, 1.0, 0.6])])),
('blue-red', LinearGradient([(0, [0.66, 0.7, 1.0, 0.6]), (1, [0.99, 0.7, 1.0, 0.6])])),
('red-white-blue', LinearGradient([(0, [.99, .85, 1., .6]),
(0.5 - 1e-16, [.99, .15, 1., .6]),
(0.5, [1., 0., 1., 1.]),
(0.5 + 1e-16, [.66, .15, 1., .6]),
(1, [.66, .85, 1., .6])])),
('blue-white-red', LinearGradient([(0, [.66, .85, 1., .6]),
(0.5 - 1e-16, [.66, .15, 1., .6]),
(0.5, [1., 0., 1., 1.]),
(0.5 + 1e-16, [.99, .15, 1., .6]),
(1, [.99, .85, 1., .6])])),
]
gradient_map = dict(available_gradients)
class ArrayEditorWidget(QWidget):
dataChanged = Signal(list)
def __init__(self, parent, data=None, readonly=False, bg_value=None, bg_gradient='blue-red',
minvalue=None, maxvalue=None, digits=None):
QWidget.__init__(self, parent)
assert bg_gradient in gradient_map
if data is not None and np.isscalar(data):
readonly = True
self.readonly = readonly
# prepare internal views and models
self.model_axes = AxesArrayModel(parent=self, readonly=readonly)
self.view_axes = AxesView(parent=self, model=self.model_axes)
self.model_hlabels = LabelsArrayModel(parent=self, readonly=readonly)
self.view_hlabels = LabelsView(parent=self, model=self.model_hlabels, hpos=RIGHT, vpos=TOP)
self.model_vlabels = LabelsArrayModel(parent=self, readonly=readonly)
self.view_vlabels = LabelsView(parent=self, model=self.model_vlabels, hpos=LEFT, vpos=BOTTOM)
self.model_data = DataArrayModel(parent=self, readonly=readonly, minvalue=minvalue, maxvalue=maxvalue)
self.view_data = DataView(parent=self, model=self.model_data)
# in case data is None
self.data_adapter = None
# Create vertical and horizontal scrollbars
self.vscrollbar = ScrollBar(self, self.view_data.verticalScrollBar())
self.hscrollbar = ScrollBar(self, self.view_data.horizontalScrollBar())
# Synchronize resizing
self.view_axes.horizontalHeader().sectionResized.connect(self.view_vlabels.updateSectionWidth)
self.view_axes.verticalHeader().sectionResized.connect(self.view_hlabels.updateSectionHeight)
self.view_hlabels.horizontalHeader().sectionResized.connect(self.view_data.updateSectionWidth)
self.view_vlabels.verticalHeader().sectionResized.connect(self.view_data.updateSectionHeight)
# Synchronize auto-resizing
self.view_axes.horizontalHeader().sectionHandleDoubleClicked.connect(self.resize_axes_column_to_contents)
self.view_hlabels.horizontalHeader().sectionHandleDoubleClicked.connect(self.resize_hlabels_column_to_contents)
self.view_axes.verticalHeader().sectionHandleDoubleClicked.connect(self.resize_axes_row_to_contents)
self.view_vlabels.verticalHeader().sectionHandleDoubleClicked.connect(self.resize_vlabels_row_to_contents)
# synchronize specific methods
self.view_axes.allSelected.connect(self.view_data.selectAll)
self.view_data.signal_copy.connect(self.copy)
self.view_data.signal_excel.connect(self.to_excel)
self.view_data.signal_paste.connect(self.paste)
self.view_data.signal_plot.connect(self.plot)
# propagate changes (add new items in the QUndoStack attribute of MappingEditor)
self.model_data.newChanges.connect(self.data_changed)
# Synchronize scrolling
# data <--> hlabels
self.view_data.horizontalScrollBar().valueChanged.connect(self.view_hlabels.horizontalScrollBar().setValue)
self.view_hlabels.horizontalScrollBar().valueChanged.connect(self.view_data.horizontalScrollBar().setValue)
# data <--> vlabels
self.view_data.verticalScrollBar().valueChanged.connect(self.view_vlabels.verticalScrollBar().setValue)
self.view_vlabels.verticalScrollBar().valueChanged.connect(self.view_data.verticalScrollBar().setValue)
# Synchronize selecting columns(rows) via hor.(vert.) header of x(y)labels view
self.view_hlabels.horizontalHeader().sectionPressed.connect(self.view_data.selectColumn)
self.view_hlabels.horizontalHeader().sectionEntered.connect(self.view_data.selectNewColumn)
self.view_vlabels.verticalHeader().sectionPressed.connect(self.view_data.selectRow)
self.view_vlabels.verticalHeader().sectionEntered.connect(self.view_data.selectNewRow)
# following lines are required to keep usual selection color
# when selecting rows/columns via headers of label views.
# Otherwise, selected rows/columns appear in grey.
self.view_data.setStyleSheet("""QTableView {
selection-background-color: palette(highlight);
selection-color: white;
}""")
# set external borders
array_frame = QFrame(self)
array_frame.setFrameStyle(QFrame.StyledPanel)
# remove borders of internal tables
self.view_axes.setFrameStyle(QFrame.NoFrame)
self.view_hlabels.setFrameStyle(QFrame.NoFrame)
self.view_vlabels.setFrameStyle(QFrame.NoFrame)
self.view_data.setFrameStyle(QFrame.NoFrame)
# Set layout of table views:
# [ axes ][hlabels]|V|
# [vlabels][ data ]|s|
# | H. scrollbar |
array_layout = QGridLayout()
array_layout.addWidget(self.view_axes, 0, 0)
array_layout.addWidget(self.view_hlabels, 0, 1)
array_layout.addWidget(self.view_vlabels, 1, 0)
self.view_data.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
array_layout.addWidget(self.view_data, 1, 1)
array_layout.addWidget(self.vscrollbar, 0, 2, 2, 1)
array_layout.addWidget(self.hscrollbar, 2, 0, 1, 2)
array_layout.setSpacing(0)
array_layout.setContentsMargins(0, 0, 0, 0)
array_frame.setLayout(array_layout)
# Set filters and buttons layout
self.filters_layout = QHBoxLayout()
self.btn_layout = QHBoxLayout()
self.btn_layout.setAlignment(Qt.AlignLeft)
label = QLabel("Digits")
self.btn_layout.addWidget(label)
spin = QSpinBox(self)
spin.valueChanged.connect(self.digits_changed)
self.digits_spinbox = spin
self.btn_layout.addWidget(spin)
self.digits = 0
scientific = QCheckBox(_('Scientific'))
scientific.stateChanged.connect(self.scientific_changed)
self.scientific_checkbox = scientific
self.btn_layout.addWidget(scientific)
self.use_scientific = False
gradient_chooser = QComboBox()
gradient_chooser.setMaximumSize(120, 20)
gradient_chooser.setIconSize(QSize(100, 20))
pixmap = QPixmap(100, 15)
pixmap.fill(Qt.white)
gradient_chooser.addItem(QIcon(pixmap), " ")
pixmap.fill(Qt.transparent)
painter = QPainter(pixmap)
for name, gradient in available_gradients[1:]:
qgradient = gradient.as_qgradient()
# * fill with white because gradient can be transparent and if we do not "start from white", it skews the
# colors.
# * 1 and 13 instead of 0 and 15 to have a transparent border around/between the gradients
painter.fillRect(0, 1, 100, 13, Qt.white)
painter.fillRect(0, 1, 100, 13, qgradient)
gradient_chooser.addItem(QIcon(pixmap), name, gradient)
# without this, we can crash python :)
del painter, pixmap
# select default gradient
# requires Qt5+
# gradient_chooser.setCurrentText(bg_gradient)
gradient_chooser.setCurrentIndex(gradient_chooser.findText(bg_gradient))
gradient_chooser.currentIndexChanged.connect(self.gradient_changed)
self.btn_layout.addWidget(gradient_chooser)
self.gradient_chooser = gradient_chooser
# Set widget layout
layout = QVBoxLayout()
layout.addLayout(self.filters_layout)
layout.addWidget(array_frame)
layout.addLayout(self.btn_layout)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
# set gradient
self.model_data.set_bg_gradient(gradient_map[bg_gradient])
# set data
if data is not None:
self.set_data(data, bg_value=bg_value, digits=digits)
# See http://doc.qt.io/qt-4.8/qt-draganddrop-fridgemagnets-dragwidget-cpp.html for an example
self.setAcceptDrops(True)
def gradient_changed(self, index):
gradient = self.gradient_chooser.itemData(index) if index > 0 else None
self.model_data.set_bg_gradient(gradient)
def data_changed(self, data_model_changes):
changes = self.data_adapter.translate_changes(data_model_changes)
self.dataChanged.emit(changes)
def mousePressEvent(self, event):
self.dragLabel = self.childAt(event.pos()) if event.button() == Qt.LeftButton else None
self.dragStartPosition = event.pos()
def mouseMoveEvent(self, event):
from qtpy.QtCore import QMimeData, QByteArray
from qtpy.QtGui import QPixmap, QDrag
if not (event.button() != Qt.LeftButton and isinstance(self.dragLabel, QLabel)):
return
if (event.pos() - self.dragStartPosition).manhattanLength() < QApplication.startDragDistance():
return
axis_index = self.filters_layout.indexOf(self.dragLabel) // 2
# prepare hotSpot, mimeData and pixmap objects
mimeData = QMimeData()
mimeData.setText(self.dragLabel.text())
mimeData.setData("application/x-axis-index", QByteArray.number(axis_index))
pixmap = QPixmap(self.dragLabel.size())
self.dragLabel.render(pixmap)
# prepare drag object
drag = QDrag(self)
drag.setMimeData(mimeData)
drag.setPixmap(pixmap)
drag.setHotSpot(event.pos() - self.dragStartPosition)
drag.exec_(Qt.MoveAction | Qt.CopyAction, Qt.CopyAction)
def dragEnterEvent(self, event):
if event.mimeData().hasText():
if self.filters_layout.geometry().contains(event.pos()):
event.setDropAction(Qt.MoveAction)
event.accept()
else:
event.acceptProposedAction()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasText() and self.filters_layout.geometry().contains(event.pos()):
child = self.childAt(event.pos())
if isinstance(child, QLabel) and child.text() != "Filters":
event.setDropAction(Qt.MoveAction)
event.accept()
else:
event.ignore()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasText():
if self.filters_layout.geometry().contains(event.pos()):
old_index, success = event.mimeData().data("application/x-axis-index").toInt()
new_index = self.filters_layout.indexOf(self.childAt(event.pos())) // 2
data, bg_value = self.data_adapter.data, self.data_adapter.bg_value
data, bg_value = self.data_adapter.move_axis(data, bg_value, old_index, new_index)
self.set_data(data, bg_value)
event.setDropAction(Qt.MoveAction)
event.accept()
else:
event.acceptProposedAction()
else:
event.ignore()
def _update_models(self, reset_model_data, reset_minmax):
# axes names
axes_names = self.data_adapter.get_axes_names(fold_last_axis=True)
self.model_axes.set_data(axes_names)
# horizontal labels
hlabels = self.data_adapter.get_hlabels()
self.model_hlabels.set_data(hlabels)
# vertical labels
vlabels = self.data_adapter.get_vlabels()
self.model_vlabels.set_data(vlabels)
# raw data
# use flag reset=False to avoid calling reset() several times
raw_data = self.data_adapter.get_raw_data()
self.model_data.set_data(raw_data, reset=False)
# bg value
# use flag reset=False to avoid calling reset() several times
bg_value = self.data_adapter.get_bg_value()
self.model_data.set_bg_value(bg_value, reset=False)
# reset min and max values if required
if reset_minmax:
self.model_data.reset_minmax()
# reset the data model if required
if reset_model_data:
self.model_data.reset()
def set_data(self, data, bg_value=None, digits=None):
# get new adapter instance + set data
self.data_adapter = get_adapter(data=data, bg_value=bg_value)
# update filters
self._update_filter()
# update models
# Note: model_data is reset by call of _update_digits_scientific below which call
# set_format which reset the data_model
self._update_models(reset_model_data=False, reset_minmax=True)
# update data format
self._update_digits_scientific(digits=digits)
# update gradient_chooser
self.gradient_chooser.setEnabled(self.model_data.bgcolor_possible)
# reset default size
self._reset_default_size()
# update dtype in view_data
self.view_data.set_dtype(self.data_adapter.dtype)
def _reset_default_size(self):
self.view_axes.set_default_size()
self.view_vlabels.set_default_size()
self.view_hlabels.set_default_size()
self.view_data.set_default_size()
def _update_filter(self):
filters_layout = self.filters_layout
clear_layout(filters_layout)
axes = self.data_adapter.get_axes_filtered_data()
# size > 0 to avoid arrays with length 0 axes and len(axes) > 0 to avoid scalars (scalar.size == 1)
if self.data_adapter.size > 0 and len(axes) > 0:
filters_layout.addWidget(QLabel(_("Filters")))
for axis in axes:
filters_layout.addWidget(QLabel(axis.name))
# FIXME: on very large axes, this is getting too slow. Ideally the combobox should use a model which
# only fetch labels when they are needed to be displayed
if len(axis) < 10000:
filters_layout.addWidget(self.create_filter_combo(axis))
else:
filters_layout.addWidget(QLabel("too big to be filtered"))
filters_layout.addStretch()
def set_format(self, digits, scientific, reset=True):
"""Set format.
Parameters
----------
digits : int
Number of digits to display.
scientific : boolean
Whether or not to display values in scientific format.
reset: boolean, optional
Whether or not to reset the data model. Defaults to True.
"""
type = self.data_adapter.dtype.type
if type in (np.str, np.str_, np.bool_, np.bool, np.object_):
fmt = '%s'
else:
format_letter = 'e' if scientific else 'f'
fmt = '%%.%d%s' % (digits, format_letter)
self.model_data.set_format(fmt, reset)
# two cases:
# * set_data should update both scientific and ndigits
# * toggling scientific checkbox should update only ndigits
def _update_digits_scientific(self, scientific=None, digits=None):
dtype = self.data_adapter.dtype
if dtype.type in (np.str, np.str_, np.bool_, np.bool, np.object_):
scientific = False
ndecimals = 0
else:
data = self.data_adapter.get_sample()
# max_digits = self.get_max_digits()
# default width can fit 8 chars
# FIXME: use max_digits?
avail_digits = 8
frac_zeros, int_digits, has_negative = self.format_helper(data)
# choose whether or not to use scientific notation
# ================================================
if scientific is None:
# use scientific format if there are more integer digits than we can display or if we can display more
# information that way (scientific format "uses" 4 digits, so we have a net win if we have >= 4 zeros --
# *including the integer one*)
# TODO: only do so if we would actually display more information
# 0.00001 can be displayed with 8 chars
# 1e-05
# would
scientific = int_digits > avail_digits or frac_zeros >= 4
# determine best number of decimals to display
# ============================================
# TODO: ndecimals vs self.digits => rename self.digits to either frac_digits or ndecimals
if digits is not None:
ndecimals = digits
else:
data_frac_digits = self._data_digits(data)
if scientific:
int_digits = 2 if has_negative else 1
exp_digits = 4
else:
exp_digits = 0
# - 1 for the dot
ndecimals = avail_digits - 1 - int_digits - exp_digits
if ndecimals < 0:
ndecimals = 0
if data_frac_digits < ndecimals:
ndecimals = data_frac_digits
self.digits = ndecimals
self.use_scientific = scientific
# avoid triggering digits_changed which would cause a useless redraw
self.digits_spinbox.blockSignals(True)
self.digits_spinbox.setValue(ndecimals)
self.digits_spinbox.setEnabled(is_number(dtype))
self.digits_spinbox.blockSignals(False)
# avoid triggering scientific_changed which would call this function a second time
self.scientific_checkbox.blockSignals(True)
self.scientific_checkbox.setChecked(scientific)
self.scientific_checkbox.setEnabled(is_number(dtype))
self.scientific_checkbox.blockSignals(False)
# 1) setting the format explicitly instead of relying on digits_spinbox.digits_changed to set it because
# digits_changed is only triggered when digits actually changed, not when passing from
# scientific -> non scientific or number -> object
# 2) data model is reset in set_format by default
self.set_format(ndecimals, scientific)
def format_helper(self, data):
if not data.size:
return 0, 0, False
data = np.where(np.isfinite(data), data, 0)
vmin, vmax = np.min(data), np.max(data)
absmax = max(abs(vmin), abs(vmax))
logabsmax = math.log10(absmax) if absmax else 0
# minimum number of zeros before meaningful fractional part
frac_zeros = math.ceil(-logabsmax) - 1 if logabsmax < 0 else 0
int_digits = max(ndigits(vmin), ndigits(vmax))
return frac_zeros, int_digits, vmin < 0
def get_max_digits(self, need_sign=False, need_dot=False, scientific=False):
font = get_font("arreditor") # QApplication.font()
col_width = 60
margin_width = 6 # a wild guess
avail_width = col_width - margin_width
metrics = QFontMetrics(font)
def str_width(c):
return metrics.size(Qt.TextSingleLine, c).width()
digit_width = max(str_width(str(i)) for i in range(10))
dot_width = str_width('.')
sign_width = max(str_width('+'), str_width('-'))
if need_sign:
avail_width -= sign_width
if need_dot:
avail_width -= dot_width
if scientific:
avail_width -= str_width('e') + sign_width + 2 * digit_width
return avail_width // digit_width
def _data_digits(self, data, maxdigits=6):
if not data.size:
return 0
threshold = 10 ** -(maxdigits + 1)
for ndigits in range(maxdigits):
maxdiff = np.max(np.abs(data - np.round(data, ndigits)))
if maxdiff < threshold:
return ndigits
return maxdigits
def autofit_columns(self):
self.view_axes.autofit_columns()
for column in range(self.model_axes.columnCount()):
self.resize_axes_column_to_contents(column)
self.view_hlabels.autofit_columns()
for column in range(self.model_hlabels.columnCount()):
self.resize_hlabels_column_to_contents(column)
def resize_axes_column_to_contents(self, column):
# must be connected to view_axes.horizontalHeader().sectionHandleDoubleClicked signal
width = max(self.view_axes.horizontalHeader().sectionSize(column),
self.view_vlabels.sizeHintForColumn(column))
# no need to call resizeSection on view_vlabels (see synchronization lines in init)
self.view_axes.horizontalHeader().resizeSection(column, width)
def resize_hlabels_column_to_contents(self, column):
# must be connected to view_labels.horizontalHeader().sectionHandleDoubleClicked signal
width = max(self.view_hlabels.horizontalHeader().sectionSize(column),
self.view_data.sizeHintForColumn(column))
# no need to call resizeSection on view_data (see synchronization lines in init)
self.view_hlabels.horizontalHeader().resizeSection(column, width)
def resize_axes_row_to_contents(self, row):
# must be connected to view_axes.verticalHeader().sectionHandleDoubleClicked
height = max(self.view_axes.verticalHeader().sectionSize(row),
self.view_hlabels.sizeHintForRow(row))
# no need to call resizeSection on view_hlabels (see synchronization lines in init)
self.view_axes.verticalHeader().resizeSection(row, height)
def resize_vlabels_row_to_contents(self, row):
# must be connected to view_labels.verticalHeader().sectionHandleDoubleClicked
height = max(self.view_vlabels.verticalHeader().sectionSize(row),
self.view_data.sizeHintForRow(row))
# no need to call resizeSection on view_data (see synchronization lines in init)
self.view_vlabels.verticalHeader().resizeSection(row, height)
def scientific_changed(self, value):
self._update_digits_scientific(scientific=value)
def digits_changed(self, value):
self.digits = value
self.set_format(value, self.use_scientific)
def change_filter(self, axis, indices):
self.data_adapter.update_filter(axis, indices)
self._update_models(reset_model_data=True, reset_minmax=False)
def create_filter_combo(self, axis):
def filter_changed(checked_items):
self.change_filter(axis, checked_items)
combo = FilterComboBox(self)
combo.addItems([str(l) for l in axis.labels])
combo.checkedItemsChanged.connect(filter_changed)
return combo
def _selection_data(self, headers=True, none_selects_all=True):
"""
Returns selected labels as lists and raw data as Numpy ndarray
if headers=True or only the raw data otherwise
Parameters
----------
headers : bool, optional
Labels are also returned if True.
none_selects_all : bool, optional
If True (default) and selection is empty, returns all data.
Returns
-------
raw_data: numpy.ndarray
axes_names: list
vlabels: nested list
hlabels: list
"""
bounds = self.view_data._selection_bounds(none_selects_all=none_selects_all)
if bounds is None:
return None
row_min, row_max, col_min, col_max = bounds
raw_data = self.model_data.get_values(row_min, col_min, row_max, col_max)
if headers:
if not self.data_adapter.ndim:
return raw_data, None, None, None
axes_names = self.model_axes.get_values()
hlabels = [label[0] for label in self.model_hlabels.get_values(top=col_min, bottom=col_max)]
vlabels = self.model_vlabels.get_values(left=row_min, right=row_max) if self.data_adapter.ndim > 1 else []
return raw_data, axes_names, vlabels, hlabels
else:
return raw_data
def copy(self):
"""Copy selection as text to clipboard"""
raw_data, axes_names, vlabels, hlabels = self._selection_data()
data = self.data_adapter.selection_to_chain(raw_data, axes_names, vlabels, hlabels)
if data is None:
return
# np.savetxt makes things more complicated, especially on py3
# XXX: why don't we use repr for everything?
def vrepr(v):
if isinstance(v, float):
return repr(v)
else:
return str(v)
text = '\n'.join('\t'.join(vrepr(v) for v in line) for line in data)
clipboard = QApplication.clipboard()
clipboard.setText(text)
def to_excel(self):
"""Export selection in Excel"""
raw_data, axes_names, vlabels, hlabels = self._selection_data()
try:
self.data_adapter.to_excel(raw_data, axes_names, vlabels, hlabels)
except ImportError:
QMessageBox.critical(self, "Error", "to_excel() is not available because xlwings is not installed")
def paste(self):
bounds = self.view_data._selection_bounds()
if bounds is None:
return
row_min, row_max, col_min, col_max = bounds
clipboard = QApplication.clipboard()
text = str(clipboard.text())
list_data = [line.split('\t') for line in text.splitlines()]
try:
# take the first cell which contains '\'
pos_last = next(i for i, v in enumerate(list_data[0]) if '\\' in v)
except StopIteration:
# if there isn't any, assume 1d array
pos_last = 0
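        # Note (added comment): the backslash is used as a marker because, for arrays
        # with ndim > 1, the copied header row contains a corner cell with a backslash
        # in it (e.g. "axis1\axis2"); its position tells paste() where the label
        # columns stop and the data columns begin.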
if pos_last or '\\' in list_data[0][0]:
# ndim > 1
list_data = [line[pos_last + 1:] for line in list_data[1:]]
elif len(list_data) == 2 and list_data[1][0] == '':
# ndim == 1
list_data = [list_data[1][1:]]
new_data = np.array(list_data)
if new_data.shape[0] > 1:
row_max = row_min + new_data.shape[0]
if new_data.shape[1] > 1:
col_max = col_min + new_data.shape[1]
result = self.model_data.set_values(row_min, col_min, row_max, col_max, new_data)
if result is None:
return
# TODO: when pasting near bottom/right boundaries and size of
# new_data exceeds destination size, we should either have an error
# or clip new_data
self.view_data.selectionModel().select(QItemSelection(*result), QItemSelectionModel.ClearAndSelect)
def plot(self):
raw_data, axes_names, vlabels, hlabels = self._selection_data()
try:
from larray_editor.utils import show_figure
figure = self.data_adapter.plot(raw_data, axes_names, vlabels, hlabels)
# Display figure
show_figure(self, figure)
except ImportError:
QMessageBox.critical(self, "Error", "plot() is not available because matplotlib is not installed")
| license: gpl-3.0 |
| repo_name: Unidata/MetPy | path: v1.0/_downloads/d04a9494716f000a1433c75d1fe171ba/Natural_Neighbor_Verification.py | copies: 3 | size: 10144 |
# Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Natural Neighbor Verification
=============================
Walks through the steps of Natural Neighbor interpolation to validate that the algorithmic
approach taken in MetPy is correct.
"""
###########################################
# Find natural neighbors visual test
#
# A triangle is a natural neighbor for a point if the
# `circumscribed circle <https://en.wikipedia.org/wiki/Circumscribed_circle>`_ of the
# triangle contains that point. It is important that we identify the correct triangles
# for each point before proceeding with the interpolation.
#
# Algorithmically:
#
# 1. We place all of the grid points in a KDTree. These provide worst-case O(n) time
# complexity for spatial searches.
#
# 2. We generate a `Delaunay Triangulation <https://docs.scipy.org/doc/scipy/
# reference/tutorial/spatial.html#delaunay-triangulations>`_
# using the locations of the provided observations.
#
# 3. For each triangle, we calculate its circumcenter and circumradius. Using
# KDTree, we then assign each grid a triangle that has a circumcenter within a
# circumradius of the grid's location.
#
# 4. The resulting dictionary uses the grid index as a key and a set of natural
# neighbor triangles in the form of triangle codes from the Delaunay triangulation.
# This dictionary is then iterated through to calculate interpolation values.
#
# 5. We then traverse the ordered natural neighbor edge vertices for a particular
# grid cell in groups of 3 (n - 1, n, n + 1), and perform calculations to generate
# proportional polygon areas.
#
# Circumcenter of (n - 1), n, grid_location
# Circumcenter of (n + 1), n, grid_location
#
# Determine what existing circumcenters (i.e., Delaunay circumcenters) are associated
# with vertex n, and add those as polygon vertices. Calculate the area of this polygon.
#
# 6. Increment the current edges to be checked, i.e.:
# n - 1 = n, n = n + 1, n + 1 = n + 2
#
# 7. Repeat steps 5 & 6 until all of the edge combinations of 3 have been visited.
#
# 8. Repeat steps 4 through 7 for each grid cell.
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import ConvexHull, Delaunay, delaunay_plot_2d, Voronoi, voronoi_plot_2d
from scipy.spatial.distance import euclidean
from metpy.interpolate import geometry
from metpy.interpolate.points import natural_neighbor_point
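# A minimal illustrative sketch (not part of the original example) of the circumcircle
# containment test described above (step 3): a triangle is a natural neighbor of a query
# point when the point falls inside the triangle's circumscribed circle. The helper name
# below is made up for illustration only.
def in_circumcircle(circumcenter, circumradius, point):
    # Inside (or on) the circumcircle means the distance from the query point to the
    # circumcenter does not exceed the circumradius.
    return np.hypot(point[0] - circumcenter[0],
                    point[1] - circumcenter[1]) <= circumradius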
###########################################
# For a test case, we generate 10 random points and observations, where each
# observation value is simply the square of the point's x coordinate divided by 1000.
#
# We then create two test points (grid 0 & grid 1) at which we want to
# estimate a value using natural neighbor interpolation.
#
# The locations of these observations are then used to generate a Delaunay triangulation.
np.random.seed(100)
pts = np.random.randint(0, 100, (10, 2))
xp = pts[:, 0]
yp = pts[:, 1]
zp = (pts[:, 0] * pts[:, 0]) / 1000
tri = Delaunay(pts)
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
ax.ishold = lambda: True # Work-around for Matplotlib 3.0.0 incompatibility
delaunay_plot_2d(tri, ax=ax)
for i, zval in enumerate(zp):
ax.annotate(f'{zval} F', xy=(pts[i, 0] + 2, pts[i, 1]))
sim_gridx = [30., 60.]
sim_gridy = [30., 60.]
ax.plot(sim_gridx, sim_gridy, '+', markersize=10)
ax.set_aspect('equal', 'datalim')
ax.set_title('Triangulation of observations and test grid cell '
'natural neighbor interpolation values')
members, circumcenters = geometry.find_natural_neighbors(tri, list(zip(sim_gridx, sim_gridy)))
val = natural_neighbor_point(xp, yp, zp, (sim_gridx[0], sim_gridy[0]), tri, members[0],
circumcenters)
ax.annotate(f'grid 0: {val:.3f}', xy=(sim_gridx[0] + 2, sim_gridy[0]))
val = natural_neighbor_point(xp, yp, zp, (sim_gridx[1], sim_gridy[1]), tri, members[1],
circumcenters)
ax.annotate(f'grid 1: {val:.3f}', xy=(sim_gridx[1] + 2, sim_gridy[1]))
###########################################
# Using the circumcenter and circumcircle radius information from
# :func:`metpy.interpolate.geometry.find_natural_neighbors`, we can visually
# examine the results to see if they are correct.
def draw_circle(ax, x, y, r, m, label):
th = np.linspace(0, 2 * np.pi, 100)
nx = x + r * np.cos(th)
ny = y + r * np.sin(th)
ax.plot(nx, ny, m, label=label)
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
ax.ishold = lambda: True # Work-around for Matplotlib 3.0.0 incompatibility
delaunay_plot_2d(tri, ax=ax)
ax.plot(sim_gridx, sim_gridy, 'ks', markersize=10)
for i, (x_t, y_t) in enumerate(circumcenters):
r = geometry.circumcircle_radius(*tri.points[tri.simplices[i]])
if i in members[1] and i in members[0]:
        draw_circle(ax, x_t, y_t, r, 'm-', str(i) + ': grid 0 & 1')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)
elif i in members[0]:
draw_circle(ax, x_t, y_t, r, 'r-', str(i) + ': grid 0')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)
elif i in members[1]:
draw_circle(ax, x_t, y_t, r, 'b-', str(i) + ': grid 1')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=15)
else:
draw_circle(ax, x_t, y_t, r, 'k:', str(i) + ': no match')
ax.annotate(str(i), xy=(x_t, y_t), fontsize=9)
ax.set_aspect('equal', 'datalim')
ax.legend()
###########################################
# What?....the circle from triangle 8 looks pretty darn close. Why isn't
# grid 0 included in that circle?
x_t, y_t = circumcenters[8]
r = geometry.circumcircle_radius(*tri.points[tri.simplices[8]])
print('Distance between grid0 and Triangle 8 circumcenter:',
euclidean([x_t, y_t], [sim_gridx[0], sim_gridy[0]]))
print('Triangle 8 circumradius:', r)
###########################################
# Let's do a manual check of the above interpolation value for grid 0 (the southernmost grid).
# Grab the circumcenters and radii for natural neighbors
cc = np.array(circumcenters)
r = np.array([geometry.circumcircle_radius(*tri.points[tri.simplices[m]]) for m in members[0]])
print('circumcenters:\n', cc)
print('radii\n', r)
###########################################
# Draw the natural neighbor triangles and their circumcenters. Also plot a `Voronoi diagram
# <https://docs.scipy.org/doc/scipy/reference/tutorial/spatial.html#voronoi-diagrams>`_
# which serves as a complementary (but not necessary)
# spatial data structure that we use here simply to show areal ratios.
# Notice that the two natural neighbor triangle circumcenters are also vertices
# in the Voronoi plot (green dots), and the observations are in the polygons (blue dots).
vor = Voronoi(list(zip(xp, yp)))
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
ax.ishold = lambda: True # Work-around for Matplotlib 3.0.0 incompatibility
voronoi_plot_2d(vor, ax=ax)
nn_ind = np.array([0, 5, 7, 8])
z_0 = zp[nn_ind]
x_0 = xp[nn_ind]
y_0 = yp[nn_ind]
for x, y, z in zip(x_0, y_0, z_0):
ax.annotate(f'{x}, {y}: {z:.3f} F', xy=(x, y))
ax.plot(sim_gridx[0], sim_gridy[0], 'k+', markersize=10)
ax.annotate(f'{sim_gridx[0]}, {sim_gridy[0]}', xy=(sim_gridx[0] + 2, sim_gridy[0]))
ax.plot(cc[:, 0], cc[:, 1], 'ks', markersize=15, fillstyle='none',
label='natural neighbor\ncircumcenters')
for center in cc:
ax.annotate(f'{center[0]:.3f}, {center[1]:.3f}', xy=(center[0] + 1, center[1] + 1))
tris = tri.points[tri.simplices[members[0]]]
for triangle in tris:
x = [triangle[0, 0], triangle[1, 0], triangle[2, 0], triangle[0, 0]]
y = [triangle[0, 1], triangle[1, 1], triangle[2, 1], triangle[0, 1]]
ax.plot(x, y, ':', linewidth=2)
ax.legend()
ax.set_aspect('equal', 'datalim')
def draw_polygon_with_info(ax, polygon, off_x=0, off_y=0):
"""Draw one of the natural neighbor polygons with some information."""
pts = np.array(polygon)[ConvexHull(polygon).vertices]
for i, pt in enumerate(pts):
ax.plot([pt[0], pts[(i + 1) % len(pts)][0]],
[pt[1], pts[(i + 1) % len(pts)][1]], 'k-')
avex, avey = np.mean(pts, axis=0)
ax.annotate(f'area: {geometry.area(pts):.3f}', xy=(avex + off_x, avey + off_y),
fontsize=12)
cc1 = geometry.circumcenter((53, 66), (15, 60), (30, 30))
cc2 = geometry.circumcenter((34, 24), (53, 66), (30, 30))
draw_polygon_with_info(ax, [cc[0], cc1, cc2])
cc1 = geometry.circumcenter((53, 66), (15, 60), (30, 30))
cc2 = geometry.circumcenter((15, 60), (8, 24), (30, 30))
draw_polygon_with_info(ax, [cc[0], cc[1], cc1, cc2], off_x=-9, off_y=3)
cc1 = geometry.circumcenter((8, 24), (34, 24), (30, 30))
cc2 = geometry.circumcenter((15, 60), (8, 24), (30, 30))
draw_polygon_with_info(ax, [cc[1], cc1, cc2], off_x=-15)
cc1 = geometry.circumcenter((8, 24), (34, 24), (30, 30))
cc2 = geometry.circumcenter((34, 24), (53, 66), (30, 30))
draw_polygon_with_info(ax, [cc[0], cc[1], cc1, cc2])
###########################################
# Put all of the generated polygon areas and their affiliated values in arrays.
# Calculate the total area of all of the generated polygons.
areas = np.array([60.434, 448.296, 25.916, 70.647])
values = np.array([0.064, 1.156, 2.809, 0.225])
total_area = np.sum(areas)
print(total_area)
###########################################
# For each polygon area, calculate its percent of total area.
proportions = areas / total_area
print(proportions)
###########################################
# Multiply the percent of total area by the respective values.
contributions = proportions * values
print(contributions)
###########################################
# The sum of this array is the interpolation value!
interpolation_value = np.sum(contributions)
function_output = natural_neighbor_point(xp, yp, zp, (sim_gridx[0], sim_gridy[0]), tri,
members[0], circumcenters)
print(interpolation_value, function_output)
###########################################
# The values are slightly different due to truncating the area values in
# the above visual example to the 3rd decimal place.
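# Illustrative check (added, not in the original example): the two numbers should agree
# to well within the rounding introduced by keeping only three decimals above.
print('difference due to rounding:', abs(interpolation_value - function_output))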
plt.show()
| license: bsd-3-clause |
| repo_name: ananth95/ananth95.github.io-simQuad | path: ground_station/gyro_scope.py | copies: 2 | size: 5471 |
'''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
IMPORTANT!!
It is suggested you run this script with mpu_level2.ino first to see and understand
its operation.
Basically this script EXPECTS:
Arduino is providing space separated gyro readings @ ~5ms intervals (via MPU Interrupt).
* Each serial packet must be ASCII and look like:
[x_gyro]<space>[y_gyro]<space>[z_gyro]<newline>
+ You need to specify correct Serial port
+ You need to set the Y-limits of the plot axis.
+ You need to use correct value of "dt".
+ You need to set the correct conversion factor for Gyro readings.
Mode 0 1 2 3
Range +-250 +-500 +-1000 +-2000
Conv. 131 65.5 32.75 16.375
AND it DELIVERS:
* 3 axis loss-less Gyro readings plot (almost real time).
* 3D visualisation of current orientation based on gyro vals
If you want to just plot data in ~real time use {oscilloscope.py}.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
'''
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import serial, time
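# Illustrative helper (added, not in the original script): parsing one ASCII packet of
# the form "[x_gyro] [y_gyro] [z_gyro]\n" described in the header above. Note that the
# main loop further below actually reads 12-byte binary packets and unpacks big-endian
# int16 values rather than parsing text.
def parse_ascii_packet(line, conversion=65.5):
    # Split on whitespace and convert raw gyro counts to deg/sec using the
    # range-dependent conversion factor from the table in the header.
    x, y, z = [float(v) / conversion for v in line.split()]
    return x, y, z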
def rotate(v, axis, theta):
'''
Rotates "v" vector about "axis" vector by "theta" radians, returns vector
'''
c = np.cos(theta)
s = np.sin(theta)
t = 1-c
mat = np.array([ [c+axis[0]*axis[0]*t, axis[0]*axis[1]*t-axis[2]*s, axis[0]*axis[2]*t+axis[1]*s],
[axis[0]*axis[1]*t+axis[2]*s, c+axis[1]*axis[1]*t, axis[1]*axis[2]*t-axis[0]*s],
[axis[0]*axis[2]*t-axis[1]*s, axis[1]*axis[2]*t+axis[0]*s, c+axis[2]*axis[2]*t] ])
return mat.dot(v.T)
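# Quick sanity check of rotate() (added for illustration): rotating the x unit vector
# by pi/2 about the z axis should give approximately the y unit vector.
assert np.allclose(rotate(np.array([1., 0., 0.]), np.array([0., 0., 1.]), np.pi / 2),
                   [0., 1., 0.])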
def calcPose(omega):
'''
Helper function. Finds the "d"-theta, then calls rotate.
Omega must be in ** degrees per second **
'''
theta = omega*dt*np.pi/180 #theta is "d-theta" in radians
rpy[1] = rotate(rpy[1], rpy[0], theta[0])
rpy[0] = rotate(rpy[0], rpy[1], theta[1])
rpy[2] = np.cross(rpy[0], rpy[1])
rpy[1] = rotate(rpy[1], rpy[2], theta[2])
rpy[0] = rotate(rpy[0], rpy[2], theta[2])
plt.ion()
# SET CORRECT PORT NUM HERE
arduino = serial.Serial('/dev/ttyACM0', 57600)
# dt is found experimentally. Contact Ananya for details. Basically this the time between
# 2 MPU(gyro) interrupts. The np.pi/180 converts deg/sec to rad/sec.
# SET CORRECT dt HERE. TIME IN SECONDS BETWEEN TWO SENSOR PACKETS AS RECVD. BY ARDUINO.
dt = .005 # 5msec
# rpy is original orientation. These vectors are updated by calcPose()
rpy = np.eye(3)
fig = plt.figure(figsize=(16,6))
axes = fig.add_subplot(121)
a3d = fig.add_subplot(122, projection='3d')
a3d.set_xlim(-1.2,1.2)
a3d.set_ylim(-1.2,1.2)
a3d.set_zlim(-1.2,1.2)
a3d.scatter([0], [0], [0], s=40)
r, = a3d.plot([0,1], [0,0], [0,0], lw=2, c='black')
p, = a3d.plot([0,0], [0,1], [0,0], lw=2, c='red')
a3d.plot([0,2], [0,0], [0,0], c='cyan')
a3d.plot([0,0], [0,2], [0,0], c='brown')
a3d.plot([0,0], [0,0], [0,2], c='green')
a3d.plot([0,-2], [0,0], [0,0], ls='--', c='cyan')
a3d.plot([0,0], [0,-2], [0,0], ls='--', c='brown')
a3d.plot([0,0], [0,0], [0,-2], ls='--', c='green')
num_samples = 0
buff = 0
# "buff" counts till 50. Every time it reaches fifty, plt.draw() is called, since
# plt.draw() is a costly operation. Normal list append and pose calculations are fast.
# So, do those diligently, for every sample, but update display
# rarely (while ensuring smooth animation).
gyro_x = [0]
gyro_y = [0] # gyro data lists. I use them like queues.
gyro_z = [0]
t = [0]
# scopes is a list of 3 matplotlib.Line_2D objects.
scopes = [axes.plot(t, gyro_x, label=r'$\omega_x$')[0], axes.plot(t, gyro_y, label=r'$\omega_y$')[0], axes.plot(t, gyro_z, label=r'$\omega_z$')[0]]
axes.legend(prop=dict(size=14))
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
axes.set_ylim(-505, 505) # SET CORRECT Y-LIM HERE
conversion = 65.5 #Gyro 500 SET CORRECT CONV FACTOR HERE
# Refer datasheet. Convert ADC result into a Physical measurement.
# If you don't understand this, pls. leave project.
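# Worked example (added): with the +-500 deg/s range selected above, a raw reading of
# 131 corresponds to 131 / 65.5 = 2 deg/s.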
print 'Me Ready'
time.sleep(2)
#Handshake MAY BE REDUNDANT
print arduino.inWaiting()
arduino.flushInput()
arduino.write('e')
print 'Sent Request...'
data = [0]*6
while True:
try:
num = arduino.read(12)
num = [ord(x) for x in num]
except:
print 'Serial error!'
raise RuntimeError
_ind=0 #this var is connected to for loop below!!
for i in range(0,12, 2):
data[_ind] = (num[i]<<8)|num[i+1]
if data[_ind] & 0x8000:
data[_ind] = data[_ind] - 0x10000
_ind += 1
#print data[3:]
datas = np.array([float(data[3])/conversion, float(data[4])/conversion, float(data[5])/conversion])
gyro_x.append(datas[0])
gyro_y.append(datas[1])
gyro_z.append(datas[2])
num_samples += 1
t.append(num_samples)
calcPose(datas) #This function updates the global variable: "rpy"
if num_samples>200:
del t[0]
del gyro_x[0]
del gyro_y[0]
del gyro_z[0]
axes.set_xlim(t[0], num_samples)
scopes[0].set_data(t, gyro_x)
scopes[1].set_data(t, gyro_y)
scopes[2].set_data(t, gyro_z)
# pose matrix is just an easier way of giving input to the .set_data()
# and .set_3d_properties() methods. You see, line needs 2 (end) points:
# the rpy entries AND THE ORIGIN. pose matrix does just that: specifies
# BOTH end points.
pose = np.array([np.array([np.zeros(3), rpy[0]]).T, np.array([np.zeros(3), rpy[1]]).T, np.array([np.zeros(3), rpy[2]]).T])
r.set_data(pose[0][:2])
r.set_3d_properties(pose[0][2])
p.set_data(pose[1][:2])
p.set_3d_properties(pose[1][2])
if buff>25:
buff=0
plt.draw()
buff += 1
plt.ioff()
plt.show()
| license: gpl-2.0 |
| repo_name: miaecle/deepchem | path: deepchem/models/tests/test_overfit.py | copies: 1 | size: 25768 |
"""
Tests to make sure deepchem models can overfit on tiny datasets.
"""
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import numpy as np
import pytest
import tensorflow as tf
from flaky import flaky
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from tensorflow.python.framework import test_util
import deepchem as dc
from deepchem.models.optimizers import Adam
def test_sklearn_regression_overfit():
"""Test that sklearn models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score)
sklearn_model = RandomForestRegressor()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
def test_sklearn_classification_overfit():
"""Test that sklearn models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_skewed_classification_overfit():
"""Test sklearn models can overfit 0/1 datasets with few actives."""
n_samples = 100
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_regression_overfit():
"""Test that MultitaskRegressor can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
# TODO(rbharath): This breaks with optimizer="momentum". Why?
model = dc.models.MultitaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
learning_rate=0.003)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_classification_overfit():
"""Test that MultitaskClassifier can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.MultitaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples,
optimizer=Adam(learning_rate=0.0003, beta1=0.9, beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_residual_classification_overfit():
"""Test that a residual network can overfit simple classification datasets."""
n_samples = 10
n_features = 5
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.MultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[20] * 10,
dropouts=0.0,
batch_size=n_samples,
residual=True)
# Fit trained model
model.fit(dataset, nb_epoch=500)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@flaky
def test_fittransform_regression_overfit():
"""Test that MultitaskFitTransformRegressor can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
tf.random.set_seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model = dc.models.MultitaskFitTransformRegressor(
n_tasks, [n_features, n_features],
dropouts=[0.01],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
fit_transformers=fit_transformers,
n_evals=1,
optimizer=Adam(learning_rate=0.003, beta1=0.9, beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_skewed_classification_overfit():
"""Test MultitaskClassifier can overfit 0/1 datasets with few actives."""
  n_samples = 100
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.MultitaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples,
learning_rate=0.003)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .75
def test_skewed_missing_classification_overfit():
"""TG, skewed data, few actives
Test MultitaskClassifier overfit 0/1 datasets with missing data and few
actives. This is intended to be as close to singletask MUV datasets as
possible.
"""
n_samples = 5120
n_features = 6
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .002
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
y_flat, w_flat = np.squeeze(y), np.squeeze(w)
y_nonzero = y_flat[w_flat != 0]
num_nonzero = np.count_nonzero(y_nonzero)
weight_nonzero = len(y_nonzero) / num_nonzero
w_flat[y_flat != 0] = weight_nonzero
w = np.reshape(w_flat, (n_samples, n_tasks))
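  # Note (added comment): actives are up-weighted by len(y) / n_actives so that the
  # total weight carried by the rare positive examples roughly matches the total weight
  # carried by the negatives; without this the model could trivially predict the
  # majority class and never overfit the actives.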
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.MultitaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[1.],
batch_size=n_samples,
learning_rate=0.003)
# Fit trained model
model.fit(dataset, nb_epoch=100)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .7
def test_sklearn_multitask_classification_overfit():
"""Test SKLearn singletask-to-multitask overfits tiny data."""
n_tasks = 10
tasks = ["task%d" % task for task in range(n_tasks)]
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, task_averager=np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestClassifier()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@flaky
def test_multitask_classification_overfit():
"""Test MultitaskClassifier overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean, n_tasks=n_tasks)
model = dc.models.MultitaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples,
optimizer=Adam(learning_rate=0.0003, beta1=0.9, beta2=0.999))
# Fit trained model
model.fit(dataset)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_robust_multitask_classification_overfit():
"""Test tf robust multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.RobustMultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_IRV_multitask_classification_overfit():
"""Test IRV classifier overfits tiny data."""
n_tasks = 5
n_samples = 10
n_features = 128
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.randint(2, size=(n_samples, n_features))
y = np.ones((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
IRV_transformer = dc.trans.IRVTransformer(5, n_tasks, dataset)
dataset_trans = IRV_transformer.transform(dataset)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.MultitaskIRVClassifier(
n_tasks, K=5, learning_rate=0.01, batch_size=n_samples)
# Fit trained model
model.fit(dataset_trans)
# Eval model on train
scores = model.evaluate(dataset_trans, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_multitask_regression_overfit():
"""Test SKLearn singletask-to-multitask overfits tiny regression data."""
n_tasks = 2
tasks = ["task%d" % task for task in range(n_tasks)]
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.r2_score, task_averager=np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestRegressor()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
def test_multitask_regression_overfit():
"""Test MultitaskRegressor overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 10
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean, mode="regression")
model = dc.models.MultitaskRegressor(
n_tasks, n_features, dropouts=0.0, batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=1000)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .02
def test_residual_regression_overfit():
"""Test that a residual multitask network can overfit tiny data."""
n_tasks = 10
n_samples = 10
n_features = 10
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean, mode="regression")
model = dc.models.MultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[20] * 10,
dropouts=0.0,
batch_size=n_samples,
residual=True)
# Fit trained model
model.fit(dataset, nb_epoch=1000)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .02
def test_tf_robust_multitask_regression_overfit():
"""Test tf robust multitask overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean, mode="regression")
model = dc.models.RobustMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .2
def test_progressive_classification_overfit():
"""Test progressive multitask overfits tiny data."""
np.random.seed(123)
n_tasks = 5
n_samples = 10
n_features = 6
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
metric = dc.metrics.Metric(dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.ProgressiveMultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.001,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=300)
# Eval model on train
scores = model.evaluate(dataset, [metric])
assert scores[metric.name] > .9
def test_progressive_regression_overfit():
"""Test progressive multitask overfits tiny data."""
np.random.seed(123)
n_tasks = 5
n_samples = 10
n_features = 6
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
metric = dc.metrics.Metric(dc.metrics.rms_score, task_averager=np.mean)
model = dc.models.ProgressiveMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.002,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [metric])
assert scores[metric.name] < .2
def test_multitask_regressor_uncertainty():
"""Test computing uncertainty for a MultitaskRegressor."""
n_tasks = 1
n_samples = 30
n_features = 1
noise = 0.1
# Generate dummy dataset
X = np.random.rand(n_samples, n_features, 1)
y = 10 * X + np.random.normal(scale=noise, size=(n_samples, n_tasks, 1))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[200],
weight_init_stddevs=[.1],
batch_size=n_samples,
dropouts=0.1,
learning_rate=0.003,
uncertainty=True)
# Fit trained model
model.fit(dataset, nb_epoch=2500)
# Predict the output and uncertainty.
pred, std = model.predict_uncertainty(dataset)
assert np.mean(np.abs(y - pred)) < 1.0
assert noise < np.mean(std) < 1.0
@pytest.mark.slow
def test_DAG_singletask_regression_overfit():
"""Test DAG regressor multitask overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_feat = 75
batch_size = 10
transformer = dc.trans.DAGTransformer(max_atoms=50)
dataset = transformer.transform(dataset)
model = dc.models.DAGModel(
n_tasks,
max_atoms=50,
n_atom_feat=n_feat,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=1200)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
def test_weave_singletask_classification_overfit():
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
n_atom_feat = 75
n_pair_feat = 14
n_feat = 128
batch_size = 10
model = dc.models.WeaveModel(
n_tasks,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
n_graph_feat=n_feat,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="classification")
# Fit trained model
model.fit(dataset, nb_epoch=20)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .65
def test_weave_singletask_regression_overfit():
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_atom_feat = 75
n_pair_feat = 14
n_feat = 128
batch_size = 10
model = dc.models.WeaveModel(
n_tasks,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
n_graph_feat=n_feat,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=120)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
@pytest.mark.slow
def test_MPNN_singletask_regression_overfit():
"""Test MPNN overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_atom_feat = 75
n_pair_feat = 14
batch_size = 10
model = dc.models.MPNNModel(
n_tasks,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
T=2,
M=3,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=50)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
def test_textCNN_singletask_classification_overfit():
"""Test textCNN model overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.RawFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
char_dict, length = dc.models.TextCNNModel.build_char_dict(dataset)
batch_size = 10
model = dc.models.TextCNNModel(
n_tasks,
char_dict,
seq_length=length,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="classification")
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .8
@flaky()
def test_textCNN_singletask_regression_overfit():
"""Test textCNN model overfits tiny data."""
np.random.seed(123)
tf.random.set_seed(123)
n_tasks = 1
current_dir = os.path.dirname(os.path.abspath(__file__))
# Load mini log-solubility dataset.
featurizer = dc.feat.RawFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
char_dict, length = dc.models.TextCNNModel.build_char_dict(dataset)
batch_size = 10
model = dc.models.TextCNNModel(
n_tasks,
char_dict,
seq_length=length,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=200)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .9
| license: mit |
| repo_name: paninski-lab/yass | path: tests/unit/evaluation/retinal_evaluation.py | copies: 1 | size: 10755 |
import numpy as np
import matplotlib.pyplot as plt
import os
import scipy.io
import yaml
from yass.evaluate.stability import (MeanWaveCalculator,
RecordingAugmentation,
RecordingBatchIterator,
SpikeSortingEvaluation)
from yass.pipeline import run
def main_channels(template):
"""Computes the main channel of a list of templates.
Parameters
----------
template: numpy.ndarray
The shape of the array should be (T, C, K) where T indicates
time samples, C number of channels and K total number of
units/clusters.
"""
return np.argsort(np.max(
np.abs(template), axis=0), axis=0).T
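# Usage note (added): because the argsort above orders channels from weakest to
# strongest absolute amplitude, the main channel of unit k is
# main_channels(templates)[k, -1], which is exactly how temp_snr() below uses it.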
def temp_snr(templates):
"""Computes the PNR of a list of templates.
Parameters
----------
template: numpy.ndarray
The shape of the array should be (T, C, K) where T indicates
time samples, C number of channels and K total number of
units/clusters.
"""
tot = templates.shape[2]
res = np.zeros(tot)
for unit, c in enumerate(main_channels(templates)[:, -1]):
res[unit] = np.linalg.norm(templates[:, c, unit], np.inf)
return res
class EvaluationPlot(object):
"""Standard figure for evaluation comparison."""
def __init__(self, data_set_title, n_dataset, methods=['Method'],
logit_y=True, eval_type='Accuracy'):
"""Setup pyplot figures.
Parameters
----------
data_set_title: str
Title of the data set that will be displayed in the plots.
n_dataset: int
            Total number of data sets that evaluations are performed on.
methods: list of str
The spike sorting methods that evaluations are done for.
logit_y: bool
Logit transform the y-axis (metric axis) to emphasize near 1
and near 0 values.
eval_type: str
Type of metric (for display purposes only) which appears in the
plots.
"""
self.new_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
self.n_dataset = n_dataset
self.methods = methods
self.data_set_title = data_set_title
self.eval_type = eval_type
self.logit_y = logit_y
self.data_set_title = data_set_title
# Contains evaluation metrics for methods and datasets.
self.metric_matrix = {}
for method in self.methods:
self.metric_matrix[method] = []
for i in range(self.n_dataset):
self.metric_matrix[method].append(None)
def logit(self, x, inverse=False):
"""Logit transfors the array x.
Parameters
----------
x: numpy.ndarray
List of values [0-1] only to be logit transformed.
inverse: bool
Inverse-logit transforms if True.
"""
        # Add a small epsilon to avoid the 0/1 boundary values, where the transform diverges.
x[x == 0] += 0.0001
x[x == 1] -= 0.0001
if inverse:
return 1 / (1 + np.exp(-x))
return np.log(x / (1 - x))
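        # Worked example (added): logit(0.5) = 0 while logit(0.99) is roughly 4.6, so
        # accuracies near 1 are spread apart on the transformed axis instead of being
        # squashed together near the top of the plot.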
def set_logit_labels(
self, labs=np.array([0.001, 0.01, 0.1, 0.5, 0.9, 0.99, 0.999])):
"""Logit transforms the y axis.
Parameters
----------
labs: numpy.ndarray
List of values ([0-1] only) to be displayed as ticks on
y axis.
"""
for i in range(self.n_dataset):
self.ax[i].set_yticks(self.logit(labs))
self.ax[i].set_yticklabels(labs)
def add_metric(self, snr_list, percent_list, dataset_number,
method_name='Method'):
"""Adds accuracy percentages for clusters/units of a method.
Parameters
----------
snr_list: numpy.ndarray of shape(N,)
List of SNR/PNR values for clusters/units of the corresponding
dataset number and spike sorting method.
        percent_list: numpy.ndarray of shape(N,)
            List of accuracy percentages (values in [0, 1]) for clusters/units of
            the corresponding dataset number and spike sorting method.
dataset_number: int
Value should be between 0 and self.n_dataset - 1. Indicates
which dataset are the evaluations for.
        method_name: str
            Should be a member of self.methods. Indicates which of the
            spike sorting methods the evaluations correspond to.
"""
if method_name not in self.methods:
raise KeyError('Method name does not exist in methods list.')
if np.any(percent_list < 0) or np.any(percent_list > 1):
raise TypeError(
'Percent accuracy list should contain only [0-1] values.')
eval_tup = (snr_list, percent_list)
self.metric_matrix[method_name][dataset_number] = eval_tup
def generate_snr_metric_plot(self):
"""Generate pdf plots of evaluations for the datasets and methods."""
self.fig, self.ax = plt.subplots(self.n_dataset, 1)
for i in range(self.n_dataset):
self.ax[i].set_title(
self.data_set_title + 'Dataset {}'.format(i + 1))
self.ax[i].set_ylabel('Percent {}'.format(self.eval_type))
self.ax[i].legend(self.methods)
self.ax[i].set_xlabel('Log PNR')
if self.logit_y:
self.set_logit_labels()
for method_idx, method in enumerate(self.methods):
for i in range(self.n_dataset):
try:
metric_tuple = self.metric_matrix[method][i]
metrics = metric_tuple[1]
if self.logit_y:
metrics = self.logit(metrics)
self.ax[i].scatter(
metric_tuple[0], metrics,
color=self.new_colors[method_idx])
except Exception:
print("No metric found for {} for dataset {}".format(
method, i + 1))
self.fig.set_size_inches(12, 4 * self.n_dataset)
plt.savefig('{}_{}.pdf'.format(self.data_set_title, self.eval_type))
def main(n_batches=6):
"""Runs the procedure for evaluating yass on retinal data."""
config_file = open('config_template.yaml', 'r')
    config = yaml.safe_load(config_file)  # safe_load: newer PyYAML requires an explicit loader
config_file.close()
# Extracting window around spikes.
sampling_rate = config['recordings']['sampling_rate']
n_chan = config['recordings']['n_channels']
dtype = config['recordings']['dtype']
spike_length = config['recordings']['spike_size_ms']
window_radius = int(spike_length * sampling_rate / 1e3)
window = range(-window_radius, window_radius)
k_tot_data = 4
# Set up the pyplot figures
stb_plot = EvaluationPlot('EJ Retinal', k_tot_data, eval_type='Stability')
acc_plot = EvaluationPlot('EJ Retinal', k_tot_data)
for data_idx, data_number in enumerate(range(1, k_tot_data + 1)):
# Setting up config file for yass.
bin_file = 'ej49_data{}.bin'.format(data_number)
geom_file = 'ej49_geometry{}.txt'.format(data_number)
config['data']['recordings'] = bin_file
config['data']['geometry'] = geom_file
spike_train = run(config=config)
# Data augmentation setup.
        file_size_bytes = os.path.getsize(bin_file)
tot_samples = file_size_bytes / (np.dtype(dtype).itemsize * n_chan)
radius = 70
n_batch_samples = int(tot_samples / n_batches)
batch_reader = RecordingBatchIterator(
bin_file, geom_file, sample_rate=sampling_rate,
batch_time_samples=n_batch_samples, n_batches=n_batches,
n_chan=n_chan, radius=radius, whiten=False)
mean_wave = MeanWaveCalculator(
batch_reader, spike_train, window=window)
mean_wave.compute_templates(n_batches=n_batches)
# Augment with new spikes.
stab = RecordingAugmentation(
mean_wave, augment_rate=0.25, move_rate=0.2)
aug_bin_file = 'ej49_data{}.aug.bin'.format(data_number)
aug_gold_spt, status = stab.save_augment_recording(
aug_bin_file, n_batches)
np.save('ej49_data{}.aug.npy'.format(data_number), aug_gold_spt)
# Setting up config file for yass to run on augmented data.
config['data']['recordings'] = aug_bin_file
config['data']['geometry'] = geom_file
yass_aug_spike_train = run(config=config)
# Evaluate accuracy of yass.
gold_std_spike_train_file = 'groundtruth_ej49_data{}.mat'.format(
data_number)
gold_std_map = scipy.io.loadmat(gold_std_spike_train_file)
gold_std_spike_train = np.append(
gold_std_map['spt_gt'], gold_std_map['L_gt'], axis=1)
gold_standard_mean_wave = MeanWaveCalculator(
batch_reader, gold_std_spike_train, window=window)
gold_standard_mean_wave.compute_templates(n_batches=n_batches)
accuracy_eval = SpikeSortingEvaluation(
gold_std_spike_train, spike_train,
gold_standard_mean_wave.templates, mean_wave.templates)
acc_tp = accuracy_eval.true_positive
acc_plot.add_metric(
np.log(temp_snr(gold_standard_mean_wave.templates)),
acc_tp, data_idx)
batch_reader.close_iterator()
# Evaluate stability of yass.
batch_reader = RecordingBatchIterator(
aug_bin_file, geom_file, sample_rate=sampling_rate,
batch_time_samples=n_batch_samples, n_batches=n_batches,
n_chan=n_chan, radius=radius, whiten=False)
aug_gold_standard_mean_wave = MeanWaveCalculator(
batch_reader, aug_gold_spt, window=window)
aug_gold_standard_mean_wave.compute_templates(n_batches=n_batches)
aug_yass_mean_wave = MeanWaveCalculator(
batch_reader, yass_aug_spike_train, window=window)
aug_yass_mean_wave.compute_templates(n_batches=n_batches)
stability_eval = SpikeSortingEvaluation(
aug_gold_spt, yass_aug_spike_train,
aug_gold_standard_mean_wave.templates,
aug_yass_mean_wave.templates)
stb_tp = stability_eval.true_positive
stb_plot.add_metric(
np.log(temp_snr(aug_gold_standard_mean_wave.templates)),
stb_tp, data_idx)
batch_reader.close_iterator()
# Render the plots and save them.
acc_plot.generate_snr_metric_plot()
stb_plot.generate_snr_metric_plot()
if __name__ == '__main__':
main()
| license: apache-2.0 |
| repo_name: GuessWhoSamFoo/pandas | path: pandas/tests/frame/test_rank.py | copies: 1 | size: 11335 |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
from pandas import DataFrame, Series
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
class TestRank(TestData):
s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])
df = DataFrame({'A': s, 'B': s})
results = {
'average': np.array([1.5, 5.5, 7.0, 3.5, np.nan,
3.5, 1.5, 8.0, np.nan, 5.5]),
'min': np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]),
'max': np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]),
'first': np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]),
'dense': np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]),
}
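    # Worked example (added comment): ignoring the two NaNs, the sorted values of s are
    # [1, 1, 2, 2, 3, 3, 4, 5] occupying positions 1-8, so the tied 1s get average rank
    # (1 + 2) / 2 = 1.5, the tied 2s get 3.5, the tied 3s get 5.5, and 4 and 5 get 7 and
    # 8 -- which is exactly the 'average' row above; the other methods resolve the same
    # ties differently.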
@pytest.fixture(params=['average', 'min', 'max', 'first', 'dense'])
def method(self, request):
"""
Fixture for trying all rank methods
"""
return request.param
def test_rank(self):
rankdata = pytest.importorskip('scipy.stats.rankdata')
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
ranks0 = self.frame.rank()
ranks1 = self.frame.rank(1)
mask = np.isnan(self.frame.values)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
exp1 = np.apply_along_axis(rankdata, 1, fvals)
exp1[mask] = np.nan
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# integers
df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
result = df.rank()
exp = df.astype(float).rank()
tm.assert_frame_equal(result, exp)
result = df.rank(1)
exp = df.astype(float).rank(1)
tm.assert_frame_equal(result, exp)
def test_rank2(self):
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
result = df.rank(1, pct=True)
tm.assert_frame_equal(result, expected)
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = df.rank(0) / 2.0
result = df.rank(0, pct=True)
tm.assert_frame_equal(result, expected)
df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
result = df.rank(1, numeric_only=False)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
result = df.rank(0, numeric_only=False)
tm.assert_frame_equal(result, expected)
df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 3.0, 2.0]])
result = df.rank(1, numeric_only=False)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 1.0, 2.0]])
result = df.rank(0, numeric_only=False)
tm.assert_frame_equal(result, expected)
# f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check the rank
expected = DataFrame([[2., np.nan, 1.],
[2., 3., 1.]])
result = df.rank(1, numeric_only=False, ascending=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1., np.nan, 2.],
[2., 1., 3.]])
result = df.rank(1, numeric_only=False, ascending=False)
tm.assert_frame_equal(result, expected)
# mixed-type frames
self.mixed_frame['datetime'] = datetime.now()
self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
result = self.mixed_frame.rank(1)
expected = self.mixed_frame.rank(1, numeric_only=True)
tm.assert_frame_equal(result, expected)
df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10,
1e60, 1e80, 1e-30]})
exp = DataFrame({"a": [3.5, 1., 3.5, 5., 6., 7., 2.]})
tm.assert_frame_equal(df.rank(), exp)
def test_rank_na_option(self):
rankdata = pytest.importorskip('scipy.stats.rankdata')
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
# bottom
ranks0 = self.frame.rank(na_option='bottom')
ranks1 = self.frame.rank(1, na_option='bottom')
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp1 = np.apply_along_axis(rankdata, 1, fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# top
ranks0 = self.frame.rank(na_option='top')
ranks1 = self.frame.rank(1, na_option='top')
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fval0)
exp1 = np.apply_along_axis(rankdata, 1, fval1)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# descending
# bottom
ranks0 = self.frame.rank(na_option='top', ascending=False)
ranks1 = self.frame.rank(1, na_option='top', ascending=False)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fvals)
exp1 = np.apply_along_axis(rankdata, 1, -fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# descending
# top
ranks0 = self.frame.rank(na_option='bottom', ascending=False)
ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fval0)
exp1 = np.apply_along_axis(rankdata, 1, -fval1)
tm.assert_numpy_array_equal(ranks0.values, exp0)
tm.assert_numpy_array_equal(ranks1.values, exp1)
# bad values throw error
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
with pytest.raises(ValueError, match=msg):
self.frame.rank(na_option='bad', ascending=False)
# invalid type
with pytest.raises(ValueError, match=msg):
self.frame.rank(na_option=True, ascending=False)
def test_rank_axis(self):
# check if using axes' names gives the same result
df = DataFrame([[2, 1], [4, 3]])
tm.assert_frame_equal(df.rank(axis=0), df.rank(axis='index'))
tm.assert_frame_equal(df.rank(axis=1), df.rank(axis='columns'))
def test_rank_methods_frame(self):
pytest.importorskip('scipy.stats.special')
rankdata = pytest.importorskip('scipy.stats.rankdata')
import scipy
xs = np.random.randint(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
cols = [chr(ord('z') - i) for i in range(xs.shape[1])]
for vals in [xs, xs + 1e6, xs * 1e-6]:
df = DataFrame(vals, columns=cols)
for ax in [0, 1]:
for m in ['average', 'min', 'max', 'first', 'dense']:
result = df.rank(axis=ax, method=m)
sprank = np.apply_along_axis(
rankdata, ax, vals,
m if m != 'first' else 'ordinal')
sprank = sprank.astype(np.float64)
expected = DataFrame(sprank, columns=cols)
if (LooseVersion(scipy.__version__) >=
LooseVersion('0.17.0')):
expected = expected.astype('float64')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
def test_rank_descending(self, method, dtype):
if 'i' in dtype:
df = self.df.dropna()
else:
df = self.df.astype(dtype)
res = df.rank(ascending=False)
expected = (df.max() - df).rank()
assert_frame_equal(res, expected)
if method == 'first' and dtype == 'O':
return
expected = (df.max() - df).rank(method=method)
if dtype != 'O':
res2 = df.rank(method=method, ascending=False,
numeric_only=True)
assert_frame_equal(res2, expected)
res3 = df.rank(method=method, ascending=False,
numeric_only=False)
assert_frame_equal(res3, expected)
@pytest.mark.parametrize('axis', [0, 1])
@pytest.mark.parametrize('dtype', [None, object])
def test_rank_2d_tie_methods(self, method, axis, dtype):
df = self.df
def _check2d(df, expected, method='average', axis=0):
exp_df = DataFrame({'A': expected, 'B': expected})
if axis == 1:
df = df.T
exp_df = exp_df.T
result = df.rank(method=method, axis=axis)
assert_frame_equal(result, exp_df)
disabled = {(object, 'first')}
if (dtype, method) in disabled:
return
frame = df if dtype is None else df.astype(dtype)
_check2d(frame, self.results[method], method=method, axis=axis)
@pytest.mark.parametrize(
"method,exp", [("dense",
[[1., 1., 1.],
[1., 0.5, 2. / 3],
[1., 0.5, 1. / 3]]),
("min",
[[1. / 3, 1., 1.],
[1. / 3, 1. / 3, 2. / 3],
[1. / 3, 1. / 3, 1. / 3]]),
("max",
[[1., 1., 1.],
[1., 2. / 3, 2. / 3],
[1., 2. / 3, 1. / 3]]),
("average",
[[2. / 3, 1., 1.],
[2. / 3, 0.5, 2. / 3],
[2. / 3, 0.5, 1. / 3]]),
("first",
[[1. / 3, 1., 1.],
[2. / 3, 1. / 3, 2. / 3],
[3. / 3, 2. / 3, 1. / 3]])])
def test_rank_pct_true(self, method, exp):
# see gh-15630.
df = DataFrame([[2012, 66, 3], [2012, 65, 2], [2012, 65, 1]])
result = df.rank(method=method, pct=True)
expected = DataFrame(exp)
tm.assert_frame_equal(result, expected)
@pytest.mark.single
def test_pct_max_many_rows(self):
# GH 18271
df = DataFrame({'A': np.arange(2**24 + 1),
'B': np.arange(2**24 + 1, 0, -1)})
result = df.rank(pct=True).max()
assert (result == 1).all()
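# For reference, a minimal sketch of the na_option behaviour exercised above,
# using an assumed one-column frame (values are illustrative only):
#
#   df = DataFrame({'A': [1., np.nan, 3.]})
#   df.rank(na_option='keep')    # NaN stays NaN:       [1., NaN, 2.]
#   df.rank(na_option='top')     # NaN ranked smallest: [2., 1., 3.]
#   df.rank(na_option='bottom')  # NaN ranked largest:  [1., 3., 2.]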
|
bsd-3-clause
|
acmaheri/sms-tools
|
lectures/5-Sinusoidal-model/plots-code/sineModelAnal-bendir.py
|
24
|
1245
|
import numpy as np
import matplotlib.pyplot as plt
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/bendir.wav'))
w = np.hamming(2001)
N = 2048
H = 200
t = -80
minSineDur = .02
maxnSines = 150
freqDevOffset = 10
freqDevSlope = 0.001
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
plt.figure(1, figsize=(9.5, 7))
maxplotfreq = 800.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sinusoidal tracks (bendir.wav)')
plt.tight_layout()
plt.savefig('sineModelAnal-bendir.png')
plt.show()
|
agpl-3.0
|
lordkman/burnman
|
burnman/nonlinear_fitting.py
|
5
|
20066
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from scipy.stats import t, norm, genextreme
import itertools
import copy
from .tools import unit_normalize
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.patches import Ellipse
def nonlinear_least_squares_fit(model,
lm_damping = 0.,
param_tolerance = 1.e-7,
max_lm_iterations = 100,
verbose = False):
"""
Function to compute the "best-fit" parameters for a model
by nonlinear least squares fitting.
The nonlinear least squares algorithm closely follows the logic in
Section 23.1 of Bayesian Probability Theory
(von der Linden et al., 2014; Cambridge University Press).
Parameters
----------
model : class instance
Must have the following attributes:
data : 2D numpy array.
Elements of x[i][j] contain the observed position of
data point i
data_covariances : 3D numpy array
Elements of cov[i][j][k] contain the covariance matrix
of data point i
mle_tolerances : numpy array
The iterations to find the maximum likelihood estimator
for each observed data point will stop when mle_tolerances[i] <
np.linalg.norm(data_mle[i] - model.function(data_mle[i], flag))
delta_params : numpy array
parameter perturbations used to compute the jacobian
Must also contain the following functions:
set_params(self, param_values):
Function to set parameters
get_params(self):
Function to get current model parameters
function(self, x, flag):
Returns value of model function evaluated at x
normal(self, x, flag):
Returns value of normal to the model function
evaluated at x
lm_damping : float (optional, default: 0)
Levenberg-Marquardt parameter for least squares minimization
param_tolerance : float (optional, default: 1.e-7)
Levenberg-Marquardt iterations are terminated when
the maximum fractional change in any of the parameters
during an iteration drops below this value
max_lm_iterations : integer (optional, default: 100)
Maximum number of Levenberg-Marquardt iterations
verbose : bool
Print some information to standard output
Attributes added to model
----------
n_dof : integer
Degrees of freedom of the system
data_mle : 2D numpy array
Maximum likelihood estimates of the observed data points
on the best-fit curve
jacobian : 2D numpy array
d(weighted_residuals)/d(parameter)
weighted_residuals : numpy array
Weighted residuals
weights : numpy array
1/(data variances normal to the best fit curve)
WSS : float
Weighted sum of squares residuals
popt : numpy array
Optimized parameters
pcov : 2D numpy array
Covariance matrix of optimized parameters
noise_variance : float
Estimate of the variance of the data normal to the curve
This function is available as ``burnman.nonlinear_least_squares_fit``.
"""
def _mle_estimate(x, x_m, cov, flag):
n = model.normal(x_m, flag)
var_n = abs_line_project(cov, n)
d = (x_m - x).dot(n)
x_mle = x + d*((n.dot(cov)).T)/var_n
return x_mle, d, var_n
def _find_mle():
x_mle_arr = np.empty_like(model.data)
residual_arr = np.empty(n_data)
var_arr = np.empty(n_data)
for i, (x, cov, flag) in enumerate(zip(*[model.data, model.data_covariances, model.flags])):
x_mle_arr[i] = model.function(x, flag)
x_mle_est, residual_arr[i], var_arr[i] = _mle_estimate(x, x_mle_arr[i], cov, flag)
delta_x = x_mle_arr[i] - x
while np.linalg.norm(delta_x) > model.mle_tolerances[i]:
x_mle_est, residual_arr[i], var_arr[i] = _mle_estimate(x, x_mle_arr[i], cov, flag)
x_mle_arr[i] = model.function(x_mle_est, flag)
delta_x = x_mle_arr[i] - x_mle_est
return x_mle_arr, residual_arr/np.sqrt(var_arr), 1./var_arr
def calculate_jacobian():
model.jacobian = np.empty((n_data, n_params))
diag_delta = np.diag(model.delta_params)
param_values = model.get_params()
for prm_i, value in enumerate(param_values):
model.set_params(param_values - diag_delta[prm_i])
x_mle_arr, residual_arr_0, weights_0 = _find_mle()
model.set_params(param_values + diag_delta[prm_i])
x_mle_arr, residual_arr_1, weights_1 = _find_mle()
model.jacobian[:,prm_i] = (residual_arr_1 - residual_arr_0)/(2.*diag_delta[prm_i][prm_i])
model.set_params(param_values) # reset params
def _update_beta(lmbda):
# Performs a Levenberg-Marquardt iteration
# Note that if lambda = 0, this is a simple Gauss-Newton iteration
calculate_jacobian()
model.data_mle, model.weighted_residuals, model.weights = _find_mle()
J = model.jacobian # this the weighted Jacobian
JTJ = J.T.dot(J)
delta_beta = np.linalg.inv(JTJ + lmbda*np.diag(JTJ)).dot(J.T).dot(model.weighted_residuals)
new_params = model.get_params() - delta_beta
f_delta_beta = delta_beta/new_params
model.set_params(new_params)
return f_delta_beta
n_data = len(model.data)
n_params = len(model.get_params())
n_dimensions = len(model.data[0])
model.dof = n_data - n_params
if not hasattr(model, 'flags'):
model.flags = [None] * n_data
for n_it in range(max_lm_iterations):
f_delta_beta = _update_beta(lm_damping)
max_f = np.max(np.abs(f_delta_beta))
if verbose:
print('Iteration {0:d}: {1}. Max change in param: {2}'.format(n_it, model.get_params(), max_f))
if max_f < param_tolerance:
break
J = model.jacobian
r = model.weighted_residuals
model.WSS = r.dot(r.T)
model.popt = model.get_params()
model.pcov = np.linalg.inv(J.T.dot(J))*r.dot(r.T)/model.dof
# Estimate the noise variance normal to the curve
model.goodness_of_fit = model.WSS/model.dof
model.noise_variance = r.dot(np.diag(1./model.weights)).dot(r.T)/model.dof
if verbose:
if n_it == max_lm_iterations - 1:
print('Max iterations ({0:d}) reached (param tolerance = {1:1e})'.format(max_lm_iterations, param_tolerance))
else:
print('Converged in {0:d} iterations'.format(n_it))
print('\nOptimised parameter values:')
print(model.popt)
print('\nParameter covariance matrix:')
print(model.pcov)
print('')
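# A minimal sketch of a model object satisfying the interface documented in
# nonlinear_least_squares_fit(), assuming a straight-line fit y = m*x + c to
# 2D data; the class name and the numerical defaults are illustrative only.
class ExampleLinearModel(object):
    def __init__(self, data, data_covariances):
        self.data = data                          # shape (n_data, 2)
        self.data_covariances = data_covariances  # shape (n_data, 2, 2)
        self.params = np.array([1., 0.])          # [m, c]
        self.delta_params = np.array([1.e-5, 1.e-5])
        self.mle_tolerances = 1.e-8 * np.ones(len(data))
    def set_params(self, param_values):
        self.params = param_values
    def get_params(self):
        return self.params
    def function(self, x, flag):
        # point on the line sharing the abscissa of x
        m, c = self.params
        return np.array([x[0], m*x[0] + c])
    def normal(self, x, flag):
        # unit normal to the line y = m*x + c
        m, c = self.params
        return np.array([-m, 1.]) / np.sqrt(m*m + 1.)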
def confidence_prediction_bands(model, x_array, confidence_interval, f, flag=None):
"""
This function calculates the confidence and prediction bands of the function f(x)
from a best-fit model with uncertainties in its parameters as calculated (for example)
by the function nonlinear_least_squares_fit().
The values are calculated via the delta method, which estimates the variance of f
evaluated at x as var(f(x)) = df(x)/dB var(B) df(x)/dB
where df(x)/dB is the vector of partial derivatives of f(x) with respect to B
Parameters
----------
model : class instance
As modified (for example) by the function nonlinear_least_squares_fit().
Should contain the following functions:
get_params, set_params, function, normal
And attributes:
delta_params, pcov, dof, noise_variance
x_array : 2D numpy array
coordinates at which to evaluate the bounds
confidence_interval : float
Probability level of finding the true model (confidence bound) or any new
data point (probability bound). For example, the 95% confidence bounds
should be calculated using a confidence interval of 0.95.
f : function
This is the function defining the variable y=f(x) for which the
confidence and prediction bounds are desired
flag : variable type
This (optional) flag is passed to model.function to control how the
modified position of x is calculated. This value is then used by f(x)
Output
------
bounds : 2D numpy array
An element of bounds[i][j] gives the lower and upper confidence (i=0, i=1) and
prediction (i=2, i=3) bounds for the jth data point.
"""
# Check array dimensions
n_dimensions = len(model.data[0])
if len(x_array[0]) != n_dimensions:
raise Exception('Dimensions of each point must be the same as the total number of dimensions')
param_values = model.get_params()
x_m_0s = np.empty_like(x_array)
f_m_0s = np.empty_like(x_array[:,0])
for i, x in enumerate(x_array):
x_m_0s[i] = model.function(x, flag)
f_m_0s[i] = f(x)
diag_delta = np.diag(model.delta_params)
dxdbeta = np.empty([len(param_values), len(x_array)])
for i, value in enumerate(param_values):
model.set_params(param_values + diag_delta[i])
for j, x_m_0 in enumerate(x_m_0s):
x_m_1 = model.function(x_m_0, flag)
dxdbeta[i][j] = (f(x_m_1) - f_m_0s[j])/diag_delta[i][i]
model.set_params(param_values) # reset params
variance = np.empty(len(x_array))
for i, Gprime in enumerate(dxdbeta.T):
variance[i] = Gprime.T.dot(model.pcov).dot(Gprime)
critical_value = t.isf(0.5*(confidence_interval + 1.), model.dof)
confidence_half_widths = critical_value*np.sqrt(variance)
prediction_half_widths = critical_value*np.sqrt(variance + model.noise_variance)
confidence_bound_0 = f_m_0s - confidence_half_widths
confidence_bound_1 = f_m_0s + confidence_half_widths
prediction_bound_0 = f_m_0s - prediction_half_widths
prediction_bound_1 = f_m_0s + prediction_half_widths
return np.array([confidence_bound_0, confidence_bound_1,
prediction_bound_0, prediction_bound_1])
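# Worked example of the delta method used above (numbers are illustrative):
# for f(x) = m*x + c with best-fit parameters B = [m, c], the gradient is
# df/dB = [x, 1]; at x = 2 with pcov = [[0.04, 0.], [0., 0.01]] this gives
# var(f) = [2, 1].pcov.[2, 1]^T = 4*0.04 + 0.01 = 0.17, so the confidence
# half-width is t_crit * sqrt(0.17) and the prediction half-width is
# t_crit * sqrt(0.17 + noise_variance).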
def abs_line_project(M, n):
n = unit_normalize(n)
return n.dot(M).dot(n.T)
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are pass on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
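# A short usage sketch for plot_cov_ellipse(), drawing a 2-sigma ellipse for
# an assumed 2x2 covariance matrix; all numbers here are illustrative only.
def example_cov_ellipse_plot():
    cov = np.array([[2.0, 0.3],
                    [0.3, 0.5]])
    fig, ax = plt.subplots()
    plot_cov_ellipse(cov, pos=[1., 2.], nstd=2, ax=ax, color='grey', alpha=0.5)
    ax.set_xlim(-3., 5.)
    ax.set_ylim(-1., 5.)
    return fig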
def corner_plot(popt, pcov, param_names=[], n_std = 1.):
"""
Creates a corner plot of covariances
Parameters
----------
popt : numpy array
Optimized parameters
pcov : 2D numpy array
Covariance matrix of the parameters
param_names : optional list
Parameter names
n_std : float
Number of standard deviations for ellipse
Returns
-------
fig : matplotlib.pyplot.figure object
ax_array : list of matplotlib Axes objects
"""
if len(pcov[0]) != len(pcov[:,0]):
raise Exception('Covariance matrices must be square')
n_params = len(pcov[0])
if n_params < 2:
raise Exception('Covariance matrix must be at least 2x2 for a corner plot to be plotted')
# ellipse plotting is prone to rounding errors, so we scale the plots here
scaling = 1./np.power(10., np.around(np.log10(np.abs(popt)) - 0.5))
scaling = np.outer(scaling, scaling)
fig, ax_array = plt.subplots(n_params-1, n_params-1)
for i in range(n_params-1):
for j in range(1, i+1):
fig.delaxes(ax_array[j-1][i])
for j in range(i+1, n_params):
indices = np.array([i, j])
projected_cov = (pcov*scaling)[indices[:, None], indices]
scaled_pos = np.array([popt[i]*np.sqrt(scaling[i][i]),
popt[j]*np.sqrt(scaling[j][j])])
plot_cov_ellipse(cov=projected_cov, pos=scaled_pos,
nstd=n_std, ax=ax_array[j-1][i], color='grey')
maxx = 1.5*n_std*np.sqrt(projected_cov[0][0])
maxy = 1.5*n_std*np.sqrt(projected_cov[1][1])
ax_array[j-1][i].set_xlim(scaled_pos[0]-maxx, scaled_pos[0]+maxx)
ax_array[j-1][i].set_ylim(scaled_pos[1]-maxy, scaled_pos[1]+maxy)
if param_names != []:
for i in range(n_params-1):
ax_array[n_params-2][i].set_xlabel('{0:s} (x 10^{1:d})'.format(param_names[i], -int(np.log10(np.sqrt(scaling[i][i])))))
for j in range(1, n_params):
ax_array[j-1][0].set_ylabel('{0:s} (x 10^{1:d})'.format(param_names[j], -int(np.log10(np.sqrt(scaling[j][j])))))
return fig, ax_array
def weighted_residual_plot(ax, model, flag=None, sd_limit=3, cmap=plt.cm.RdYlBu, plot_axes=[0, 1], scale_axes=[1., 1.]):
"""
Creates a plot of the weighted residuals
The user can choose the projection axes, and scaling to apply to those axes
The chosen color palette (cmap) is discretised by standard deviation up to a cut off value
of sd_limit.
Parameters
----------
ax : matplotlib Axes object
model : user-defined object
A model as used by nonlinear_least_squares_fit
Must contain the attributes model.data,
model.weighted_residuals and
model.flags (if flag is not None).
flag : string
String to determine which data to plot.
Finds matches with model.flags.
sd_limit : float
Data with weighted residuals exceeding this
limit are plotted in black
cmap : matplotlib color palette
plot_axes : list of integers
Data axes to use as plot axes
scale_axes : list of floats
Plot axes are scaled by multiplication of the data by these values
Returns
-------
ax : matplotlib Axes object
"""
if flag is None:
mask = range(len(model.data[:,0]))
else:
mask = [i for i, flg in enumerate(model.flags) if flg == flag]
cmap.set_under('k')
cmap.set_over('k')
bounds = np.linspace(-sd_limit, sd_limit, sd_limit*2+1)
norm = colors.BoundaryNorm(bounds, cmap.N)
im = ax.scatter(model.data[:,plot_axes[0]][mask]*scale_axes[0], model.data[:,plot_axes[1]][mask]*scale_axes[1], c=model.weighted_residuals[mask], cmap=cmap, norm=norm, s=50)
plt.colorbar(im, ax=ax, label='Misfit (standard deviations)')
def extreme_values(weighted_residuals, confidence_interval):
'''
This function uses extreme value theory to calculate the number of
standard deviations away from the mean at which we should expect to bracket
*all* of our n data points at a certain confidence level.
It then uses that value to identify which (if any) of the data points
lie outside that region, and calculates the corresponding probabilities
of finding a data point at least that many standard deviations away.
Parameters
----------
weighted_residuals : array of floats
Array of residuals weighted by the square root of their
variances wr_i = r_i/sqrt(var_i)
confidence_interval : float
Probability at which all the weighted residuals lie
within the confidence bounds
Returns
-------
confidence_bound : float
Number of standard deviations at which we should expect to encompass all
data at the user-defined confidence interval.
indices : array of floats
Indices of weighted residuals exceeding the confidence_interval
defined by the user
probabilities : array of floats
The probabilities that the extreme data point of the distribution lies
further from the mean than the observed position wr_i for each i in
the "indices" output array.
'''
n=len(weighted_residuals)
mean = norm.isf(1./n)
scale = 0.8/np.power(np.log(n), 1./2.) # good approximation for > 10 data points
c = 0.33/np.power(np.log(n), 3./4.) # good approximation for > 10 data points
# We now need a 1-tailed probability from the given confidence_interval
# p_total = 1. - confidence_interval = p_upper + p_lower - p_upper*p_lower
# p_total = 1. - confidence_interval = 2p - p^2, therefore:
p = 1. - np.sqrt(confidence_interval)
confidence_bound = genextreme.isf(p, c, loc=mean, scale=scale)
indices = [i for i, r in enumerate(weighted_residuals) if np.abs(r) > confidence_bound]
probabilities = 1. - np.power(genextreme.sf(np.abs(weighted_residuals[indices]), c, loc=mean, scale=scale) - 1., 2.) # Convert back to 2-tailed probabilities
return confidence_bound, indices, probabilities
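# A short usage sketch for extreme_values(), assuming synthetic, normally
# distributed weighted residuals; the sample size and confidence level are
# illustrative only.
def example_extreme_values():
    weighted_residuals = np.random.normal(0., 1., 100)
    confidence_bound, indices, probabilities = extreme_values(weighted_residuals, 0.95)
    # residuals lying further than confidence_bound standard deviations from
    # zero are flagged, together with the probability of such an extreme value
    return confidence_bound, indices, probabilities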
def plot_residuals(ax, weighted_residuals, n_bins=None, flags=[]):
if flags == []:
flags = [''] * len(weighted_residuals)
list_flags = ['']
else:
list_flags = list(set(flags))
if n_bins is None:
try: # Only works for recent versions of numpy
bin_heights, bin_bounds = np.histogram(weighted_residuals,
bins='auto',
normed=1.)
n_bins = len(bin_heights)
except:
n_bins = 11
mask = [ i for i, f in enumerate(flags) ]
for flag in list_flags:
binwidth = np.ptp(weighted_residuals)/n_bins
dmin = min(weighted_residuals) - binwidth
dmax = max(weighted_residuals) + binwidth
bins = np.linspace(dmin, dmax, n_bins)
bin_heights, bin_bounds = np.histogram(weighted_residuals[mask],
bins=bins,
normed=1.)
normalisation = float(len(weighted_residuals[mask]))/float(len(weighted_residuals))
bin_centers = (bin_bounds[:-1] + bin_bounds[1:])/2.
bin_heights = bin_heights*normalisation
bin_widths = bin_bounds[1] - bin_bounds[0]
plt.bar(bin_centers, bin_heights, width = bin_widths, label=flag, alpha=0.2)
mask = [ i for i, f in enumerate(flags) if f != flag and i in mask ]
x = np.linspace(bin_bounds[0], bin_bounds[-1], 1001)
ax.plot(x, norm.pdf(x)*normalisation)
ax.set_title('Residual plot versus expected normal distribution')
ax.set_xlabel('Number of standard deviations from the mean')
ax.set_ylabel('Probability')
ax.legend(loc='upper right')
|
gpl-2.0
|
mxjl620/scikit-learn
|
examples/svm/plot_svm_kernels.py
|
329
|
1971
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
bnaul/scikit-learn
|
examples/ensemble/plot_gradient_boosting_regression.py
|
11
|
5041
|
"""
============================
Gradient Boosting regression
============================
This example demonstrates Gradient Boosting to produce a predictive
model from an ensemble of weak predictive models. Gradient boosting can be used
for regression and classification problems. Here, we will train a model to
tackle a diabetes regression task. We will obtain the results from
:class:`~sklearn.ensemble.GradientBoostingRegressor` with least squares loss
and 500 regression trees of depth 4.
Note: For larger datasets (n_samples >= 10000), please refer to
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# Maria Telenczuk <https://github.com/maikia>
# Katrina Ni <https://github.com/nilichen>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, ensemble
from sklearn.inspection import permutation_importance
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# %%
# Load the data
# -------------------------------------
#
# First we need to load the data.
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# %%
# Data preprocessing
# -------------------------------------
#
# Next, we will split our dataset to use 90% for training and leave the rest
# for testing. We will also set the regression model parameters. You can play
# with these parameters to see how the results change.
#
# n_estimators : the number of boosting stages that will be performed.
# Later, we will plot deviance against boosting iterations.
#
# max_depth : limits the number of nodes in the tree.
# The best value depends on the interaction of the input variables.
#
# min_samples_split : the minimum number of samples required to split an
# internal node.
#
# learning_rate : how much the contribution of each tree will shrink.
#
# loss : loss function to optimize. The least squares function is used in this
# case; however, there are many other options (see
# :class:`~sklearn.ensemble.GradientBoostingRegressor`).
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1, random_state=13)
params = {'n_estimators': 500,
'max_depth': 4,
'min_samples_split': 5,
'learning_rate': 0.01,
'loss': 'ls'}
# %%
# Fit regression model
# -------------------------------------
#
# Now we will initiate the gradient boosting regressor and fit it to our
# training data. Let's also look at the mean squared error on the test data.
reg = ensemble.GradientBoostingRegressor(**params)
reg.fit(X_train, y_train)
mse = mean_squared_error(y_test, reg.predict(X_test))
print("The mean squared error (MSE) on test set: {:.4f}".format(mse))
# %%
# Plot training deviance
# -------------------------------------
#
# Finally, we will visualize the results. To do that we will first compute the
# test set deviance and then plot it against boosting iterations.
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(reg.staged_predict(X_test)):
test_score[i] = reg.loss_(y_test, y_pred)
fig = plt.figure(figsize=(6, 6))
plt.subplot(1, 1, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, reg.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
fig.tight_layout()
plt.show()
# %%
# Plot feature importance
# -------------------------------------
#
# Careful, impurity-based feature importances can be misleading for
# high cardinality features (many unique values). As an alternative,
# the permutation importances of ``reg`` can be computed on a
# held out test set. See :ref:`permutation_importance` for more details.
#
# For this example, the impurity-based and permutation methods identify the
# same 2 strongly predictive features but not in the same order. The third most
# predictive feature, "bp", is also the same for the 2 methods. The remaining
# features are less predictive and the error bars of the permutation plot
# show that they overlap with 0.
feature_importance = reg.feature_importances_
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
fig = plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, np.array(diabetes.feature_names)[sorted_idx])
plt.title('Feature Importance (MDI)')
result = permutation_importance(reg, X_test, y_test, n_repeats=10,
random_state=42, n_jobs=2)
sorted_idx = result.importances_mean.argsort()
plt.subplot(1, 2, 2)
plt.boxplot(result.importances[sorted_idx].T,
vert=False, labels=np.array(diabetes.feature_names)[sorted_idx])
plt.title("Permutation Importance (test set)")
fig.tight_layout()
plt.show()
|
bsd-3-clause
|
numenta/htmresearch
|
htmresearch/frameworks/grid_cell_learning/CAN.py
|
4
|
33086
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
import copy
import os
from compute_hardwired_weights import compute_hardwired_weights
# STDP kernel time constant in seconds. Used for the default kernel.
STDP_TIME_CONSTANT = 0.012
# How often to update plots
PLOT_INTERVAL = 0.1
# How often path integration estimates are collected. This needs to be tuned,
# as values that are too small will lead to constant estimates of zero movement.
ESTIMATION_INTERVAL = 0.5
def defaultSTDPKernel(preSynActivation,
postSynActivation,
dt,
inhibitoryPresyn=False,
inhibitoryPostsyn=False):
"""
This function implements a modified version of the STDP kernel from
Widloski & Fiete, 2014.
:param preSynActivation: Vector of pre-synaptic activations
:param postSynActivation: Vector of post-synaptic activations
:param dt: the difference in time between the two (in seconds), positive if
after and negative if before
:return: A matrix of synapse weight changes.
"""
stdpScaler = 1
stdpTimeScaler = 1.
# Set up STDP directions
if inhibitoryPresyn and not inhibitoryPostsyn:
#I-E, anti-Hebbian (weakening inhibitory connections)
stdpScaler *= 1
elif not inhibitoryPresyn and inhibitoryPostsyn:
# E-I, anti-Hebbian
stdpScaler *= -1
elif inhibitoryPresyn and inhibitoryPostsyn:
# I-I, Hebbian (strengthening inhibitory connections)
stdpScaler *= -1
# Set up parameters
if dt < 0 and not inhibitoryPresyn:
# Anti-causal
stdpScaler *= 1
stdpTimeScaler *= 3
elif dt > 0 and not inhibitoryPresyn:
stdpScaler *= 1.2
stdpTimeScaler *= 4
elif dt > 0 and inhibitoryPresyn:
stdpScaler *= .5
stdpTimeScaler *= 4
elif dt < 0 and inhibitoryPresyn:
stdpScaler *= 1
stdpTimeScaler *= 2
timeFactor = np.exp(-1*np.abs(dt)/(STDP_TIME_CONSTANT*stdpTimeScaler))
preSynActivation *= timeFactor*np.sign(dt)*stdpScaler
updates = np.outer(preSynActivation, postSynActivation)
return updates
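# A short usage sketch for defaultSTDPKernel(), assuming small random
# activation vectors; the population size and dt below are illustrative only.
# The kernel rescales preSynActivation in place, so a copy is passed here.
def exampleSTDPKernelCall():
  pre = np.random.random_sample(8)
  post = np.random.random_sample(8)
  # presynaptic activity 5 ms before postsynaptic activity, E-E by default
  dW = defaultSTDPKernel(np.copy(pre), post, dt=0.005)
  return dW  # (8, 8) matrix of weight changes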
"""
This class provides a framework for learning a continuous attractor model of
a grid cell module, using rate coding. It is loosely based on the ideas from
Widloski & Fiete, 2014, who use a similar system to learn a spiking version of
a continuous attractor network.
This class is based on having two populations of excitatory neurons, one which
is hard-wired to prefer "left" movement, and one which is hardwired to prefer
"right" movement. It also includes a population of inhibitory neurons.
It lacks connections between excitatory neurons; all CAN dynamics are based on
inhibition.
"""
class CAN1DNetwork(object):
def __init__(self,
numExcitatory,
numInhibitory,
learningRate,
dt,
stdpWindow=10,
decayConstant=0.03,
velocityGain=0.9,
placeGainE=10,
placeGainI=50,
sigmaLoc=0.01,
stdpKernel=defaultSTDPKernel,
globalTonicMagnitude=0,
constantTonicMagnitude=0,
learnFactorII=7,
learnFactorEI=2,
learnFactorIE=1,
envelopeWidth=0.8,
envelopeFactor=25,
initialWeightScale=0.003,
clip=10,
plotting=True):
"""
:param numExcitatory: Size of each excitatory population. Note that there
are several populations, each of which has this many cells.
:param numInhibitory: Size of inhibitory population.
:param learningRate: The learning rate to use.
:param dt: The time step to use for integration.
:param stdpWindow: Number of time steps in each direction to use for STDP.
Updates are performed only once. This will lead to a buffer of
length (2*stdpWindow + 1) being created.
:param decayConstant: The time constant for decay of neural activity
:param velocityGain: Multiplier scaling impact of velocity.
:param placeGainE: Multiplier scaling impact of place code on E cells.
:param placeGainI: Multiplier scaling impact of place code on I cells.
:param sigmaLoc: Multiplier scaling width of place code bump.
:param stdpKernel: The STDP kernel to be used. See the function
defaultSTDPKernel for an example.
:param globalTonicMagnitude: The magnitude of the global tonic input
during training.
:param constantTonicMagnitude: The magnitude of the non-velocity-dependent
constant tonic input during training
:param learnFactorII: Extra learning rate for II connections.
:param learnFactorEI: Extra learning rate for EI connections.
:param learnFactorIE: Extra learning rate for IE connections.
:param envelopeWidth: The distance away from a boundary at which
the suppressive envelope is first applied.
:param envelopeFactor: The steepness of the suppressive envelope.
:param initialWeightScale: The maximum initial weight value.
:param clip: The maximum possible activation. Set to np.inf to disable.
:param plotting: Whether or not to generate plots. False speeds training.
"""
# Synapse weights. We assume dense connections.
# Inhibitory neuron recurrent weights.
self.weightsII = np.random.random_sample((numInhibitory, numInhibitory))* \
initialWeightScale * -1.
# Excitatory-to-inhibitory weights
self.weightsELI = np.random.random_sample((numExcitatory, numInhibitory))* \
initialWeightScale
self.weightsERI = np.random.random_sample((numExcitatory, numInhibitory))* \
initialWeightScale
# Inhibitory-to-excitatory weights
self.weightsIEL = np.random.random_sample((numInhibitory, numExcitatory))* \
initialWeightScale * -1.
self.weightsIER = np.random.random_sample((numInhibitory, numExcitatory))* \
initialWeightScale * -1.
# Determine a starting place code, which will govern activation
# during learning. This code is ignored during testing.
self.placeCodeE = np.arange(0, 1, 1./numExcitatory)
self.placeCodeI = np.arange(0, 1, 1./numInhibitory)
self.placeGainE = placeGainE
self.placeGainI = placeGainI
self.velocityGain = velocityGain
self.sigmaLoc = sigmaLoc
self.learningRate = learningRate
self.dt = dt
self.decayConstant = decayConstant
self.activationsI = np.zeros((numInhibitory,), dtype="float32")
self.activationsER = np.zeros((numExcitatory,), dtype="float32")
self.activationsEL = np.zeros((numExcitatory,), dtype="float32")
self.instantaneousI = np.zeros((numInhibitory,), dtype="float32")
self.instantaneousER = np.zeros((numExcitatory,), dtype="float32")
self.instantaneousEL = np.zeros((numExcitatory,), dtype="float32")
self.stdpWindow = stdpWindow
self.stdpKernel = stdpKernel
self.activationBuffer = deque(maxlen=int(self.stdpWindow))
self.globalTonicMagnitude = globalTonicMagnitude
self.constantTonicMagnitude = constantTonicMagnitude
self.envelopeWidth = envelopeWidth
self.envelopeFactor = envelopeFactor
self.learnFactorII = learnFactorII
self.learnFactorEI = learnFactorEI
self.learnFactorIE = learnFactorIE
self.envelopeI = self.computeEnvelope(self.placeCodeI)
self.envelopeE = self.computeEnvelope(self.placeCodeE)
self.clip = clip
self.plotting = plotting
def calculatePathIntegrationError(self, time, dt=None, trajectory=None,
envelope=False, inputNoise=None):
"""
Calculate the error of our path integration, relative to an ideal module.
To do this, we track the movement of an individual bump
Note that the network must be trained before this is done.
:param time: How long to simulate for in seconds. We recommend using a
small value, e.g. ~10s.
:param trajectory: An optional trajectory that specifies how the network moves.
:param inputNoise: Whether or not to apply noise, and how much.
:return: A tuple of the true trajectory and the inferred trajectory.
"""
# Set up plotting
if self.plotting:
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(411)
self.ax2 = self.fig.add_subplot(412)
self.ax3 = self.fig.add_subplot(413)
self.ax4 = self.fig.add_subplot(414)
plt.tight_layout()
plt.ion()
self.fig.show()
self.fig.canvas.draw()
mouse = plt.imread(os.path.dirname(os.path.realpath(__file__))
+ "/mouse_graphic.png")
self.ax1.set_xlabel("Excitatory population activity")
self.ax2.set_xlabel("Inhibitory population activity")
self.ax3.set_xlabel("Movement in cells")
self.ax3.set_ylabel("Cost")
self.ax4.set_xlabel("Location")
plt.tight_layout()
if dt is None:
oldDt = self.dt
else:
oldDt = self.dt
self.dt = dt
# Simulate for a second to get nice starting activation bumps.
# Turn plotting off so as not to confuse the viewer
oldPlotting = self.plotting
self.plotting = False
self.simulate(1, 1, 1, 0, envelope=envelope, inputNoise=None)
self.plotting = oldPlotting
estimatedVelocities = []
trueVelocities = []
times = np.arange(0, time, self.dt)
if trajectory is None:
# Sum together two different sinusoidals for a more interesting path.
trajectory = (np.sin((-times*np.pi/10 - np.pi/2.))+1)*2.5
trajectory += (np.cos((-times*np.pi/3 - np.pi/2.))+1)*.75
velocities = np.diff(trajectory)/self.dt
oldActivations = copy.copy(self.activationsI)
oldX = trajectory[0]
for i, t in enumerate(times[:-1]):
v = velocities[i]
x = trajectory[i]
feedforwardInputI = np.ones(self.activationsI.shape)
feedforwardInputE = np.ones(self.activationsEL.shape)
if inputNoise is not None:
noisesI = np.random.random_sample(feedforwardInputI.shape)*inputNoise
noisesE = np.random.random_sample(feedforwardInputE.shape)*inputNoise
else:
noisesE = 1.
noisesI = 1.
self.update(feedforwardInputI*noisesI, feedforwardInputE*noisesE,
v, True, envelope=envelope)
estimationTime = np.abs(np.mod(t, ESTIMATION_INTERVAL))
if estimationTime <= 0.00001 or \
np.abs(estimationTime - ESTIMATION_INTERVAL) <= 0.00001:
rotations = [np.sum(np.abs(np.roll(oldActivations, i) -
self.activationsI))
for i in range(-20, 21, 1)]
shift = np.argmin(rotations) - 20
trueVelocities.append(x - oldX)
oldX = x
oldActivations = copy.copy(self.activationsI)
estimatedVelocities.append(shift)
if self.plotting:
plotTime = np.abs(np.mod(t, PLOT_INTERVAL))
if plotTime <= 0.00001 or np.abs(plotTime - PLOT_INTERVAL) <= 0.00001:
self.ax3.clear()
self.ax3.plot(np.arange(-len(rotations)/2 + 1, len(rotations)/2 + 1, 1),
rotations,
color="g",
label="Shift")
self.ax3.legend(loc="best")
self.ax3.set_xlabel("Movement in cells")
self.ax3.set_ylabel("Cost")
self.ax3.axvline(x=shift)
self.ax4.clear()
self.ax4.set_xlim(np.amin(trajectory), np.amax(trajectory))
self.ax4.set_ylim(0, 1)
mouse_bound = (x - 0.25*np.sign(v), x + 0.25*np.sign(v), .05, .55)
self.ax4.imshow(mouse,
aspect='auto',
extent=mouse_bound,
zorder=-1)
self.ax4.set_xlabel("Location")
self.ax4.axes.get_yaxis().set_visible(False)
self.fig.canvas.draw()
self.plotActivation(time=t, velocity=v)
self.dt = oldDt
return(np.asarray(trueVelocities), np.asarray(estimatedVelocities))
def hardwireWeights(self, flip=False):
(G_I_EL, G_I_ER, G_EL_I, G_ER_I, G_I_I) = \
compute_hardwired_weights(2.2,
self.activationsEL.shape[0],
self.activationsI.shape[0],
True)
# We need to flip the signs for the inhibitory weights;
# in our convention, inhibitory weights are always negative,
# but in theirs, they are positive and the sign flip is applied
# during activation.
self.weightsII = -1.*G_I_I
# If we want the network to path integrate in the right direction,
# flip ELI and ERI.
if flip:
self.weightsELI = G_ER_I
self.weightsERI = G_EL_I
self.weightsIEL = -1.*G_I_ER
self.weightsIER = -1.*G_I_EL
else:
self.weightsELI = G_EL_I
self.weightsERI = G_ER_I
self.weightsIEL = -1. * G_I_EL
self.weightsIER = -1. * G_I_ER
def simulate(self, time,
feedforwardInputI,
feedforwardInputE,
v,
recurrent=True,
dt=None,
envelope=False,
inputNoise=None):
"""
:param time: Amount of time to simulate.
Divided into chunks of len dt.
:param feedforwardInputI: feedforward input to inhibitory cells. Must have
shape (numInhibitory,). Should be total input over period time.
:param feedforwardInputE: feedforward input to excitatory cells. Must have
shape (numExcitatory,). Applied equally to ER and EL cells.
Should be total input over period time.
:param v: Velocity. Should be a scalar.
:param recurrent: whether or not recurrent connections should be used.
Set to False during training to follow the methods of the original
model.
:return: Nothing. All changes internal.
"""
# Set up plotting
if self.plotting:
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(211)
self.ax2 = self.fig.add_subplot(212)
plt.ion()
self.ax1.set_xlabel("Excitatory population activity")
self.ax2.set_xlabel("Inhibitory population activity")
plt.tight_layout()
self.fig.show()
self.fig.canvas.draw()
self.activationsI = np.random.random_sample(self.activationsI.shape)
self.activationsEL = np.random.random_sample(self.activationsEL.shape)
self.activationsER = np.random.random_sample(self.activationsER.shape)
if dt is None:
oldDt = self.dt
else:
oldDt = self.dt
self.dt = dt
times = np.arange(0, time, self.dt)
for i, t in enumerate(times):
if inputNoise is not None:
noisesI = np.random.random_sample(feedforwardInputI.shape)*inputNoise
noisesE = np.random.random_sample(feedforwardInputE.shape)*inputNoise
else:
noisesE = 1.; noisesI = 1.
self.update(feedforwardInputI*noisesI, feedforwardInputE*noisesE,
v, recurrent, envelope=envelope)
if self.plotting:
plotTime = np.abs(np.mod(t, PLOT_INTERVAL))
if plotTime <= 0.00001 or np.abs(plotTime - PLOT_INTERVAL) <= 0.00001:
self.plotActivation(time=t, velocity=v)
self.dt = oldDt
def update(self, feedforwardInputI, feedforwardInputE, v, recurrent=True,
envelope=False, iSpeedTuning=False, enforceDale=True):
"""
Do one update of the CAN network, of length self.dt.
:param feedforwardInputI: The feedforward input to inhibitory cells.
:param feedforwardInputE: The feedforward input to excitatory cells.
:param v: The current velocity.
:param recurrent: Whether or not recurrent connections should be used.
:param envelope: Whether or not an envelope should be applied.
:param iSpeedTuning: Whether or not inhibitory cells should also have their
activations partially depend on current movement speed. This is
necessary for periodic training, serving a role similar to that of
the envelope.
:param enforceDale: Whether or not Dale's law should be enforced locally. Helps with
training with recurrent weights active, but can slow down training.
"""
self.instantaneousI.fill(0)
self.instantaneousEL.fill(0)
self.instantaneousER.fill(0)
self.instantaneousI += feedforwardInputI
self.instantaneousEL += feedforwardInputE
self.instantaneousER += feedforwardInputE
if enforceDale:
weightsII = np.minimum(self.weightsII, 0)
weightsIER = np.minimum(self.weightsIER, 0)
weightsIEL = np.minimum(self.weightsIEL, 0)
weightsELI = np.maximum(self.weightsELI, 0)
weightsERI = np.maximum(self.weightsERI, 0)
else:
weightsII = self.weightsII
weightsIER = self.weightsIER
weightsIEL = self.weightsIEL
weightsELI = self.weightsELI
weightsERI = self.weightsERI
if recurrent:
self.instantaneousI += (np.matmul(self.activationsEL, weightsELI) +\
np.matmul(self.activationsER, weightsERI) +\
np.matmul(self.activationsI, weightsII))
self.instantaneousEL += np.matmul(self.activationsI, weightsIEL)
self.instantaneousER += np.matmul(self.activationsI, weightsIER)
self.instantaneousEL *= max((1 - self.velocityGain*v), 0)
self.instantaneousER *= max((1 + self.velocityGain*v), 0)
if iSpeedTuning:
self.instantaneousI *= min(self.velocityGain*np.abs(v), 1)
self.instantaneousI += self.constantTonicMagnitude
self.instantaneousEL += self.constantTonicMagnitude
self.instantaneousER += self.constantTonicMagnitude
if envelope:
self.instantaneousI *= self.envelopeI
self.instantaneousER *= self.envelopeE
self.instantaneousEL *= self.envelopeE
# Input must be positive.
np.maximum(self.instantaneousI, 0., self.instantaneousI)
np.maximum(self.instantaneousEL, 0., self.instantaneousEL)
np.maximum(self.instantaneousER, 0., self.instantaneousER)
# Activity decay and timestep adjustment
self.activationsI += (self.instantaneousI - self.activationsI/self.decayConstant)*self.dt
self.activationsEL += (self.instantaneousEL - self.activationsEL/self.decayConstant)*self.dt
self.activationsER += (self.instantaneousER - self.activationsER/self.decayConstant)*self.dt
# Finally, clip activations for stability
np.minimum(self.activationsI, self.clip, self.activationsI)
np.minimum(self.activationsEL, self.clip, self.activationsEL)
np.minimum(self.activationsER, self.clip, self.activationsER)
def decayWeights(self, decayConst=60):
"""
Decay the network's weights.
:param decayConst: The time constant (in seconds) to use for decay.
Note: If applied, decay must be used extremely carefully, as
it has a tendency to cause asymmetries in the network weights.
"""
self.weightsII -= self.weightsII*self.dt/decayConst
self.weightsELI -= self.weightsELI*self.dt/decayConst
self.weightsERI -= self.weightsERI*self.dt/decayConst
self.weightsIEL -= self.weightsIEL*self.dt/decayConst
self.weightsIER -= self.weightsIER*self.dt/decayConst
def learn(self,
runs,
dir=1,
periodic=False,
recurrent=True,
randomSpeed=False):
"""
Traverses a sinusoidal trajectory across the environment, learning during
the process. A pair of runs across the environment (one in each direction)
takes 10 seconds if in a periodic larger environment, and 4 seconds in a
smaller nonperiodic environment.
:param runs: How many runs across the environment to do. Each "run" is
defined as a full sweep across the environment in each direction.
:param dir: Which direction to move in first. Valid values are 1 and -1.
:param periodic: Whether or not the learning environment should be
periodic (toroidal).
:param recurrent: Whether or not recurrent connections should be active
during learning. Warning: True can lead to instability.
:param randomSpeed: Whether or not to use a random maximum speed for each
run, to better simulate real learning. Can degrade performance.
Only supported in periodic environments.
"""
# Set up plotting
if self.plotting:
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(411)
self.ax2 = self.fig.add_subplot(412)
self.ax3 = self.fig.add_subplot(212)
plt.ion()
plt.tight_layout()
self.ax3.set_xlabel("Inhibitory-Inhibitory connections")
self.fig.show()
self.fig.canvas.draw()
# Set up the trajectories and running times.
if not periodic:
time = 4.*runs
timings = [np.arange(0, time, self.dt)]
trajectories = [(np.sin(dir*(timings[0]*np.pi/2 - np.pi/2.))+1)/2]
else:
# Space the starting points of the runs out. This tends to improve the
# translation-invariance of the weight profiles, and thus gives better
# overall path integration.
startingPoint = 0
trajectories = []
timings = []
time = 0
residTime = 0
for run in xrange(runs):
if randomSpeed:
speed = np.random.random() + 0.5
else:
speed = 1.
length = 10./speed
runTimes = np.arange(0, length, self.dt)
trajectory = (np.sin(dir*(runTimes*np.pi/(5/speed) - np.pi/2.)) + 1)*\
2.5 + startingPoint
trajectories.append(trajectory)
timings.append(runTimes + time)
time += length
startingPoint += 1./runs
for trajectory, timing in zip(trajectories, timings):
self.activationsI = np.zeros(self.activationsI.shape)
self.activationsER = np.zeros(self.activationsER.shape)
self.activationsEL = np.zeros(self.activationsEL.shape)
velocities = np.diff(trajectory)/self.dt
for i, t in enumerate(timing[:-1]):
x = trajectory[i] % 1
v = velocities[i]
feedforwardInputI = np.exp(-1.*(self.placeCodeI - x)**2 /
(2*self.sigmaLoc**2))
feedforwardInputI *= self.placeGainI
feedforwardInputI += self.globalTonicMagnitude
feedforwardInputE = np.exp(-1.*(self.placeCodeE - x)**2 /
(2*self.sigmaLoc**2))
feedforwardInputE *= self.placeGainE
feedforwardInputE += self.globalTonicMagnitude
self.update(feedforwardInputI,
feedforwardInputE,
v,
recurrent=recurrent,
envelope=(not periodic),
iSpeedTuning=periodic,
enforceDale=True,
)
self.stdpUpdate(time=i)
if self.plotting:
residTime += self.dt
if residTime > PLOT_INTERVAL:
residTime -= PLOT_INTERVAL
self.ax3.matshow(self.weightsII, cmap=plt.cm.coolwarm)
self.plotActivation(position=x, time=t)
# Carry out any hanging STDP updates.
self.stdpUpdate(time=i, clearBuffer=True)
# Finally, enforce Dale's law. Inhibitory neurons must be inhibitory,
# excitatory neurons must be excitatory.
# This could be handled through update, but it's faster to do it here.
np.minimum(self.weightsII, 0, self.weightsII)
np.minimum(self.weightsIER, 0, self.weightsIER)
np.minimum(self.weightsIEL, 0, self.weightsIEL)
np.maximum(self.weightsELI, 0, self.weightsELI)
np.maximum(self.weightsERI, 0, self.weightsERI)
def normalize_weights(self, IIMax, IEMax, EIMax):
"""
Rescale our weight matrices to have a certain maximum absolute value.
:param IIMax: The target maximum absolute value for the II weights
:param IEMax: The target maximum absolute value for both IE weight matrices
:param EIMax: The target maximum absolute value for both EI weight matrices
"""
weights = [self.weightsII, self.weightsIEL, self.weightsIER,
self.weightsELI, self.weightsERI]
norms = [IIMax, IEMax, IEMax, EIMax, EIMax]
for w, n in zip(weights, norms):
maximum = np.amax(np.abs(w))
w /= maximum
w *= n
def computeEnvelope(self, placeCode):
"""
Compute an envelope for use in suppressing border cells.
:param placeCode: The place code representing the population the envelope
will be used for.
:return: A numpy array that can be elementwise-multiplied with activations
for the given cell population to apply the envelope.
"""
places = np.abs(placeCode - 0.5)
envelope = [1 if p < 1 - self.envelopeWidth else
np.exp(-1.*self.envelopeFactor *
((p - 1 + self.envelopeWidth)/self.envelopeWidth)**2)
for p in places]
return np.asarray(envelope)
def plotActivation(self, position=None, time=None, velocity=None):
"""
Plot the activation of the current cell populations. Assumes that
two axes have already been created, ax1 and ax2. If done in a Jupyter
notebook, this plotting will overwrite the old plot.
:param position: The current location of the animal
:param time: The current time in the simulation
:param velocity: The current velocity of the animal
"""
self.ax1.clear()
x = np.arange(0, len(self.activationsER), 1)
self.ax1.plot(x, self.activationsEL, color = "b", label = "EL Activation")
self.ax1.plot(x, self.activationsER, color = "r", label = "ER Activation")
if position is not None:
self.ax1.axvline(x=position*len(self.activationsER))
self.ax1.legend(loc = "best")
self.ax2.clear()
x = np.arange(0, len(self.activationsI), 1)
self.ax2.plot(x, self.activationsI, color = "k", label = "I Activation")
if position is not None:
self.ax2.axvline(x=position*len(self.activationsI))
self.ax2.legend(loc = "best")
titleString = ""
if time is not None:
titleString += "Time = {}".format(str(time))
if velocity is not None:
titleString += " Velocity = {}".format(str(velocity)[:4])
if position is not None:
titleString += " Position = {}".format(str(position)[:4])
self.ax1.set_title(titleString)
self.ax1.set_xlabel("Excitatory activity")
self.ax2.set_xlabel("Inhibitory activity")
self.fig.canvas.draw()
def stdpUpdate(self, time, clearBuffer=False):
"""
Adds the current activations to the tracking queue, and then performs an
STDP update if possible.
:param time: The current time. Must be provided.
:param clearBuffer: Set as True to clear the activation buffer.
This should be done at the end of training.
"""
if clearBuffer:
while len(self.activationBuffer) > 1:
baseI, baseEL, baseER, t = self.activationBuffer.popleft()
for (I, EL, ER, i) in self.activationBuffer:
t = 1. * (i - t) * self.dt
self.weightsII += self.stdpKernel(self.learningRate *\
self.learnFactorII *\
self.dt *\
baseI, I, t,
True, True)
self.weightsIEL += self.stdpKernel(self.learningRate *\
self.learnFactorIE *\
self.dt *\
baseI, EL, t,
True, False)
self.weightsIER += self.stdpKernel(self.learningRate *\
self.learnFactorIE *\
self.dt *\
baseI, ER, t,
True, False)
self.weightsELI += self.stdpKernel(self.learningRate *\
self.learnFactorEI *\
self.dt *\
baseEL, I, t,
False, True)
self.weightsERI += self.stdpKernel(self.learningRate *\
self.learnFactorEI *\
self.dt *\
baseER, I, t,
False, True)
else:
for I, EL, ER, i in reversed(self.activationBuffer):
t = (i - time) * self.dt
self.weightsII += self.stdpKernel(self.learningRate *\
self.learnFactorII *\
self.dt *\
self.instantaneousI, I, t,
True, True)
self.weightsIEL += self.stdpKernel(self.learningRate *\
self.learnFactorIE *\
self.dt *\
self.instantaneousI, EL, t,
True, False)
self.weightsIER += self.stdpKernel(self.learningRate *\
self.learnFactorIE *\
self.dt *\
self.instantaneousI, ER, t,
True, False)
self.weightsELI += self.stdpKernel(self.learningRate *\
self.learnFactorEI *\
self.dt *\
self.instantaneousEL, I, t,
False, True)
self.weightsERI += self.stdpKernel(self.learningRate *\
self.learnFactorEI *\
self.dt *\
self.instantaneousER, I, t,
False, True)
for I, EL, ER, i in self.activationBuffer:
t = (time - i) * self.dt
self.weightsII += self.stdpKernel(self.learningRate *\
self.learnFactorII *\
self.dt *\
I, self.instantaneousI, t,
True, True)
self.weightsIEL += self.stdpKernel(self.learningRate *\
self.learnFactorIE *\
self.dt *\
I, self.instantaneousEL, t,
True, False)
self.weightsIER += self.stdpKernel(self.learningRate *\
self.learnFactorIE *\
self.dt *\
I, self.instantaneousER, t,
True, False)
self.weightsELI += self.stdpKernel(self.learningRate *\
self.learnFactorEI *\
self.dt *\
EL, self.instantaneousI, t,
False, True)
self.weightsERI += self.stdpKernel(self.learningRate *\
self.learnFactorEI *\
self.dt *\
ER, self.instantaneousI, t,
False, True)
self.activationBuffer.append((np.copy(self.instantaneousI),
np.copy(self.instantaneousEL),
np.copy(self.instantaneousER), time))
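# A short usage sketch for CAN1DNetwork, assuming a small network, brief
# training and the default STDP kernel; every size, rate and duration below
# is illustrative only.
def exampleCANUsage():
  net = CAN1DNetwork(numExcitatory=200,
                     numInhibitory=200,
                     learningRate=0.001,
                     dt=0.01,
                     plotting=False)
  # train in a non-periodic environment; each run sweeps once in each direction
  net.learn(runs=10, dir=1, periodic=False, recurrent=False)
  # compare true movement against movement inferred from attractor bump motion
  trueVelocities, estimatedVelocities = net.calculatePathIntegrationError(
      10., envelope=True)
  return trueVelocities, estimatedVelocities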
|
agpl-3.0
|
numenta/htmresearch
|
projects/union_path_integration/plot_feature_distributions.py
|
4
|
5137
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Plot capacity trend charts."""
import argparse
from collections import defaultdict
import json
import math
import os
import itertools
import matplotlib.pyplot as plt
import matplotlib.lines
import numpy as np
import scipy.special
CWD = os.path.dirname(os.path.realpath(__file__))
CHART_DIR = os.path.join(CWD, "charts")
DETAILED_LABELS = False
def createTheChart(inFilename, outFilename, xlim2):
if not os.path.exists(CHART_DIR):
os.makedirs(CHART_DIR)
convergenceResultsByParams = defaultdict(lambda: defaultdict(list))
capacityResults = defaultdict(lambda: defaultdict(list))
with open(inFilename, "r") as f:
experiments = json.load(f)
for exp in experiments:
featureDistribution = exp[0]["featureDistribution"]
featuresPerObject = exp[0]["featuresPerObject"]
numObjects = exp[0]["numObjects"]
numUniqueFeatures = exp[0]["numFeatures"]
failed = exp[1]["convergence"].get("null", 0)
accuracy = 1.0 - (float(failed) / float(numObjects))
capacityResults[(featureDistribution, numUniqueFeatures, featuresPerObject)][numObjects].append(accuracy)
for occurrences, result in exp[1]["occurrencesConvergenceLog"]:
# k = np.median(occurrences)
k = min(occurrences)
convergenceResultsByParams[
(featureDistribution, numUniqueFeatures, featuresPerObject)][
k].append(result)
resultsByParams = {}
for params, convergenceResultsByMin in convergenceResultsByParams.iteritems():
resultsByParams[params] = sorted(
(sampleMinimum, float(sum(1 if r is not None else 0
for r in results)) / len(results))
for sampleMinimum, results in convergenceResultsByMin.iteritems())
fig, (ax1, ax2) = plt.subplots(figsize=(6.75, 3), ncols=2, sharey=True,
tight_layout = {"pad": 0})
objectSets = [
("AllFeaturesEqual_Replacement", 100, 10, "o", 3),
("AllFeaturesEqual_Replacement", 40, 10, "o", 3),
("AllFeaturesEqual_Replacement", 100, 5, "o", 3),
("AllFeaturesEqual_NoReplacement", 100, 10, "^", 4),
("TwoPools_Replacement", 100, 10, "^", 4),
("TwoPools_Structured", 100, 10, "^", 4),
]
for i, (featureDistribution, numUniqueFeatures, featuresPerObject, marker, markerSize) in enumerate(objectSets):
if DETAILED_LABELS:
if featureDistribution == "AllFeaturesEqual_Replacement":
label = "{}, {} features, {} per object".format(featureDistribution, numUniqueFeatures, featuresPerObject)
else:
label = featureDistribution
else:
label = "Object set {}".format(i + 1)
resultsByNumObjects = capacityResults[(featureDistribution, numUniqueFeatures, featuresPerObject)]
expResults = sorted((numObjects, sum(results) / len(results))
for numObjects, results in resultsByNumObjects.iteritems())
x, y = zip(*expResults)
ax1.plot(x, y, "{}-".format(marker), label=label, markersize=markerSize)
results = resultsByParams[(featureDistribution, numUniqueFeatures, featuresPerObject)]
x, y = zip(*results)
ax2.plot(x, y, "{}-".format(marker), label=label, markersize=markerSize)
ax1.set_xlabel("Number of Learned Objects")
ax1.set_ylabel("Recognition Accuracy\nAfter Many Sensations")
ax2.set_xlabel("Number of Locations Recalled by\nObject's Rarest Feature")
if xlim2 is not None:
ax2.set_xlim([0, xlim2])
# If there's any opacity, when we export a copy of this from Illustrator, it
# creates a PDF that isn't compatible with Word.
framealpha = 1.0
ax2.legend(loc="upper right", framealpha=framealpha)
filePath = os.path.join(CHART_DIR, outFilename)
print "Saving", filePath
plt.savefig(filePath)
if __name__ == "__main__":
plt.rc("font",**{"family": "sans-serif",
"sans-serif": ["Arial"],
"size": 8})
parser = argparse.ArgumentParser()
parser.add_argument("--inFile", type=str, required=True)
parser.add_argument("--outFile", type=str, required=True)
parser.add_argument("--xlim2", type=float, default=None)
args = parser.parse_args()
createTheChart(args.inFile, args.outFile, args.xlim2)
|
agpl-3.0
|
jadsonjs/DataScience
|
MachineLearning/clustering-hierarchical-agglomerative.py
|
1
|
4277
|
#
# This program is distributed without any warranty and it
# can be freely redistributed for research, classes or private studies,
# since the copyright notices are not removed.
#
# This code calculates the unsupervised model *** Hierarchical Agglomerative ***
#
# Jadson Santos - [email protected]
# based on: alexlimatds - https://github.com/alexlimatds/doctorate_machine_learning
#
# to run this example, install the Python modules:
#
# pip3 install scikit-learn
# pip3 install pandas
# pip3 install numpy
#
import pandas as pd
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_samples, davies_bouldin_score, adjusted_rand_score
#
# DEFINE YOUR DATASET HERE, ALREADY PRE PROCESSED !
#
# read the CSV file with your data base and put into a Pandas DataFrame
# https://www.shanelynn.ie/using-pandas-dataframe-creating-editing-viewing-data-in-python/
#
df = pd.read_csv('/Users/jadson/tmp/WDBC_preprocessed.csv')
#Getting ground truth
gtruth = df['diagnosis']
# remove the class column because clustering is unsupervised
df = df.drop(columns="diagnosis")
######################### Davies-Bouldin #########################
# keep the final indexes that will be saved to a file
dbsIndexes = {}
# calc DB index using
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.davies_bouldin_score.html
def calcDBsIndexes(preds, k):
db = davies_bouldin_score(df, preds)
dbsIndexes[k].append(db)
# save the DB indexes to a file
def printDBsIndexesToFile():
log_file = open('agglomerative-DBs.txt', 'w+')
log_file.write('k,DB_1\n')
for k in dbsIndexes.keys():
log_file.write('{},{}\n'.format(k, dbsIndexes[k][0]))
log_file.close()
######################### silhouettesIndexes #########################
# keep the final indexes that will be saved to a file
silhouettesIndexes = {}
# calc silhouette index using
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_samples.html
def calcSilhouettesIndexes(preds, k):
# Compute the silhouette scores for each instance
sample_silhouette_values = silhouette_samples(df, preds)
#iterate over clusters numbers
clusters = np.unique(preds)
avg = 0
for c_i in clusters:
#getting silhouette of ith cluster
avg += sample_silhouette_values[preds == c_i].mean()
avg = avg / clusters.size
silhouettesIndexes[k].append(avg)
# save the silhouette indexes to a file
def printSilhouettesIndexesToFile():
log_file = open('agglomerative-silhouettesIndexes.txt', 'w+')
log_file.write('k,silhouette_1\n')
for k in silhouettesIndexes.keys():
v = ','.join(map(str, silhouettesIndexes[k]))
log_file.write('{},{}\n'.format(k, v))
log_file.close()
######################### Adjusted Rand #########################
# keep the final indexes that will be saved to a file
crsIndexes = {}
# calc CR index using
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.adjusted_rand_score.html
def calcCRsIndexes(preds, k):
cr = adjusted_rand_score(gtruth, preds)
crsIndexes[k].append(cr)
# save the CR indexes to a file
def printCRsIndexesToFile():
log_file = open('agglomerative-CRs.txt', 'w+')
log_file.write('k,CR_1\n')
for k in crsIndexes.keys():
log_file.write('{},{}\n'.format(k, crsIndexes[k][0]))
log_file.close()
######################### Experiment of Check Point 2 #########################
# As in k-means, experiments will be performed with the number of groups ranging from 2 to 20.
# Since this is a deterministic algorithm, it is necessary to perform only one execution of
# the algorithm per k, then calculating the same indexes discussed previously.
# Also, create the same graphs shown in section 2.1.1,
# and finally define the best number of groups for the three indexes.
for k in range(2, 21):
silhouettesIndexes[k] = []
dbsIndexes[k] = []
crsIndexes[k] = []
algorithm = AgglomerativeClustering(n_clusters=k, linkage='average')
predictions = algorithm.fit_predict(df)
calcSilhouettesIndexes(predictions, k)
calcDBsIndexes(predictions, k)
calcCRsIndexes(predictions, k)
printSilhouettesIndexesToFile()
printDBsIndexesToFile()
printCRsIndexesToFile()
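# Illustrative sketch, not part of the original script: once the three index
# files have been written by the calls above, the best number of clusters for
# a given criterion can be read back from them, e.g. the k with the highest
# silhouette value.  (The helper name is ours.)
def pick_best_k_by_silhouette(path='agglomerative-silhouettesIndexes.txt'):
    scores = pd.read_csv(path)
    best_row = scores.loc[scores['silhouette_1'].idxmax()]
    return int(best_row['k']), best_row['silhouette_1']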
|
apache-2.0
|
loli/semisupervisedforests
|
sklearn/preprocessing/label.py
|
8
|
28313
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The multilabel_ attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The indicator_matrix_ attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute indicator_matrix_ is deprecated and will be "
"removed in 0.17. Use 'y_type_ == 'multilabel-indicator'' "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute multilabel_ is deprecated and will be removed "
"in 0.17. Use 'y_type_.startswith('multilabel')' "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
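# Illustrative sketch added for clarity, not part of the original module (the
# helper name is ours): the threshold documented above in action for a binary
# problem -- scores from decision_function are cut at 0, probabilities from
# predict_proba at 0.5.
def _labelbinarizer_threshold_demo():
    lb = LabelBinarizer().fit(['no', 'yes'])
    scores = np.array([[-1.3], [0.2], [2.1]])   # decision_function-like values
    probas = np.array([[0.1], [0.6], [0.9]])    # predict_proba-like values
    # Both calls recover ['no', 'yes', 'yes'].
    return (lb.inverse_transform(scores, threshold=0.),
            lb.inverse_transform(probas, threshold=0.5))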
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
        raise ValueError("classes {0} mismatch with the labels {1} "
                         "found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.argsort(classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i+1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
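# Illustrative sketch added for clarity, not part of the original module (the
# helper name is ours): a round trip through MultiLabelBinarizer -- label sets
# to an indicator matrix and back again.
def _multilabelbinarizer_roundtrip_demo():
    mlb = MultiLabelBinarizer()
    yt = mlb.fit_transform([(1, 2), (3,)])   # [[1, 1, 0], [0, 0, 1]]
    return mlb.inverse_transform(yt)         # [(1, 2), (3,)]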
|
bsd-3-clause
|
surhudm/scipy
|
scipy/signal/filter_design.py
|
6
|
122824
|
"""Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray, allclose,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array)
from numpy import mintypecode
import numpy as np
from scipy import special, optimize
from scipy.special import comb
from scipy.misc import factorial
from numpy.polynomial.polynomial import polyval as npp_polyval
import math
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
'sosfreqz']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
def findfreqs(num, den, N):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the array to be computed.
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
H(w) = ----------------------------------------------
a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
jw -jw -jwM
jw B(e ) b[0] + b[1]e + .... + b[M]e
H(e ) = ---- = -----------------------------------
jw -jw -jwN
A(e ) a[0] + a[1]e + .... + a[N]e
Parameters
----------
b : array_like
numerator of a linear filter
a : array_like
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
sosfreqz
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
d jw
D(w) = - -- arg H(e)
dw
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
    When such a case arises, a warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
    .. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
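# Illustrative sketch added for clarity, not part of the original module (the
# helper name is ours): a linear-phase FIR filter such as a 5-tap moving
# average has a constant group delay of (N - 1) / 2 = 2 samples, away from the
# zeros of H where the delay is undefined.
def _group_delay_linear_phase_demo():
    b = np.ones(5) / 5.0
    w, gd = group_delay((b, 1.))
    return w, gd   # gd is approximately 2.0 at every computed frequency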
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in SOS format.
Given `sos`, an array with shape (n, 6) of second order sections of
a digital filter, compute the frequency response of the system function::
B0(z) B1(z) B{n-1}(z)
H(z) = ----- * ----- * ... * ---------
A0(z) A1(z) A{n-1}(z)
for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
denominator of the transfer function of the k-th second order section.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz, sosfilt
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
Design a 15th-order bandpass filter in SOS format.
>>> from scipy import signal
>>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='sos')
Compute the frequency response at 1500 points from DC to Nyquist.
>>> w, h = signal.sosfreqz(sos, worN=1500)
Plot the response.
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.ylim(-75, 5)
>>> plt.grid(True)
>>> plt.yticks([0, -20, -40, -60])
>>> plt.ylabel('Gain [dB]')
>>> plt.title('Frequency Response')
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.grid(True)
>>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
... ['$-\pi$', '$-\pi/2$', '0', '$\pi/2$', '$\pi$'])
>>> plt.ylabel('Phase [rad]')
>>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
>>> plt.show()
If the same filter is implemented as a single transfer function,
numerical error corrupts the frequency response:
>>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='ba')
>>> w, h = signal.freqz(b, a, worN=1500)
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
if n_sections == 0:
raise ValueError('Cannot compute frequencies with no sections')
h = 1.
for row in sos:
w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole)
h *= rowh
return w, h
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
    >>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
    >>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
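# Illustrative sketch added for clarity, not part of the original module (the
# helper name is ours): converting a simple transfer function to
# zero-pole-gain form and back recovers the normalized coefficients.
def _tf_zpk_roundtrip_demo():
    b, a = [1., -1.], [1., -1.5, 0.64]
    z, p, k = tf2zpk(b, a)      # z = [1.], p = roots of a, k = 1.0
    b2, a2 = zpk2tf(z, p, k)    # b2 ~ [1., -1.], a2 ~ [1., -1.5, 0.64]
    return b2, a2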
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
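# Illustrative sketch added for clarity, not part of the original module (the
# helper name is ours): collapsing two second-order sections (here with zero
# trailing coefficients, i.e. effectively first order) into one transfer
# function by polynomial multiplication, as implemented above.
def _sos2tf_demo():
    sos = np.array([[1., 1., 0., 1., -0.5, 0.],
                    [1., 2., 0., 1., -0.25, 0.]])
    return sos2tf(sos)
    # b = [1., 3., 2., 0., 0.],  a = [1., -0.75, 0.125, 0., 0.]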
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
    filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
    as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
    shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
                # Special case: pair p1 with a complex zero now, so the lone
                # real zero is kept for a later first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
    arrays with zeros so that all numerators have the same length. Such
    alignment is necessary for functions like 'tf2ss', which need it when
    dealing with SIMO transfer functions.
Parameters
----------
nums: array_like
Numerator or list of numerators. Not necessarily with same length.
Returns
-------
nums: array
        The numerator. If the `nums` input was a list of numerators, a 2d
        array with zero-padded rows for the shorter numerators is returned.
        Otherwise ``np.asarray(nums)`` is returned.
"""
try:
# The statement can throw a ValueError if one
# of the numerators is a single digit and another
# is array-like e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
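# Illustrative sketch (not part of the original module): `_align_nums` left-pads
# shorter numerator rows with zeros so every row has the same length, which is
# what `normalize` relies on for SIMO transfer functions. The helper name below
# is hypothetical.
def _align_nums_demo():
    import numpy as np
    nums = [[1.0, 2.0], [1.0, 2.0, 3.0]]
    aligned = _align_nums(nums)
    # The shorter row is left-padded: [[0., 1., 2.], [1., 2., 3.]]
    assert aligned.shape == (2, 3)
    assert np.allclose(aligned[0], [0.0, 1.0, 2.0])
    return aligned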
def normalize(b, a):
"""Normalize numerator/denominator of a continuous-time transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
Parameters
----------
b: array_like
Numerator of the transfer function. Can be a 2d array to normalize
multiple transfer functions.
a: array_like
Denominator of the transfer function. At most 1d.
Returns
-------
num: array
The numerator of the normalized transfer function. At least a 1d
array. A 2d-array if the input `num` is a 2d array.
den: 1d-array
The denominator of the normalized transfer function.
Notes
-----
Coefficients for both the numerator and denominator should be specified in
descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``).
"""
num, den = b, a
den = np.atleast_1d(den)
num = np.atleast_2d(_align_nums(num))
if den.ndim != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if num.ndim > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if np.all(den == 0):
raise ValueError("Denominator must have at least on nonzero element.")
# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')
# Normalize transfer function
num, den = num / den[0], den / den[0]
# Count numerator columns that are all zero
leading_zeros = 0
for col in num.T:
if np.allclose(col, 0, atol=1e-14):
leading_zeros += 1
else:
break
# Trim leading zeros of numerator
if leading_zeros > 0:
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
# Make sure at least one column remains
if leading_zeros == num.shape[1]:
leading_zeros -= 1
num = num[:, leading_zeros:]
# Squeeze first dimension if singular
if num.shape[0] == 1:
num = num[0, :]
return num, den
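# Minimal usage sketch for `normalize` (illustrative only; the helper name is
# made up): dividing through by the leading denominator coefficient makes the
# denominator monic while preserving the transfer function.
def _normalize_demo():
    import numpy as np
    num, den = normalize([2.0, 4.0], [2.0, 6.0, 8.0])
    assert np.allclose(num, [1.0, 2.0])
    assert np.allclose(den, [1.0, 3.0, 4.0])
    return num, den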
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
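# Hedged sketch of `lp2lp`: scaling a unity-cutoff 2nd-order Butterworth
# prototype to wo = 10 rad/s. The demo name is illustrative, not public API.
def _lp2lp_demo():
    import numpy as np
    b, a = [1.0], [1.0, np.sqrt(2.0), 1.0]     # H(s) = 1/(s^2 + sqrt(2) s + 1)
    b10, a10 = lp2lp(b, a, wo=10.0)
    # Denominator becomes s^2 + 10*sqrt(2) s + 100; DC gain stays 1.
    assert np.allclose(a10, [1.0, 10.0 * np.sqrt(2.0), 100.0])
    assert np.allclose(b10, [100.0])
    return b10, a10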
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
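# Hedged sketch of `lp2hp`: the first-order lowpass prototype 1/(s + 1) maps to
# the highpass s/(s + wo). Illustrative helper only.
def _lp2hp_demo():
    import numpy as np
    bh, ah = lp2hp([1.0], [1.0, 1.0], wo=100.0)
    # H(s) = s / (s + 100): zero at the origin, pole at -100 rad/s.
    assert np.allclose(bh, [1.0, 0.0])
    assert np.allclose(ah, [1.0, 100.0])
    return bh, ah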
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
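# Hedged sketch of `lp2bp`: the transform doubles the order, turning the
# first-order prototype 1/(s + 1) into bw*s / (s^2 + bw*s + wo^2).
def _lp2bp_demo():
    import numpy as np
    bbp, abp = lp2bp([1.0], [1.0, 1.0], wo=10.0, bw=2.0)
    assert np.allclose(bbp, [2.0, 0.0])
    assert np.allclose(abp, [1.0, 2.0, 100.0])
    return bbp, abp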
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``.
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
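# Hedged sketch of `bilinear`: discretizing the analog RC lowpass 1/(s + 1) at
# fs = 1 Hz. With fs = 1 the substitution is s -> 2*(z - 1)/(z + 1), giving
# H(z) = (z + 1)/(3z - 1), i.e. b = [1/3, 1/3], a = [1, -1/3] after
# normalization. Illustrative helper only.
def _bilinear_demo():
    import numpy as np
    bz, az = bilinear([1.0], [1.0, 1.0], fs=1.0)
    assert np.allclose(bz, [1.0 / 3.0, 1.0 / 3.0])
    assert np.allclose(az, [1.0, -1.0 / 3.0])
    return bz, az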
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
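# Hedged usage sketch for `iirdesign` (illustrative helper, not public API): a
# digital elliptic lowpass with at most 1 dB of passband ripple up to
# 0.2*Nyquist and at least 40 dB of stopband attenuation from 0.3*Nyquist,
# returned as second-order sections.
def _iirdesign_demo():
    sos = iirdesign(wp=0.2, ws=0.3, gpass=1, gstop=40,
                    ftype='ellip', output='sos')
    # Each row is one biquad: [b0, b1, b2, a0, a1, a2].
    assert sos.shape[1] == 6
    return sos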
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn < 0) or numpy.any(Wn > 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 <= Wn <= 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
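# Illustrative sketch: `_relative_degree` is simply len(p) - len(z), guarded
# against improper transfer functions (more zeros than poles).
def _relative_degree_demo():
    assert _relative_degree([-1.0], [-1.0 + 1j, -1.0 - 1j]) == 1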
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
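# Hedged sketch: the zpk form of the bilinear transform mirrors the ba-form
# example above -- a single analog pole at s = -1 with fs = 1 maps to a digital
# pole at z = 1/3, and the zero at infinity lands at z = -1 (Nyquist).
def _zpkbilinear_demo():
    import numpy as np
    z, p, k = _zpkbilinear([], [-1.0], 1.0, fs=1.0)
    assert np.allclose(z, [-1.0])
    assert np.allclose(p, [1.0 / 3.0])
    assert np.allclose(k, 1.0 / 3.0)
    return z, p, k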
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
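# Hedged sketch: `_zpklp2lp` scales every pole and zero by wo and multiplies
# the gain by wo**degree so the passband gain is unchanged. Uses `buttap`,
# defined later in this module, purely for illustration.
def _zpklp2lp_demo():
    import numpy as np
    z, p, k = buttap(2)                    # unity-cutoff prototype
    z2, p2, k2 = _zpklp2lp(z, p, k, wo=5.0)
    assert np.allclose(np.abs(p2), 5.0)    # poles moved out to radius wo
    assert np.allclose(k2, 25.0)           # k * wo**2 (relative degree 2)
    return z2, p2, k2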
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
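# Hedged sketch: the zpk bandpass transform doubles the pole count and adds
# `degree` zeros at the origin. Illustrative helper only.
def _zpklp2bp_demo():
    import numpy as np
    z, p, k = buttap(2)
    zbp, pbp, kbp = _zpklp2bp(z, p, k, wo=1.0, bw=0.1)
    assert len(pbp) == 4                       # order doubled
    assert np.count_nonzero(zbp == 0) == 2     # zeros added at the origin
    return zbp, pbp, kbp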
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
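# Hedged sketch: the zpk band-stop transform moves the prototype's zeros at
# infinity to +/- 1j*wo, i.e. into the middle of the stopband. Illustrative
# helper only.
def _zpklp2bs_demo():
    import numpy as np
    z, p, k = buttap(2)
    zbs, pbs, kbs = _zpklp2bs(z, p, k, wo=1.0, bw=0.1)
    assert len(pbs) == 4
    assert np.allclose(np.sort_complex(zbs),
                       np.sort_complex([1j, 1j, -1j, -1j]))
    return zbs, pbs, kbs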
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth-order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
ellipord, ellipap
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'):
"""
Bessel/Thomson digital and analog filter design.
Design an Nth-order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies (defined
by the `norm` parameter).
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned. (See Notes.)
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
norm : {'phase', 'delay', 'mag'}, optional
Critical frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
both low-pass and high-pass filters, so this is the
"phase-matched" case.
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency `Wn`.
.. versionadded:: 0.18.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response. [1]_
The Bessel is inherently an analog filter. This function generates digital
Bessel filters using the bilinear transform, which does not preserve the
phase response of the analog filter. As such, it is only approximately
correct at frequencies below about fs/4. To get maximally-flat group
delay at higher frequencies, the analog Bessel filter must be transformed
using phase-preserving techniques.
See `besselap` for implementation details and references.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the phase-normalized frequency response, showing the relationship
to the Butterworth's cutoff frequency (green):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter magnitude response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
and the phase midpoint:
>>> plt.figure()
>>> plt.semilogx(w, np.unwrap(np.angle(h)))
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-np.pi, color='red') # phase midpoint
>>> plt.title('Bessel filter phase response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Phase [radians]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
>>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.axhline(-3, color='red') # -3 dB magnitude
>>> plt.axvline(10, color='green') # cutoff frequency
>>> plt.title('Magnitude-normalized Bessel filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the delay-normalized filter, showing the maximally-flat group delay
at 0.1 seconds:
>>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
>>> w, h = signal.freqs(b, a)
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.axhline(0.1, color='red') # 0.1 seconds group delay
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
References
----------
.. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel_'+norm)
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
    gpass : float
        Amount of ripple in the passband in dB.
    gstop : float
        Amount of attenuation in the stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
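# Hedged sketch: `band_stop_obj` is the scalar objective that `buttord`,
# `cheb1ord`, `cheb2ord` and `ellipord` minimize over one passband edge of a
# band-stop specification; it returns a (generally non-integer) required order.
# The numbers below are arbitrary and only for illustration.
def _band_stop_obj_demo():
    import numpy as np
    passb = np.array([0.3, 2.5])    # fixed passband edges (rad/s)
    stopb = np.array([0.8, 1.2])    # fixed stopband edges (rad/s)
    n = band_stop_obj(passb[0], 0, passb, stopb, gpass=3, gstop=40,
                      type='butter')
    assert n > 0
    return n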
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with passband within 3 dB from 20 to
    50 rad/s, while attenuating by at least 40 dB below 14 and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
    # Find the Butterworth natural frequency WN (or the "3dB frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
    to 0.2*(fs/2), while attenuating by at least 40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop
>>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # pass
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
    Design a digital bandstop filter which attenuates by at least 60 dB from 0.2*(fs/2) to
0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop
>>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # pass
>>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # stop
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
See Also
--------
butter : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
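# Illustrative check (not part of the original module; added for clarity):
# the Butterworth prototype poles returned above all lie on the unit circle
# in the left half-plane, which follows directly from p = -exp(1j*pi*m/(2N)).
# Doctest-style sketch, assuming numpy is available as `np` as in this module:
#
#   >>> z, p, k = buttap(4)
#   >>> bool(np.allclose(np.abs(p), 1.0))    # poles on the unit circle
#   True
#   >>> bool(np.all(p.real < 0))             # strictly stable
#   True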
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
cheby1 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
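# Illustrative check (not part of the original module; added for clarity):
# for an even-order type I prototype, the DC gain k / prod(-p) equals
# 10**(-rp/20), i.e. the response starts rp dB down, matching the comment
# in the N == 0 branch above. Doctest-style sketch:
#
#   >>> z, p, k = cheb1ap(4, rp=1)
#   >>> dc_gain = abs(k / np.prod(-p))
#   >>> bool(np.allclose(dc_gain, 10 ** (-1 / 20.)))
#   True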
def cheb2ap(N, rs):
"""
Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
See Also
--------
cheby2 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m, 1 - m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth-order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
ellip : Filter design function using this prototype
References
----------
.. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
Chapters 5 and 12.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
This can be used to calculate ratios of factorials more efficiently, since
n!/m! == falling_factorial(n, n-m) for n >= m,
skipping the factors that cancel out.
The ordinary factorial is the special case n! == ff(n, n).
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
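# Illustrative values (not part of the original module; added for clarity):
# the falling factorial multiplies only the top `n` factors, so it evaluates
# factorial ratios without forming either factorial.
#
#   >>> _falling_factorial(7, 3)    # 7 * 6 * 5 == 7!/4!
#   210
#   >>> _falling_factorial(5, 5)    # full factorial, 5!
#   120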
def _bessel_poly(n, reverse=False):
"""
Return the coefficients of Bessel polynomial of degree `n`
If `reverse` is true, a reverse Bessel polynomial is output.
Output is a list of coefficients:
[1] = 1
[1, 1] = 1*s + 1
[1, 3, 3] = 1*s^2 + 3*s + 3
[1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15
[1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
etc.
Output is a Python list of arbitrary precision long ints, so n is only
limited by your hardware's memory.
Sequence is http://oeis.org/A001498 , and output can be confirmed to
match http://oeis.org/A001498/b001498.txt :
>>> i = 0
>>> for n in range(51):
... for x in _bessel_poly(n, reverse=True):
... print(i, x)
... i += 1
"""
if abs(int(n)) != n:
raise ValueError("Polynomial order must be a nonnegative integer")
else:
n = int(n) # np.int32 doesn't work, for instance
out = []
for k in range(n + 1):
num = _falling_factorial(2*n - k, n)
den = 2**(n - k) * factorial(k, exact=True)
out.append(num // den)
if reverse:
return out[::-1]
else:
return out
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise Exception('Zeros failed to converge')
return x
def _bessel_zeros(N):
"""
Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
modified Bessel function of the second kind
"""
if N == 0:
return asarray([])
# Generate starting points
x0 = _campos_zeros(N)
# Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary
# Bessel polynomial y_N(x)
def f(x):
return special.kve(N+0.5, 1/x)
# First derivative of above
def fp(x):
return (special.kve(N-0.5, 1/x)/(2*x**2) -
special.kve(N+0.5, 1/x)/(x**2) +
special.kve(N+1.5, 1/x)/(2*x**2))
# Starting points converge to true zeros
x = _aberth(f, fp, x0)
# Improve precision using Newton's method on each
for i in range(len(x)):
x[i] = optimize.newton(f, x[i], fp, tol=1e-15)
# Average complex conjugates to make them exactly symmetrical
x = np.mean((x, x[::-1].conj()), 0)
# Zeros should sum to -1
if abs(np.sum(x) + 1) > 1e-15:
raise RuntimeError('Generated zeros are inaccurate')
return x
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/np.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase'):
"""
Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
Parameters
----------
N : int
The order of the filter.
norm : {'phase', 'delay', 'mag'}, optional
Frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This
happens for both low-pass and high-pass filters, so this is the
"phase-matched" case. [6]_
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1 (e.g. 1 second). This is the "natural" type obtained by
solving Bessel polynomials
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency 1. This is called "frequency normalization" by
Bond. [1]_
.. versionadded:: 0.18.0
Returns
-------
z : ndarray
Zeros of the transfer function. Is always an empty array.
p : ndarray
Poles of the transfer function.
k : scalar
Gain of the transfer function. For phase-normalized, this is always 1.
See Also
--------
bessel : Filter design function using this prototype
Notes
-----
To find the pole locations, approximate starting points are generated [2]_
for the zeros of the ordinary Bessel polynomial [3]_, then the
Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
calculate more accurate zeros, and these locations are then inverted about
the unit circle.
References
----------
.. [1] C.R. Bond, "Bessel Filter Constants",
http://www.crbond.com/papers/bsf.pdf
.. [2] Campos and Calderon, "Approximate closed-form formulas for the
zeros of the Bessel Polynomials", arXiv:1105.0957 [math-ph],
http://arxiv.org/abs/1105.0957
.. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
.. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
April 1973
.. [5] Ehrlich, "A modified Newton method for polynomials", Communications
of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
DOI:10.1145/363067.363115
.. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
Others", RaneNote 147, 1998, http://www.rane.com/note147.html
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
if N == 0:
p = []
k = 1
else:
# Find roots of reverse Bessel polynomial
p = 1/_bessel_zeros(N)
a_last = _falling_factorial(2*N, N) // 2**N
# Shift them to a different normalization if required
if norm in ('delay', 'mag'):
# Normalized for group delay of 1
k = a_last
if norm == 'mag':
# -3 dB magnitude point is at 1 rad/sec
norm_factor = _norm_factor(p, k)
p /= norm_factor
k = norm_factor**-N * a_last
elif norm == 'phase':
# Phase-matched (1/2 max phase shift at 1 rad/sec)
# Asymptotes are same as Butterworth filter
p *= 10**(-math.log10(a_last)/N)
k = 1
else:
raise ValueError('normalization not understood')
return asarray([]), asarray(p, dtype=complex), float(k)
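# Illustrative check (not part of the original module; added for clarity):
# the 'phase' and 'delay' normalizations return the same pole pattern,
# rescaled by a single real factor, as described in the docstring above.
# Doctest-style sketch:
#
#   >>> _, p_phase, _ = besselap(5, norm='phase')
#   >>> _, p_delay, _ = besselap(5, norm='delay')
#   >>> ratios = p_phase / p_delay
#   >>> bool(np.allclose(ratios, ratios[0]))
#   True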
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'bessel_phase': [besselap],
'bessel_delay': [besselap],
'bessel_mag': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
bessel_norms = {'bessel': 'phase',
'bessel_phase': 'phase',
'bessel_delay': 'delay',
'bessel_mag': 'mag'}
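# Illustrative lookup sketch (not part of the original module; added for
# clarity): these tables map the user-facing string arguments to the analog
# prototype function, the order-selection helper and the canonical band name,
# and are presumably consulted by the higher-level design routines such as
# `iirfilter`.
#
#   >>> proto_fn, order_fn = filter_dict['cheby1']   # cheb1ap, cheb1ord
#   >>> band_dict['bp']
#   'bandpass'
#   >>> bessel_norms['bessel_mag']
#   'mag'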
|
bsd-3-clause
|
florian-f/sklearn
|
sklearn/gaussian_process/tests/test_gaussian_process.py
|
7
|
3344
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# License: BSD style
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a one-dimensional Gaussian Process model.
Check random start optimization.
Test the interpolating property.
"""
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a two-dimensional Gaussian Process model accounting for
anisotropy. Check random start optimization.
Test the interpolating property.
"""
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
"""
Repeat test_1d and test_2d for several built-in correlation
models specified as strings.
"""
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
"""
Repeat test_1d and test_2d with given regression weights (beta0) for
different regression models (Ordinary Kriging).
"""
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
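# Hedged usage sketch (not part of the original test module): the same
# GaussianProcess API exercised above can also predict at points outside
# the training set, returning a mean and an MSE estimate per point.
#
#   >>> gp = GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1).fit(X, y)
#   >>> x_new = np.atleast_2d(np.linspace(0., 10., 5)).T
#   >>> y_new, mse = gp.predict(x_new, eval_MSE=True)   # mean and MSE at new points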
|
bsd-3-clause
|
NicovincX2/Python-3.5
|
Analyse (mathématiques)/Analyse à plusieurs variables/Équation aux dérivées partielles/Système à réaction-diffusion/turing.py
|
1
|
1289
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
a = 2.8e-4
b = 5e-3
tau = .1
k = -.005
size = 100 # size of the 2D grid
dx = 2. / size # space step
T = 10.0 # total time
dt = .9 * dx**2 / 2 # time step
n = int(T / dt)
U = np.random.rand(size, size)
V = np.random.rand(size, size)
def laplacian(Z):
Ztop = Z[0:-2, 1:-1]
Zleft = Z[1:-1, 0:-2]
Zbottom = Z[2:, 1:-1]
Zright = Z[1:-1, 2:]
Zcenter = Z[1:-1, 1:-1]
return (Ztop + Zleft + Zbottom + Zright - 4 * Zcenter) / dx**2
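# Hedged sanity check (not part of the original script): on a quadratic
# field x**2 + y**2 sampled with spacing exactly `dx`, the five-point
# stencil above is exact and returns 4 everywhere in the interior.
#
#   >>> xs = np.arange(size) * dx
#   >>> XX, YY = np.meshgrid(xs, xs)
#   >>> bool(np.allclose(laplacian(XX**2 + YY**2), 4.0))
#   True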
# We simulate the PDE with the finite difference method.
for i in range(n):
# We compute the Laplacian of u and v.
deltaU = laplacian(U)
deltaV = laplacian(V)
# We take the values of u and v inside the grid.
Uc = U[1:-1, 1:-1]
Vc = V[1:-1, 1:-1]
# We update the variables.
U[1:-1, 1:-1], V[1:-1, 1:-1] = \
Uc + dt * (a * deltaU + Uc - Uc**3 - Vc + k), \
Vc + dt * (b * deltaV + Uc - Vc) / tau
# Neumann boundary conditions: the derivatives at the
# edges are zero.
for Z in (U, V):
Z[0, :] = Z[1, :]
Z[-1, :] = Z[-2, :]
Z[:, 0] = Z[:, 1]
Z[:, -1] = Z[:, -2]
plt.imshow(U, cmap=plt.cm.copper, extent=[-1, 1, -1, 1])
plt.xticks([])
plt.yticks([])
os.system("pause")
|
gpl-3.0
|
zorojean/tushare
|
tushare/stock/trading.py
|
14
|
21568
|
# -*- coding:utf-8 -*-
"""
Trading data interface
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from __future__ import division
import time
import json
import lxml.html
from lxml import etree
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
import re
from pandas.compat import StringIO
from tushare.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_hist_data(code=None, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
Get historical trading records for an individual stock.
Parameters
------
code:string
stock code, e.g. 600848
start:string
start date, format YYYY-MM-DD; if empty, data from the earliest date provided by the API is returned
end:string
end date, format YYYY-MM-DD; if empty, data up to the most recent trading day is returned
ktype:string
data type: D=daily k-line, W=weekly, M=monthly, 5=5 min, 15=15 min, 30=30 min, 60=60 min; default is D
retry_count : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0
seconds to pause between repeated requests, to avoid problems caused by requests sent too close together
return
-------
DataFrame
columns: date, open, high, close, low, volume, price change, pct change, 5/10/20-day mean price, 5/10/20-day mean volume, turnover rate
"""
symbol = _code_to_symbol(code)
url = ''
if ktype.upper() in ct.K_LABELS:
url = ct.DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
ct.K_TYPE[ktype.upper()], symbol)
elif ktype in ct.K_MIN_LABELS:
url = ct.DAY_PRICE_MIN_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
symbol, ktype)
else:
raise TypeError('ktype input error.')
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
lines = urlopen(request, timeout = 10).read()
if len(lines) < 15: #no data
return None
except Exception as e:
print(e)
else:
js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
cols = []
if (code in ct.INDEX_LABELS) & (ktype.upper() in ct.K_LABELS):
cols = ct.INX_DAY_PRICE_COLUMNS
else:
cols = ct.DAY_PRICE_COLUMNS
if len(js['record'][0]) == 14:
cols = ct.INX_DAY_PRICE_COLUMNS
df = pd.DataFrame(js['record'], columns=cols)
if ktype.upper() in ['D', 'W', 'M']:
df = df.applymap(lambda x: x.replace(u',', u''))
for col in cols[1:]:
df[col] = df[col].astype(float)
if start is not None:
df = df[df.date >= start]
if end is not None:
df = df[df.date <= end]
if (code in ct.INDEX_LABELS) & (ktype in ct.K_MIN_LABELS):
df = df.drop('turnover', axis=1)
df = df.set_index('date')
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _parsing_dayprice_json(pageNum=1):
"""
Process one page of the current day's quotes (json format).
Parameters
------
pageNum: page number
return
-------
DataFrame: the current day's trading data for all stocks on the page
"""
ct._write_console()
request = Request(ct.SINA_DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], pageNum))
text = urlopen(request, timeout=10).read()
if text == 'null':
return None
reg = re.compile(r'\,(.*?)\:')
text = reg.sub(r',"\1":', text.decode('gbk') if ct.PY3 else text)
text = text.replace('"{symbol', '{"symbol')
text = text.replace('{symbol', '{"symbol"')
if ct.PY3:
jstr = json.dumps(text)
else:
jstr = json.dumps(text, encoding='GBK')
js = json.loads(jstr)
df = pd.DataFrame(pd.read_json(js, dtype={'code':object}),
columns=ct.DAY_TRADING_COLUMNS)
df = df.drop('symbol', axis=1)
df = df.ix[df.volume > 0]
return df
def get_tick_data(code=None, date=None, retry_count=3, pause=0.001):
"""
Get tick-by-tick trade data.
Parameters
------
code:string
stock code, e.g. 600848
date:string
date, format YYYY-MM-DD
retry_count : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0
seconds to pause between repeated requests, to avoid problems caused by requests sent too close together
return
-------
DataFrame: tick-by-tick trade data for the given stock and date
columns: trade time, trade price, price change, volume (lots), amount (CNY), buy/sell type
"""
if code is None or len(code)!=6 or date is None:
return None
symbol = _code_to_symbol(code)
for _ in range(retry_count):
time.sleep(pause)
try:
re = Request(ct.TICK_PRICE_URL % (ct.P_TYPE['http'], ct.DOMAINS['sf'], ct.PAGES['dl'],
date, symbol))
lines = urlopen(re, timeout=10).read()
lines = lines.decode('GBK')
if len(lines) < 100:
return None
df = pd.read_table(StringIO(lines), names=ct.TICK_COLUMNS,
skiprows=[0])
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_ticks(code=None, retry_count=3, pause=0.001):
"""
Get today's tick-by-tick trade details.
Parameters
------
code:string
stock code, e.g. 600848
retry_count : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0
seconds to pause between repeated requests, to avoid problems caused by requests sent too close together
return
-------
DataFrame: today's tick-by-tick trade data for the given stock
columns: trade time, trade price, price change, volume (lots), amount (CNY), buy/sell type
"""
if code is None or len(code)!=6 :
return None
symbol = _code_to_symbol(code)
date = du.today()
try:
request = Request(ct.TODAY_TICKS_PAGE_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], date,
symbol))
data_str = urlopen(request, timeout=10).read()
data_str = data_str.decode('GBK')
data_str = data_str[1:-1]
data_str = eval(data_str, type('Dummy', (dict,),
dict(__getitem__ = lambda s, n:n))())
data_str = json.dumps(data_str)
data_str = json.loads(data_str)
pages = len(data_str['detailPages'])
data = pd.DataFrame()
ct._write_head()
for pNo in range(1, pages):
data = data.append(_today_ticks(symbol, date, pNo,
retry_count, pause), ignore_index=True)
except Exception as er:
print(str(er))
return data
def _today_ticks(symbol, tdate, pageNo, retry_count, pause):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
html = lxml.html.parse(ct.TODAY_TICKS_URL % (ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['t_ticks'],
symbol, tdate, pageNo
))
res = html.xpath('//table[@id=\"datatbl\"]/tbody/tr')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
sarr = sarr.replace('--', '0')
df = pd.read_html(StringIO(sarr), parse_dates=False)[0]
df.columns = ct.TODAY_TICK_COLUMNS
df['pchange'] = df['pchange'].map(lambda x : x.replace('%', ''))
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_all():
"""
Get, in one call, the trading data of all stocks for the most recent trading day.
return
-------
DataFrame
columns: code, name, pct change, current price, open, high, low, previous close, volume, turnover rate
"""
ct._write_head()
df = _parsing_dayprice_json(1)
if df is not None:
for i in range(2, ct.PAGE_NUM[0]):
newdf = _parsing_dayprice_json(i)
df = df.append(newdf, ignore_index=True)
return df
def get_realtime_quotes(symbols=None):
"""
Get real-time quotes data,
used to track trading activity (this call's result minus the previous call's data).
Parameters
------
symbols : string, array-like object (list, tuple, Series).
return
-------
DataFrame of real-time quotes
columns: 0: name, stock name
1: open, today's opening price
2: pre_close, previous close
3: price, current price
4: high, today's high
5: low, today's low
6: bid, best bid price ("buy one")
7: ask, best ask price ("sell one")
8: volume, traded volume (you may need volume/100)
9: amount, traded amount (CNY)
10: b1_v, bid-1 volume
11: b1_p, bid-1 price
12: b2_v, bid-2 volume
13: b2_p, bid-2 price
14: b3_v, bid-3 volume
15: b3_p, bid-3 price
16: b4_v, bid-4 volume
17: b4_p, bid-4 price
18: b5_v, bid-5 volume
19: b5_p, bid-5 price
20: a1_v, ask-1 volume
21: a1_p, ask-1 price
...
30: date, date
31: time, time
"""
symbols_list = ''
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for code in symbols:
symbols_list += _code_to_symbol(code) + ','
else:
symbols_list = _code_to_symbol(symbols)
symbols_list = symbols_list[:-1] if len(symbols_list) > 8 else symbols_list
request = Request(ct.LIVE_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['sinahq'],
_random(), symbols_list))
text = urlopen(request,timeout=10).read()
text = text.decode('GBK')
reg = re.compile(r'\="(.*?)\";')
data = reg.findall(text)
regSym = re.compile(r'(?:sh|sz)(.*?)\=')
syms = regSym.findall(text)
data_list = []
syms_list = []
for index, row in enumerate(data):
if len(row)>1:
data_list.append([astr for astr in row.split(',')])
syms_list.append(syms[index])
if len(syms_list) == 0:
return None
df = pd.DataFrame(data_list, columns=ct.LIVE_DATA_COLS)
df = df.drop('s', axis=1)
df['code'] = syms_list
ls = [cls for cls in df.columns if '_v' in cls]
for txt in ls:
df[txt] = df[txt].map(lambda x : x[:-2])
return df
def get_h_data(code, start=None, end=None, autype='qfq',
index=False, retry_count=3, pause=0.001):
'''
Get historical adjusted price data (forward/backward adjustment).
Parameters
------
code:string
stock code, e.g. 600848
start:string
start date, format YYYY-MM-DD; if empty, defaults to one year ago today
end:string
end date, format YYYY-MM-DD; if empty, defaults to the current date
autype:string
adjustment type: qfq = forward-adjusted, hfq = backward-adjusted, None = unadjusted; default is qfq
retry_count : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0
seconds to pause between repeated requests, to avoid problems caused by requests sent too close together
return
-------
DataFrame
date trading date (index)
open opening price
high highest price
close closing price
low lowest price
volume traded volume
amount traded amount
'''
start = du.today_last_year() if start is None else start
end = du.today() if end is None else end
qs = du.get_quarts(start, end)
qt = qs[0]
ct._write_head()
data = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
if len(qs)>1:
for d in range(1, len(qs)):
qt = qs[d]
ct._write_console()
df = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
data = data.append(df, ignore_index=True)
if len(data) == 0 or len(data[(data.date>=start)&(data.date<=end)]) == 0:
return None
data = data.drop_duplicates('date')
if index:
data = data[(data.date>=start) & (data.date<=end)]
data = data.set_index('date')
data = data.sort_index(ascending=False)
return data
if autype == 'hfq':
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
if autype == 'qfq':
data = data.drop('factor', axis=1)
df = _parase_fq_factor(code, start, end)
df = df.drop_duplicates('date')
df = df.sort('date', ascending=False)
frow = df.head(1)
rt = get_realtime_quotes(code)
if rt is None:
return None
if ((float(rt['high']) == 0) & (float(rt['low']) == 0)):
preClose = float(rt['pre_close'])
else:
if du.is_holiday(du.today()):
preClose = float(rt['price'])
else:
print(du.get_hour())
print((du.get_hour() > 9) & (du.get_hour() < 18) )
if (du.get_hour() > 9) & (du.get_hour() < 18):
preClose = float(rt['pre_close'])
else:
preClose = float(rt['price'])
rate = float(frow['factor']) / preClose
data = data[(data.date >= start) & (data.date <= end)]
for label in ['open', 'high', 'low', 'close']:
data[label] = data[label] / rate
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label] / data['factor']
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data = data.set_index('date')
data = data.sort_index(ascending=False)
data = data.astype(float)
return data
def _parase_fq_factor(code, start, end):
symbol = _code_to_symbol(code)
request = Request(ct.HIST_FQ_FACTOR_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], symbol))
text = urlopen(request, timeout=10).read()
text = text[1:len(text)-1]
text = text.decode('utf-8') if ct.PY3 else text
text = text.replace('{_', '{"')
text = text.replace('total', '"total"')
text = text.replace('data', '"data"')
text = text.replace(':"', '":"')
text = text.replace('",_', '","')
text = text.replace('_', '-')
text = json.loads(text)
df = pd.DataFrame({'date':list(text['data'].keys()), 'factor':list(text['data'].values())})
df['date'] = df['date'].map(_fun_except) # for null case
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
df['factor'] = df['factor'].astype(float)
return df
def _fun_except(x):
if len(x) > 10:
return x[-10:]
else:
return x
def _parse_fq_data(url, index, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath('//table[@id=\"FundHoldSharesTable\"]')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr, skiprows = [0, 1])[0]
if len(df) == 0:
return pd.DataFrame()
if index:
df.columns = ct.HIST_FQ_COLS[0:7]
else:
df.columns = ct.HIST_FQ_COLS
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_index():
"""
Get quotes for the major market indices.
return
-------
DataFrame
code: index code
name: index name
change: pct change
open: opening price
preclose: previous close
close: closing price
high: highest price
low: lowest price
volume: traded volume (lots)
amount: traded amount (100 million CNY)
"""
request = Request(ct.INDEX_HQ_URL%(ct.P_TYPE['http'],
ct.DOMAINS['sinahq']))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('var hq_str_sh', '').replace('var hq_str_sz', '')
text = text.replace('";', '').replace('"', '').replace('=', ',')
text = '%s%s'%(ct.INDEX_HEADER, text)
df = pd.read_csv(StringIO(text), sep=',', thousands=',')
df['change'] = (df['close'] / df['preclose'] - 1 ) * 100
df['amount'] = df['amount'] / 100000000
df['change'] = df['change'].map(ct.FORMAT)
df['amount'] = df['amount'].map(ct.FORMAT)
df = df[ct.INDEX_COLS]
df['code'] = df['code'].map(lambda x:str(x).zfill(6))
df['change'] = df['change'].astype(float)
df['amount'] = df['amount'].astype(float)
return df
def _get_index_url(index, code, qt):
if index:
url = ct.HIST_INDEX_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
else:
url = ct.HIST_FQ_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
return url
def get_hists(symbols, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
Get historical quotes for a batch of symbols; see get_hist_data for the parameters and the returned data type.
"""
df = pd.DataFrame()
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for symbol in symbols:
data = get_hist_data(symbol, start=start, end=end,
ktype=ktype, retry_count=retry_count,
pause=pause)
data['code'] = symbol
df = df.append(data, ignore_index=True)
return df
else:
return None
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
def _code_to_symbol(code):
"""
Generate the exchange-prefixed symbol (sh/sz) for a stock code.
"""
if code in ct.INDEX_LABELS:
return ct.INDEX_LIST[code]
else:
if len(code) != 6 :
return ''
else:
return 'sh%s'%code if code[:1] in ['5', '6'] else 'sz%s'%code
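# Hedged usage sketch (not part of the original module), based only on the
# signatures defined above; the tushare constants module and network access
# are needed for these calls to actually return data.
#
#   >>> df = get_hist_data('600848', start='2014-01-01', end='2014-06-30', ktype='D')
#   >>> rt = get_realtime_quotes(['600848', '000001'])   # a list of codes is accepted
#   >>> _code_to_symbol('600848')   # 'sh600848' for Shanghai-listed codes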
|
bsd-3-clause
|
nsat/gnuradio
|
gr-filter/examples/synth_to_chan.py
|
18
|
3875
|
#!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
fmtx = list()
for fi in freqs:
s = analog.sig_source_f(fs, analog.GR_SIN_WAVE, fi, 1)
fm = analog.nbfm_tx(fs, 4*fs, max_dev=10000, tau=75e-6, fh=0.925*(4*fs)/2.0)
sigs.append(s)
fmtx.append(fm)
syntaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps),
len(syntaps)/nchans)
chtaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps),
len(chtaps)/nchans)
filtbank = filter.pfb_synthesizer_ccf(nchans, syntaps)
channelizer = filter.pfb.channelizer_ccf(nchans, chtaps)
noise_level = 0.01
head = blocks.head(gr.sizeof_gr_complex, N)
noise = analog.noise_source_c(analog.GR_GAUSSIAN, noise_level)
addnoise = blocks.add_cc()
snk_synth = blocks.vector_sink_c()
tb = gr.top_block()
tb.connect(noise, (addnoise,0))
tb.connect(filtbank, head, (addnoise, 1))
tb.connect(addnoise, channelizer)
tb.connect(addnoise, snk_synth)
snk = list()
for i,si in enumerate(sigs):
tb.connect(si, fmtx[i], (filtbank, i))
for i in xrange(nchans):
snk.append(blocks.vector_sink_c())
tb.connect((channelizer, i), snk[i])
tb.run()
if 1:
channel = 1
data = snk[channel].data()[1000:]
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(data[10000:10200] )
s1.set_title(("Output Signal from Channel %d" % channel))
fftlen = 2048
winfunc = scipy.blackman
#winfunc = scipy.hamming
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.psd(data, NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s2.set_title(("Output PSD from Channel %d" % channel))
f3 = pylab.figure(3)
s3 = f3.add_subplot(1,1,1)
s3.psd(snk_synth.data()[1000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s3.set_title("Output of Synthesis Filter")
pylab.show()
if __name__ == "__main__":
main()
|
gpl-3.0
|
olafhauk/mne-python
|
examples/decoding/decoding_rsa.py
|
15
|
6873
|
"""
.. _ex-rsa-noplot:
====================================
Representational Similarity Analysis
====================================
Representational Similarity Analysis is used to perform summary statistics
on supervised classifications where the number of classes is relatively high.
It consists in characterizing the structure of the confusion matrix to infer
the similarity between brain responses and serves as a proxy for characterizing
the space of mental representations
:footcite:`Shepard1980,LaaksoCottrell2000,KriegeskorteEtAl2008`.
In this example, we perform RSA on responses to 24 object images (among
a list of 92 images). Subjects were presented with images of human, animal
and inanimate objects :footcite:`CichyEtAl2014`. Here we use the 24 unique
images of faces and body parts.
.. note:: this example will download a very large (~6GB) file, so we will not
build the images below.
"""
# Authors: Jean-Remi King <[email protected]>
# Jaakko Leppakangas <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from pandas import read_csv
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.manifold import MDS
import mne
from mne.io import read_raw_fif, concatenate_raws
from mne.datasets import visual_92_categories
print(__doc__)
data_path = visual_92_categories.data_path()
# Define stimulus - trigger mapping
fname = op.join(data_path, 'visual_stimuli.csv')
conds = read_csv(fname)
print(conds.head(5))
##############################################################################
# Let's restrict the number of conditions to speed up computation
max_trigger = 24
conds = conds[:max_trigger] # take only the first 24 rows
##############################################################################
# Define stimulus - trigger mapping
conditions = []
for c in conds.values:
cond_tags = list(c[:2])
cond_tags += [('not-' if i == 0 else '') + conds.columns[k]
for k, i in enumerate(c[2:], 2)]
conditions.append('/'.join(map(str, cond_tags)))
print(conditions[:10])
##############################################################################
# Let's make the event_id dictionary
event_id = dict(zip(conditions, conds.trigger + 1))
event_id['0/human bodypart/human/not-face/animal/natural']
##############################################################################
# Read MEG data
n_runs = 4 # 4 for full data (use less to speed up computations)
fname = op.join(data_path, 'sample_subject_%i_tsss_mc.fif')
raws = [read_raw_fif(fname % block, verbose='error')
for block in range(n_runs)] # ignore filename warnings
raw = concatenate_raws(raws)
events = mne.find_events(raw, min_duration=.002)
events = events[events[:, 2] <= max_trigger]
##############################################################################
# Epoch data
picks = mne.pick_types(raw.info, meg=True)
epochs = mne.Epochs(raw, events=events, event_id=event_id, baseline=None,
picks=picks, tmin=-.1, tmax=.500, preload=True)
##############################################################################
# Let's plot some conditions
epochs['face'].average().plot()
epochs['not-face'].average().plot()
##############################################################################
# Representational Similarity Analysis (RSA) is a neuroimaging-specific
# appellation for statistics applied to the confusion matrix, which is also
# referred to as the representational dissimilarity matrix (RDM).
#
# Compared to the approach from Cichy et al. we'll use a multiclass
# classifier (Multinomial Logistic Regression) while the paper uses
# all pairwise binary classification tasks to make the RDM.
# Also, here we use the ROC-AUC as the performance metric, while the
# paper uses accuracy. Finally, for the sake of time, we apply RSA to a
# single window of data, while Cichy et al. did it for each time
# instant separately.
# Classify using the average signal in the window 50ms to 300ms
# to focus the classifier on the time interval with best SNR.
clf = make_pipeline(StandardScaler(),
LogisticRegression(C=1, solver='liblinear',
multi_class='auto'))
X = epochs.copy().crop(0.05, 0.3).get_data().mean(axis=2)
y = epochs.events[:, 2]
classes = set(y)
cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)
# Compute confusion matrix for each cross-validation fold
y_pred = np.zeros((len(y), len(classes)))
for train, test in cv.split(X, y):
# Fit
clf.fit(X[train], y[train])
# Probabilistic prediction (necessary for ROC-AUC scoring metric)
y_pred[test] = clf.predict_proba(X[test])
##############################################################################
# Compute confusion matrix using ROC-AUC
confusion = np.zeros((len(classes), len(classes)))
for ii, train_class in enumerate(classes):
for jj in range(ii, len(classes)):
confusion[ii, jj] = roc_auc_score(y == train_class, y_pred[:, jj])
confusion[jj, ii] = confusion[ii, jj]
##############################################################################
# Plot
labels = [''] * 5 + ['face'] + [''] * 11 + ['bodypart'] + [''] * 6
fig, ax = plt.subplots(1)
im = ax.matshow(confusion, cmap='RdBu_r', clim=[0.3, 0.7])
ax.set_yticks(range(len(classes)))
ax.set_yticklabels(labels)
ax.set_xticks(range(len(classes)))
ax.set_xticklabels(labels, rotation=40, ha='left')
ax.axhline(11.5, color='k')
ax.axvline(11.5, color='k')
plt.colorbar(im)
plt.tight_layout()
plt.show()
##############################################################################
# Confusion matrices related to mental representations have historically
# been summarized with dimensionality reduction using multi-dimensional
# scaling [1].
# See how the face samples cluster together.
fig, ax = plt.subplots(1)
mds = MDS(2, random_state=0, dissimilarity='precomputed')
chance = 0.5
summary = mds.fit_transform(chance - confusion)
cmap = plt.get_cmap('rainbow')
colors = ['r', 'b']
names = list(conds['condition'].values)
for color, name in zip(colors, set(names)):
sel = np.where([this_name == name for this_name in names])[0]
size = 500 if name == 'human face' else 100
ax.scatter(summary[sel, 0], summary[sel, 1], s=size,
facecolors=color, label=name, edgecolors='k')
ax.axis('off')
ax.legend(loc='lower right', scatterpoints=1, ncol=2)
plt.tight_layout()
plt.show()
##############################################################################
# References
# ----------
# .. footbibliography::
|
bsd-3-clause
|
frank-tancf/scikit-learn
|
examples/neighbors/plot_classification.py
|
287
|
1790
|
"""
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
|
bsd-3-clause
|
JsNoNo/scikit-learn
|
sklearn/preprocessing/tests/test_label.py
|
156
|
17626
|
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
|
bsd-3-clause
|
moutai/scikit-learn
|
examples/ensemble/plot_ensemble_oob.py
|
259
|
3265
|
"""
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
|
bsd-3-clause
|
caidongyun/BuildingMachineLearningSystemsWithPython
|
ch03/noise_analysis.py
|
24
|
2412
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sklearn.datasets
groups = [
'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware', 'comp.windows.x', 'sci.space']
train_data = sklearn.datasets.fetch_20newsgroups(subset="train",
categories=groups)
labels = train_data.target
num_clusters = 50 # sp.unique(labels).shape[0]
import nltk.stem
english_stemmer = nltk.stem.SnowballStemmer('english')
from sklearn.feature_extraction.text import TfidfVectorizer
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))
vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5,
stop_words='english', decode_error='ignore'
)
vectorized = vectorizer.fit_transform(train_data.data)
post_group = zip(train_data.data, train_data.target)
# Create a list of tuples that can be sorted by
# the length of the posts
posts_by_length = [(len(post[0]), post[0], train_data.target_names[post[1]])
                   for post in post_group]
graphics = sorted([post for post in posts_by_length if post[2] == 'comp.graphics'])
print(graphics[5])
# (245, 'From: [email protected]\nSubject: test....(sorry)\nOrganization:
# The University of Birmingham, United Kingdom\nLines: 1\nNNTP-Posting-Host: ibm3090.bham.ac.uk
# \n\n==============================================================================\n',
# 'comp.graphics')
noise_post = graphics[5][1]
analyzer = vectorizer.build_analyzer()
print(list(analyzer(noise_post)))
useful = set(analyzer(noise_post)).intersection(vectorizer.get_feature_names())
print(sorted(useful))
# ['ac', 'birmingham', 'host', 'kingdom', 'nntp', 'sorri', 'test', 'uk', 'unit', 'univers']
for term in sorted(useful):
print('IDF(%s)=%.2f' % (term,
vectorizer._tfidf.idf_[vectorizer.vocabulary_[term]]))
# IDF(ac)=3.51
# IDF(birmingham)=6.77
# IDF(host)=1.74
# IDF(kingdom)=6.68
# IDF(nntp)=1.77
# IDF(sorri)=4.14
# IDF(test)=3.83
# IDF(uk)=3.70
# IDF(unit)=4.42
# IDF(univers)=1.91
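# As a small cross-check (not in the original script), the IDF values printed
# above can be recomputed from the document frequencies, assuming the
# vectorizer's default smooth_idf=True, i.e. idf(t) = ln((1 + n) / (1 + df(t))) + 1.
import numpy as np
n_docs = vectorized.shape[0]
doc_freq = np.asarray((vectorized > 0).sum(axis=0)).ravel()
for term in sorted(useful):
    idx = vectorizer.vocabulary_[term]
    manual_idf = np.log((1.0 + n_docs) / (1.0 + doc_freq[idx])) + 1.0
    print('manual IDF(%s)=%.2f' % (term, manual_idf))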
|
mit
|
HyukjinKwon/spark
|
python/pyspark/pandas/tests/plot/test_series_plot_matplotlib.py
|
14
|
13615
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
from distutils.version import LooseVersion
from io import BytesIO
import unittest
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.testing.pandasutils import (
have_matplotlib,
matplotlib_requirement_message,
PandasOnSparkTestCase,
TestUtils,
)
if have_matplotlib:
import matplotlib
from matplotlib import pyplot as plt
matplotlib.use("agg")
@unittest.skipIf(not have_matplotlib, matplotlib_requirement_message)
class SeriesPlotMatplotlibTest(PandasOnSparkTestCase, TestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pd.set_option("plotting.backend", "matplotlib")
set_option("plotting.backend", "matplotlib")
set_option("plotting.max_rows", 1000)
@classmethod
def tearDownClass(cls):
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pd.reset_option("plotting.backend")
reset_option("plotting.backend")
reset_option("plotting.max_rows")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
@property
def psdf1(self):
return ps.from_pandas(self.pdf1)
@property
def psdf2(self):
return ps.range(1002)
@property
def pdf2(self):
return self.psdf2.to_pandas()
@staticmethod
def plot_to_base64(ax):
bytes_data = BytesIO()
ax.figure.savefig(bytes_data, format="png")
bytes_data.seek(0)
b64_data = base64.b64encode(bytes_data.read())
plt.close(ax.figure)
return b64_data
def test_bar_plot(self):
pdf = self.pdf1
psdf = self.psdf1
ax1 = pdf["a"].plot(kind="bar", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="bar", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot(kind="bar", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="bar", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_bar_plot_limited(self):
pdf = self.pdf2
psdf = self.psdf2
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["id"][:1000].plot.bar(colormap="Paired")
ax1.text(
1,
1,
"showing top 1000 elements only",
size=6,
ha="right",
va="bottom",
transform=ax1.transAxes,
)
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["id"].plot.bar(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_pie_plot(self):
pdf = self.pdf1
psdf = self.psdf1
ax1 = pdf["a"].plot.pie(colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot.pie(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot(kind="pie", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="pie", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_pie_plot_limited(self):
pdf = self.pdf2
psdf = self.psdf2
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["id"][:1000].plot.pie(colormap="Paired")
ax1.text(
1,
1,
"showing top 1000 elements only",
size=6,
ha="right",
va="bottom",
transform=ax1.transAxes,
)
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["id"].plot.pie(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_line_plot(self):
pdf = self.pdf1
psdf = self.psdf1
ax1 = pdf["a"].plot(kind="line", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="line", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot.line(colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot.line(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_barh_plot(self):
pdf = self.pdf1
psdf = self.psdf1
ax1 = pdf["a"].plot(kind="barh", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="barh", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_barh_plot_limited(self):
pdf = self.pdf2
psdf = self.psdf2
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["id"][:1000].plot.barh(colormap="Paired")
ax1.text(
1,
1,
"showing top 1000 elements only",
size=6,
ha="right",
va="bottom",
transform=ax1.transAxes,
)
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["id"].plot.barh(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_hist(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
psdf = ps.from_pandas(pdf)
def plot_to_base64(ax):
bytes_data = BytesIO()
ax.figure.savefig(bytes_data, format="png")
bytes_data.seek(0)
b64_data = base64.b64encode(bytes_data.read())
plt.close(ax.figure)
return b64_data
_, ax1 = plt.subplots(1, 1)
        # Using plot.hist() because pandas changes tick properties when hist() is called
ax1 = pdf["a"].plot.hist()
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["a"].hist()
self.assert_eq(plot_to_base64(ax1), plot_to_base64(ax2))
def test_hist_plot(self):
pdf = self.pdf1
psdf = self.psdf1
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["a"].plot.hist()
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["a"].plot.hist()
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot.hist(bins=15)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot.hist(bins=15)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot(kind="hist", bins=15)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot(kind="hist", bins=15)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["a"].plot.hist(bins=3, bottom=[2, 1, 3])
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["a"].plot.hist(bins=3, bottom=[2, 1, 3])
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_area_plot(self):
pdf = pd.DataFrame(
{
"sales": [3, 2, 3, 9, 10, 6],
"signups": [5, 5, 6, 12, 14, 13],
"visits": [20, 42, 28, 62, 81, 50],
},
index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
)
psdf = ps.from_pandas(pdf)
ax1 = pdf["sales"].plot(kind="area", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["sales"].plot(kind="area", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf["sales"].plot.area(colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf["sales"].plot.area(colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
# just a sanity check for df.col type
ax1 = pdf.sales.plot(kind="area", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.sales.plot(kind="area", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
def test_box_plot(self):
def check_box_plot(pser, psser, *args, **kwargs):
_, ax1 = plt.subplots(1, 1)
ax1 = pser.plot.box(*args, **kwargs)
_, ax2 = plt.subplots(1, 1)
ax2 = psser.plot.box(*args, **kwargs)
diffs = [
np.array([0, 0.5, 0, 0.5, 0, -0.5, 0, -0.5, 0, 0.5]),
np.array([0, 0.5, 0, 0]),
np.array([0, -0.5, 0, 0]),
]
try:
for i, (line1, line2) in enumerate(zip(ax1.get_lines(), ax2.get_lines())):
expected = line1.get_xydata().ravel()
actual = line2.get_xydata().ravel()
if i < 3:
actual += diffs[i]
self.assert_eq(pd.Series(expected), pd.Series(actual))
finally:
ax1.cla()
ax2.cla()
# Non-named Series
pser = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50], [0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10])
psser = ps.from_pandas(pser)
spec = [(self.pdf1.a, self.psdf1.a), (pser, psser)]
for p, k in spec:
check_box_plot(p, k)
check_box_plot(p, k, showfliers=True)
check_box_plot(p, k, sym="")
check_box_plot(p, k, sym=".", color="r")
check_box_plot(p, k, use_index=False, labels=["Test"])
check_box_plot(p, k, usermedians=[2.0])
check_box_plot(p, k, conf_intervals=[(1.0, 3.0)])
val = (1, 3)
self.assertRaises(
ValueError, lambda: check_box_plot(self.pdf1, self.psdf1, usermedians=[2.0, 3.0])
)
self.assertRaises(
ValueError, lambda: check_box_plot(self.pdf1, self.psdf1, conf_intervals=[val, val])
)
self.assertRaises(
ValueError, lambda: check_box_plot(self.pdf1, self.psdf1, conf_intervals=[(1,)])
)
def test_kde_plot(self):
def moving_average(a, n=10):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
def check_kde_plot(pdf, psdf, *args, **kwargs):
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["a"].plot.kde(*args, **kwargs)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["a"].plot.kde(*args, **kwargs)
try:
for i, (line1, line2) in enumerate(zip(ax1.get_lines(), ax2.get_lines())):
expected = line1.get_xydata().ravel()
actual = line2.get_xydata().ravel()
                    # TODO: Due to an implementation difference, the output differs
                    # from pandas'. We should identify the root cause of the
                    # difference and reduce the diff.
                    # Note: the data range from 1 to 50, so both outputs are
                    # smoothed with a moving average before being compared.
self.assertTrue(
np.allclose(moving_average(actual), moving_average(expected), rtol=3)
)
finally:
ax1.cla()
ax2.cla()
check_kde_plot(self.pdf1, self.psdf1, bw_method=0.3)
check_kde_plot(self.pdf1, self.psdf1, ind=[1, 2, 3, 4, 5], bw_method=3.0)
def test_empty_hist(self):
pdf = self.pdf1.assign(categorical="A")
psdf = ps.from_pandas(pdf)
psser = psdf["categorical"]
with self.assertRaisesRegex(TypeError, "Empty 'DataFrame': no numeric data to plot"):
psser.plot.hist()
def test_single_value_hist(self):
pdf = self.pdf1.assign(single=2)
psdf = ps.from_pandas(pdf)
_, ax1 = plt.subplots(1, 1)
ax1 = pdf["single"].plot.hist()
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf["single"].plot.hist()
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
if __name__ == "__main__":
from pyspark.pandas.tests.plot.test_series_plot_matplotlib import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
karlnapf/kernel_exp_family
|
kernel_exp_family/examples/demo_xvalidation_grid_search_manual.py
|
1
|
2087
|
from kernel_exp_family.estimators.lite.gaussian import KernelExpLiteGaussian
from kernel_exp_family.examples.tools import visualise_fit_2d
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
"""
    This demo shows how to select the kernel parameter of the lite estimator
    with a manual grid search, using the built-in cross-validation. As the
    cross-validation is implemented in the API base class, this can easily be
    changed to other parameters or estimators.
"""
N = 200
D = 2
# fit model to samples from a standard Gaussian
X = np.random.randn(N, D)
# create grid over sigma parameters, fixed regulariser
log_sigmas = np.linspace(-5, 10, 20)
lmbda = 0.001
# evaluate objective function over all those parameters
O = np.zeros(len(log_sigmas))
O_lower = np.zeros(len(log_sigmas))
O_upper = np.zeros(len(log_sigmas))
# grid search
    for i, log_sigma in enumerate(log_sigmas):
        est = KernelExpLiteGaussian(np.exp(log_sigma), lmbda, D, N)
        # this is an array of shape num_repetitions x num_folds, each entry
        # containing an objective value
xval_result = est.xvalidate_objective(X, num_folds=5, num_repetitions=2)
O[i] = np.mean(xval_result)
O_lower[i] = np.percentile(xval_result, 10)
O_upper[i] = np.percentile(xval_result, 90)
# best parameter
best_log_sigma = log_sigmas[np.argmin(O)]
# visualisation
plt.figure()
plt.plot([best_log_sigma, best_log_sigma], [np.min(O), np.max(O)], 'r')
plt.plot(log_sigmas, O, 'b-')
plt.plot(log_sigmas, O_lower, 'b--')
plt.plot(log_sigmas, O_upper, 'b--')
plt.xlim([np.min(log_sigmas) - 1, np.max(log_sigmas) + 1])
plt.xlabel("log sigma")
plt.ylabel("objective")
plt.title("lmbda=%.4f" % lmbda)
plt.legend(["Best sigma", "Performance"])
plt.legend(["Best sigma", "Performance", "80% percentile"])
plt.tight_layout()
est.sigma = np.exp(best_log_sigma)
est.fit(X)
visualise_fit_2d(est, X)
plt.show()
|
bsd-3-clause
|
TheSriram/MLT4Trading
|
Project 2/KNNLearner.py
|
2
|
1751
|
import math
import numpy
class KNNLearner(object):
"""The Main KNNLearner class"""
def __init__(self, k):
super(KNNLearner, self).__init__()
self.k = k
def addEvidence(self, XTrain, Ytrain):
self.XTrain = XTrain
self.Ytrain = Ytrain
    def query(self, XTes):
        final_Yresult = []
        for XTest in XTes:
            secondary_hash = {}
            k_compare_list = []
            # Euclidean distance from the query point to every training point
            final = numpy.absolute(numpy.array([XTest - row for row in self.XTrain]))
            final_distance = numpy.array([math.sqrt(math.pow(row[0], 2) + math.pow(row[1], 2)) for row in final])
            for key, value in zip(final_distance, self.XTrain):
                secondary_hash[key] = value
            sorted_final_distance = numpy.sort(final_distance)
            # average the Y values of the k nearest training points
            for i in xrange(0, self.k):
                k_compare_list.append(secondary_hash[sorted_final_distance[i]])
            k_neighbour = numpy.array([self.XtoYMap[tuple(value)] for value in k_compare_list])
            final_Yresult.append(k_neighbour.mean())
        final_XYresult = numpy.insert(XTes, 2, values=final_Yresult, axis=1)
        return (numpy.squeeze(numpy.asarray(final_XYresult)), final_Yresult)
def getflatcsv(self,fname):
inf = open(fname)
return numpy.array([map(float,s.strip().split(',')) for s in inf.readlines()])
def build_hash(self, Total):
new_dict ={}
for element in Total:
element_key = tuple(element[:2])
element_value = tuple(element[2:])
new_dict[element_key]=element_value
self.XtoYMap = new_dict
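# A minimal usage sketch (not part of the original file); the training data and
# query points below are hypothetical and only illustrate the call sequence
# addEvidence() -> build_hash() -> query().
if __name__ == '__main__':
    # two feature columns plus one target column per training row
    train = numpy.array([[x, 2.0 * x, x + 1.0]
                         for x in numpy.linspace(0.0, 1.0, 20)])
    learner = KNNLearner(k=3)
    learner.addEvidence(train[:, :2], train[:, 2])
    learner.build_hash(train)
    queries = numpy.array([[0.1, 0.2], [0.5, 0.95]])
    merged, predictions = learner.query(queries)
    print(predictions)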
|
apache-2.0
|
skrueger111/zazzie
|
src/sassie/test_sassie/calculate/sld_mol/test_smaller2.py
|
2
|
44871
|
'''
SASSIE: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
import string
import shutil
import numpy
import multiprocessing
import sasmol.sasmol as sasmol
import sassie.calculate.sld_mol.gui_mimic_sld_mol as gui_mimic_sld_mol
#import gui_mimic_sld_mol as gui_mimic_sld_mol
import filecmp
from unittest import main
from nose.tools import assert_equals
from mocker import Mocker, MockerTestCase
pdb_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'data', 'pdb_common') + os.path.sep
dcd_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'data', 'dcd_common') + os.path.sep
other_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'data', 'other_common') + os.path.sep
module_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'data', 'calculate', 'sld_mol') + os.path.sep
paths = {'pdb_data_path' : pdb_data_path, 'dcd_data_path' : dcd_data_path, 'other_data_path' : other_data_path, 'module_data_path' : module_data_path}
class Test_Sld_Mol(MockerTestCase):
'''
System integration test for sld_mol.py / sassie 1.0
SLD_MOL is the module that calculates the scattering length density profile
from a dcd/pdb file
    This method compares an experimentally derived SLD profile with the heavy-atom
    distribution calculated from a PDB or DCD file containing protein structure(s).
    It performs a fit allowing a normalization factor, z-position and constant shift.
    The calculated SLD profile is convolved with a Gaussian of user-defined width to
    mimic instrument resolution and roughness, and a text file with the frame number
    and fit_error is written.
INPUT: variable descriptions:
runname: project name
pdbfile: reference PDB file
dcdfile: input filename (DCD or PDB)
expdatafile: experimental SLD data file name
outputfile: output file name
runtype: 0 = average SLD over all structures, 1 = best fit SLD for each individual structure
bulk_sld: SLD for bulk solvent
xon: scattering type: 0 = neutron, 1 = x-ray
num_deut_regions: number of fully deuterated regions in molecule
deut_low_res: low residue number(s) for deuterated region(s)
deut_high_res: high residue number(s) for deuterated region(s)
sldfit: 0 = no optimization of z0 and A0, 1 = optimize z0 and A0
sldoffset: offset correction to experimental SLD
dbin: bin width for z values
width: SLD width for Gaussian smoothing
zfit0: z0 value (anchoring position) for calculated SLDs initial estimate
zfitmin: minimum z value used during optimization
zfitmax: maximum z value used during optimization
zevalmin: minimum z to evaluate experimental SLD during error calculation
zevalmax: maximum z to evaluate experimental SLD during error calculation
A0: fraction surface coverage for calculated SLDs initial estimate
Amin: minimum A0 value used during optimization
Amax: maximum A0 value used during optimization
OUTPUT:
files stored in ./"runname"/sld_mol/ directory:
outputfile: output file containing z0, A0 and fit-error for each calculated SLD
average_sld.txt: file containing average SLD
sldfile*.txt: files containing individual SLDs for each frame (runtype = 1 only)
bestworstfile.txt: file containing the filenames and error values for the best- and worst-fitting structures
Use cases:
1. runtype = 0; sldfit = 0
a. input file is DCD
b. input file is PDB
2. runtype = 0; sldfit = 1
a. input file is DCD
b. input file is PDB
3. runtype = 1; sldfit = 0
a. input file is DCD
b. input file is PDB
4. runtype = 1; sldfit = 1 ##not recommended but allowed by software so test for at least one case
a. input file is DCD
b. input file is PDB
Selection options (apply to all cases above):
a. sldoffset
b. xon
c. num_deut_regions
i. single deuterated region
ii. multiple deuterated regions
Inputs tested:
runname: string project name
path: string input file path
pdbfile: string input PDB file
dcdfile: string input DCD or PDB file
expdatafile: string experimental SLD file
outputfile: string output file containing z0, A0 and fit-error for each SLD
runtype: integer 0 = average sld over all structures, 1 = best fit sld for each individual structure
bulk_sld: float SLD for bulk solvent
xon: integer 0 = neutron, 1 = x-ray
num_deut_regions: integer number of deuterated regions in molecule
deut_low_res: int_array low residue number(s) for deuterated region(s)
deut_high_res: int_array high residue number(s) for deuterated region(s)
sldfit: integer 0 = no optimization of z0 and A0, 1 = optimize z0 and A0
sldoffset: float offset to experimental SLD
dbin: float bin size for z values
width: float bin width for Gaussian smoothing
zfit0: float z0 value for calculated SLDs
zfitmin: float minimum z value used during optimization
zfitmax: float maximum z value used during optimization
zevalmin: float minimum z to evaluate experimental SLD
zevalmax: float maximum z to evaluate experimental SLD
A0: float fraction surface coverage for calculated SLDs
Amin: float minimum A0 value used during optimization
Amax: float maximum A0 value used during optimization
plotflag: integer flag for plotting data (0: no plot; 1: matplotlib; 2: gnuplot)
Test tree:
project name
input/output path
*****************************
runtype = 0; sldfit = 0
*****************************
    reference PDB + input DCD, no deut regions
    reference PDB + input PDB, no deut regions
    reference PDB + input DCD, single deut region
    reference PDB + input PDB, single deut region
    reference PDB + input DCD, multiple deut regions
    reference PDB + input PDB, multiple deut regions
    sldoffset (tested for each combination above)
    xon (tested for the no-deut-regions cases only)
'''
module = 'sld_mol'
def setUp(self):
gui_mimic_sld_mol.test_variables(self, paths)
def assert_list_almost_equal(self, a, b, places=5):
if (len(a) != len(b)):
raise TypeError
else:
for i in range(len(a)):
if isinstance(a[i], (int, float, numpy.generic)):
if (numpy.isnan(a[i]) and numpy.isnan(b[i])):
continue
self.assertAlmostEqual(a[i], b[i], places)
else:
self.assert_list_almost_equal(a[i], b[i], places)
def check_lines(self, fname, expected_lines):
'''
compares number of expected lines with number of actual lines in a file
'''
lines = 0
with open(fname) as f:
for line in f:
lines = lines + 1
# print 'lines: ', lines
if (lines == expected_lines):
return True
else:
return False
def test_15(self):
'''
test runtype=0, sldfit = 1, input DCD, no deut regions
'''
self.sldfit = '1'
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_none', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_none', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_16(self):
'''
test runtype=0, sldfit = 1, input DCD, single deut region
'''
self.numdregions = '1'
self.z0 = '-1.1'
self.A0 = '0.12'
self.sldfit = '1'
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_one', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_one', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_17(self):
'''
test runtype=0, sldfit = 1, input DCD, multiple deut regions
'''
self.numdregions = '2'
self.lowres = '1,150'
self.highres = '145,200'
self.z0 = '-1.1'
self.A0 = '0.12'
self.sldfit = '1'
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_mult', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_mult', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_18(self):
'''
test runtype=0, sldfit = 1, input DCD, no deut regions, sldoffset
'''
self.sldfit = '1'
self.z0 = '2.7'
self.A0 = '0.29'
self.sldoffset = '10.0'
self.zevalmin = '17.5'
self.zevalmax = '189.0'
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_none_sldoffset', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_none_sldoffset', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_19(self):
'''
test runtype=0, sldfit = 1, input DCD, one deut region, sldoffset
'''
self.sldfit = '1'
self.numdregions = '1'
self.z0 = '3.8'
self.A0 = '0.12'
self.sldoffset = '10.0'
self.zevalmin = '17.5'
self.zevalmax = '189.0'
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_one_sldoffset', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_one_sldoffset', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_20(self):
'''
test runtype=0, sldfit = 1, input DCD, mult deut regions, sldoffset
'''
self.sldfit = '1'
self.numdregions = '2'
self.lowres = '1,150'
self.highres = '145,200'
self.z0 = '3.8'
self.A0 = '0.12'
self.sldoffset = '10.0'
self.zevalmin = '17.5'
self.zevalmax = '189.0'
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_mult_sldoffset', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_mult_sldoffset', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_21(self):
'''
test runtype=0, sldfit = 1, input PDB, no deut regions
'''
self.sldfit = '1'
self.dcdfile = os.path.join(pdb_data_path, 'f12.pdb')
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_none', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_none', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_22(self):
'''
test runtype=0, sldfit = 1, input PDB, one deut region
'''
self.numdregions = '1'
self.z0 = '-1.1'
self.A0 = '0.12'
self.sldfit = '1'
self.dcdfile = os.path.join(pdb_data_path, 'f12.pdb')
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_one', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_one', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_23(self):
'''
test runtype=0, sldfit = 1, input PDB, multiple deut regions
'''
self.numdregions = '2'
self.lowres = '1,150'
self.highres = '145,200'
self.z0 = '-1.1'
self.A0 = '0.12'
self.sldfit = '1'
self.dcdfile = os.path.join(pdb_data_path, 'f12.pdb')
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_mult', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_mult', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_24(self):
'''
test runtype=0, sldfit = 1, input PDB, no deut regions, sldoffset
'''
self.sldfit = '1'
self.z0 = '2.7'
self.A0 = '0.29'
self.sldoffset = '10.0'
self.zevalmin = '17.5'
self.zevalmax = '189.0'
self.dcdfile = os.path.join(pdb_data_path,'f12.pdb')
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_none_sldoffset', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_none_sldoffset', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_25(self):
'''
test runtype=0, sldfit = 1, input PDB, one deut region, sldoffset
'''
self.sldfit = '1'
self.numdregions = '1'
self.z0 = '3.8'
self.A0 = '0.12'
self.sldoffset = '10.0'
self.zevalmin = '17.5'
self.zevalmax = '189.0'
self.dcdfile = os.path.join(pdb_data_path,'f12.pdb')
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_one_sldoffset', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_one_sldoffset', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_26(self):
'''
test runtype=0, sldfit = 1, input PDB, mult deut regions, sldoffset
'''
self.sldfit = '1'
self.numdregions = '2'
self.lowres = '1,150'
self.highres = '145,200'
self.z0 = '3.8'
self.A0 = '0.12'
self.sldoffset = '10.0'
self.zevalmin = '17.5'
self.zevalmax = '189.0'
self.dcdfile = os.path.join(pdb_data_path,'f12.pdb')
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_mult_sldoffset', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 3)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_mult_sldoffset', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_27(self):
'''
test runtype=0, sldfit = 1, input DCD, no deut regions, xon
'''
self.sldfit = '1'
self.xon = '1'
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_none_xon', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 1)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_dcd_none_xon', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def test_28(self):
'''
test runtype=0, sldfit = 1, input pdb, no deut regions, xon
'''
self.sldfit = '1'
self.xon = '1'
self.dcdfile = os.path.join(pdb_data_path,'f12.pdb')
gui_mimic_sld_mol.run_module(self)
''' confirm values in output files are correct to within 3 decimal places '''
outfile = open(os.path.join(self.runname, self.module, self.outputfile), 'r').readlines()
outz = []
outa = []
outerr = []
for i in range(len(outfile)):
lin = string.split(outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
outz.append(float(lin[1]))
outa.append(float(lin[2]))
outerr.append(float(lin[3]))
correct_outfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_none_xon', self.outputfile), 'r').readlines()
corroutz = []
corrouta = []
corrouterr = []
for i in range(len(correct_outfile)):
lin = string.split(correct_outfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corroutz.append(float(lin[1]))
corrouta.append(float(lin[2]))
corrouterr.append(float(lin[3]))
self.assert_list_almost_equal(corroutz, outz, self.precision)
self.assert_list_almost_equal(corrouta, outa, self.precision)
self.assert_list_almost_equal(corrouterr, outerr, 1)
avgfile = open(os.path.join(self.runname, self.module, 'average_sld.txt'), 'r').readlines()
avgz = []
avgsld = []
for i in xrange(len(avgfile)):
lin = string.split(avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
avgz.append(float(lin[0]))
avgsld.append(float(lin[1]))
corr_avgfile = open(os.path.join(
module_data_path, self.runname, self.module, '01_pdb_none_xon', 'average_sld.txt'), 'r').readlines()
corravgz = []
corravgsld = []
for i in xrange(len(corr_avgfile)):
lin = string.split(corr_avgfile[i])
if(lin[0][0] != "#" and len(lin) >= 2):
corravgz.append(float(lin[0]))
corravgsld.append(float(lin[1]))
self.assert_list_almost_equal(corravgz, avgz, self.precision)
self.assert_list_almost_equal(corravgsld, avgsld, self.precision)
def tearDown(self):
if os.path.exists(self.runname):
shutil.rmtree(self.runname)
if __name__=='__main__':
main()
|
gpl-3.0
|
anntzer/scikit-learn
|
sklearn/mixture/tests/test_gaussian_mixture.py
|
11
|
42047
|
# Author: Wei Xue <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import sys
import copy
import warnings
import pytest
import numpy as np
from scipy import stats, linalg
from sklearn.covariance import EmpiricalCovariance
from sklearn.datasets import make_spd_matrix
from io import StringIO
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.mixture import GaussianMixture
from sklearn.mixture._gaussian_mixture import (
_estimate_gaussian_covariances_full,
_estimate_gaussian_covariances_tied,
_estimate_gaussian_covariances_diag,
_estimate_gaussian_covariances_spherical,
_compute_precision_cholesky,
_compute_log_det_cholesky,
)
from sklearn.exceptions import ConvergenceWarning, NotFittedError
from sklearn.utils.extmath import fast_logdet
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
def generate_data(n_samples, n_features, weights, means, precisions,
covariance_type):
rng = np.random.RandomState(0)
X = []
if covariance_type == 'spherical':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['spherical'])):
X.append(rng.multivariate_normal(m, c * np.eye(n_features),
int(np.round(w * n_samples))))
if covariance_type == 'diag':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['diag'])):
X.append(rng.multivariate_normal(m, np.diag(c),
int(np.round(w * n_samples))))
if covariance_type == 'tied':
for _, (w, m) in enumerate(zip(weights, means)):
X.append(rng.multivariate_normal(m, precisions['tied'],
int(np.round(w * n_samples))))
if covariance_type == 'full':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['full'])):
X.append(rng.multivariate_normal(m, c,
int(np.round(w * n_samples))))
X = np.vstack(X)
return X
class RandomData:
def __init__(self, rng, n_samples=200, n_components=2, n_features=2,
scale=50):
self.n_samples = n_samples
self.n_components = n_components
self.n_features = n_features
self.weights = rng.rand(n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.rand(n_components, n_features) * scale
self.covariances = {
'spherical': .5 + rng.rand(n_components),
'diag': (.5 + rng.rand(n_components, n_features)) ** 2,
'tied': make_spd_matrix(n_features, random_state=rng),
'full': np.array([
make_spd_matrix(n_features, random_state=rng) * .5
for _ in range(n_components)])}
self.precisions = {
'spherical': 1. / self.covariances['spherical'],
'diag': 1. / self.covariances['diag'],
'tied': linalg.inv(self.covariances['tied']),
'full': np.array([linalg.inv(covariance)
for covariance in self.covariances['full']])}
self.X = dict(zip(COVARIANCE_TYPE, [generate_data(
n_samples, n_features, self.weights, self.means, self.covariances,
covar_type) for covar_type in COVARIANCE_TYPE]))
self.Y = np.hstack([np.full(int(np.round(w * n_samples)), k,
dtype=int)
for k, w in enumerate(self.weights)])
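# Illustrative sketch (not part of the original test suite; the helper name
# `_demo_random_data_fit` is an assumption): how RandomData and GaussianMixture
# are typically combined in the tests below.
def _demo_random_data_fit():
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, n_samples=300, n_components=2, n_features=2)
    X = rand_data.X['full']
    gmm = GaussianMixture(n_components=rand_data.n_components,
                          covariance_type='full', random_state=rng).fit(X)
    # the recovered mixing weights should roughly match the generating ones
    return np.sort(gmm.weights_), np.sort(rand_data.weights)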
def test_gaussian_mixture_attributes():
# test bad parameters
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
n_components_bad = 0
gmm = GaussianMixture(n_components=n_components_bad)
assert_raise_message(ValueError,
"Invalid value for 'n_components': %d "
"Estimation requires at least one component"
% n_components_bad, gmm.fit, X)
# covariance_type should be in [spherical, diag, tied, full]
covariance_type_bad = 'bad_covariance_type'
gmm = GaussianMixture(covariance_type=covariance_type_bad)
assert_raise_message(ValueError,
"Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% covariance_type_bad,
gmm.fit, X)
tol_bad = -1
gmm = GaussianMixture(tol=tol_bad)
assert_raise_message(ValueError,
"Invalid value for 'tol': %.5f "
"Tolerance used by the EM must be non-negative"
% tol_bad, gmm.fit, X)
reg_covar_bad = -1
gmm = GaussianMixture(reg_covar=reg_covar_bad)
assert_raise_message(ValueError,
"Invalid value for 'reg_covar': %.5f "
"regularization on covariance must be "
"non-negative" % reg_covar_bad, gmm.fit, X)
max_iter_bad = 0
gmm = GaussianMixture(max_iter=max_iter_bad)
assert_raise_message(ValueError,
"Invalid value for 'max_iter': %d "
"Estimation requires at least one iteration"
% max_iter_bad, gmm.fit, X)
n_init_bad = 0
gmm = GaussianMixture(n_init=n_init_bad)
assert_raise_message(ValueError,
"Invalid value for 'n_init': %d "
"Estimation requires at least one run"
% n_init_bad, gmm.fit, X)
init_params_bad = 'bad_method'
gmm = GaussianMixture(init_params=init_params_bad)
assert_raise_message(ValueError,
"Unimplemented initialization method '%s'"
% init_params_bad,
gmm.fit, X)
# test good parameters
n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1
covariance_type, init_params = 'full', 'random'
gmm = GaussianMixture(n_components=n_components, tol=tol, n_init=n_init,
max_iter=max_iter, reg_covar=reg_covar,
covariance_type=covariance_type,
init_params=init_params).fit(X)
assert gmm.n_components == n_components
assert gmm.covariance_type == covariance_type
assert gmm.tol == tol
assert gmm.reg_covar == reg_covar
assert gmm.max_iter == max_iter
assert gmm.n_init == n_init
assert gmm.init_params == init_params
def test_check_X():
from sklearn.mixture._base import _check_X
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 2, 2
X_bad_dim = rng.rand(n_components - 1, n_features)
assert_raise_message(ValueError,
'Expected n_samples >= n_components '
'but got n_components = %d, n_samples = %d'
% (n_components, X_bad_dim.shape[0]),
_check_X, X_bad_dim, n_components)
X_bad_dim = rng.rand(n_components, n_features + 1)
assert_raise_message(ValueError,
'Expected the input data X have %d features, '
'but got %d features'
% (n_features, X_bad_dim.shape[1]),
_check_X, X_bad_dim, n_components, n_features)
X = rng.rand(n_samples, n_features)
assert_array_equal(X, _check_X(X, n_components, n_features))
def test_check_weights():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check bad shape
weights_bad_shape = rng.rand(n_components, 1)
g.weights_init = weights_bad_shape
assert_raise_message(ValueError,
"The parameter 'weights' should have the shape of "
"(%d,), but got %s" %
(n_components, str(weights_bad_shape.shape)),
g.fit, X)
# Check bad range
weights_bad_range = rng.rand(n_components) + 1
g.weights_init = weights_bad_range
assert_raise_message(ValueError,
"The parameter 'weights' should be in the range "
"[0, 1], but got max value %.5f, min value %.5f"
% (np.min(weights_bad_range),
np.max(weights_bad_range)),
g.fit, X)
# Check bad normalization
weights_bad_norm = rng.rand(n_components)
weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1)
g.weights_init = weights_bad_norm
assert_raise_message(ValueError,
"The parameter 'weights' should be normalized, "
"but got sum(weights) = %.5f"
% np.sum(weights_bad_norm),
g.fit, X)
# Check good weights matrix
weights = rand_data.weights
g = GaussianMixture(weights_init=weights, n_components=n_components)
g.fit(X)
assert_array_equal(weights, g.weights_init)
def test_check_means():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check means bad shape
means_bad_shape = rng.rand(n_components + 1, n_features)
g.means_init = means_bad_shape
assert_raise_message(ValueError,
"The parameter 'means' should have the shape of ",
g.fit, X)
# Check good means matrix
means = rand_data.means
g.means_init = means
g.fit(X)
assert_array_equal(means, g.means_init)
def test_check_precisions():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
# Define the bad precisions for each covariance_type
precisions_bad_shape = {
'full': np.ones((n_components + 1, n_features, n_features)),
'tied': np.ones((n_features + 1, n_features + 1)),
'diag': np.ones((n_components + 1, n_features)),
'spherical': np.ones((n_components + 1))}
# Define not positive-definite precisions
precisions_not_pos = np.ones((n_components, n_features, n_features))
precisions_not_pos[0] = np.eye(n_features)
precisions_not_pos[0, 0, 0] = -1.
precisions_not_positive = {
'full': precisions_not_pos,
'tied': precisions_not_pos[0],
'diag': np.full((n_components, n_features), -1.),
'spherical': np.full(n_components, -1.)}
not_positive_errors = {
'full': 'symmetric, positive-definite',
'tied': 'symmetric, positive-definite',
'diag': 'positive',
'spherical': 'positive'}
for covar_type in COVARIANCE_TYPE:
X = RandomData(rng).X[covar_type]
g = GaussianMixture(n_components=n_components,
covariance_type=covar_type,
random_state=rng)
# Check precisions with bad shapes
g.precisions_init = precisions_bad_shape[covar_type]
assert_raise_message(ValueError,
"The parameter '%s precision' should have "
"the shape of" % covar_type,
g.fit, X)
# Check not positive precisions
g.precisions_init = precisions_not_positive[covar_type]
assert_raise_message(ValueError,
"'%s precision' should be %s"
% (covar_type, not_positive_errors[covar_type]),
g.fit, X)
# Check the correct init of precisions_init
g.precisions_init = rand_data.precisions[covar_type]
g.fit(X)
assert_array_equal(rand_data.precisions[covar_type], g.precisions_init)
def test_suffstat_sk_full():
    # compare the precision matrix computed from the
    # EmpiricalCovariance.covariance_ fitted on X*sqrt(resp)
    # with _estimate_gaussian_covariances_full, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
# special case 1, assuming data is "centered"
X = rng.rand(n_samples, n_features)
resp = rng.rand(n_samples, 1)
X_resp = np.sqrt(resp) * X
nk = np.array([n_samples])
xk = np.zeros((1, n_features))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=True)
ecov.fit(X_resp)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
# special case 2, assuming resp are all ones
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean(axis=0).reshape((1, -1))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=False)
ecov.fit(X)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
def test_suffstat_sk_tied():
# use equation Nk * Sk / N = S_tied
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_full = np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full,
0) / n_samples
covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
ecov.covariance_ = covars_pred_full
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, 'tied')
precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T)
precs_est = linalg.inv(covars_pred_tied)
assert_array_almost_equal(precs_est, precs_pred)
def test_suffstat_sk_diag():
# test against 'full' case
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
for (cov_full, cov_diag) in zip(covars_pred_full, covars_pred_diag):
ecov.covariance_ = np.diag(np.diag(cov_full))
cov_diag = np.diag(cov_diag)
assert_almost_equal(ecov.error_norm(cov_diag, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(cov_diag, norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, 'diag')
assert_almost_equal(covars_pred_diag, 1. / precs_chol_pred ** 2)
def test_gaussian_suffstat_sk_spherical():
    # the spherical covariance equals the variance of the one-dimensional
    # data after flattening, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
X = rng.rand(n_samples, n_features)
X = X - X.mean()
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean()
covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X,
nk, xk, 0)
covars_pred_spherical2 = (np.dot(X.flatten().T, X.flatten()) /
(n_features * n_samples))
assert_almost_equal(covars_pred_spherical, covars_pred_spherical2)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical,
'spherical')
assert_almost_equal(covars_pred_spherical, 1. / precs_chol_pred ** 2)
def test_compute_log_det_cholesky():
n_features = 2
rand_data = RandomData(np.random.RandomState(0))
for covar_type in COVARIANCE_TYPE:
covariance = rand_data.covariances[covar_type]
        if covar_type == 'full':
            predicted_det = np.array([linalg.det(cov) for cov in covariance])
        elif covar_type == 'tied':
            predicted_det = linalg.det(covariance)
        elif covar_type == 'diag':
            predicted_det = np.array([np.prod(cov) for cov in covariance])
        elif covar_type == 'spherical':
            predicted_det = covariance ** n_features
        # We compute the Cholesky decomposition of the precision derived from
        # the covariance and compare its log-determinant with the one above
        expected_det = _compute_log_det_cholesky(_compute_precision_cholesky(
            covariance, covar_type), covar_type, n_features=n_features)
        assert_array_almost_equal(expected_det, - .5 * np.log(predicted_det))
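# Sketch of the identity the test above relies on (illustrative only; the
# helper name `_demo_log_det_identity` is an assumption, it is not part of the
# original suite): for a symmetric positive-definite covariance S with
# precision P = S^{-1} and Cholesky factor L (P = L L^T),
# log det(L) = -0.5 * log det(S).
def _demo_log_det_identity():
    rng = np.random.RandomState(1)
    A = rng.rand(3, 3)
    S = np.dot(A, A.T) + 3 * np.eye(3)        # a well-conditioned SPD matrix
    L = linalg.cholesky(linalg.inv(S), lower=True)
    log_det_chol = np.sum(np.log(np.diag(L)))
    return log_det_chol, -.5 * fast_logdet(S)  # these two agree numerically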
def _naive_lmvnpdf_diag(X, means, covars):
resp = np.empty((len(X), len(means)))
stds = np.sqrt(covars)
for i, (mean, std) in enumerate(zip(means, stds)):
resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1)
return resp
def test_gaussian_mixture_log_probabilities():
from sklearn.mixture._gaussian_mixture import _estimate_log_gaussian_prob
    # test against _naive_lmvnpdf_diag
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_samples = 500
n_features = rand_data.n_features
n_components = rand_data.n_components
means = rand_data.means
covars_diag = rng.rand(n_components, n_features)
X = rng.rand(n_samples, n_features)
log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag)
# full covariances
precs_full = np.array([np.diag(1. / np.sqrt(x)) for x in covars_diag])
log_prob = _estimate_log_gaussian_prob(X, means, precs_full, 'full')
assert_array_almost_equal(log_prob, log_prob_naive)
# diag covariances
precs_chol_diag = 1. / np.sqrt(covars_diag)
log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, 'diag')
assert_array_almost_equal(log_prob, log_prob_naive)
# tied
covars_tied = np.array([x for x in covars_diag]).mean(axis=0)
precs_tied = np.diag(np.sqrt(1. / covars_tied))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[covars_tied] * n_components)
log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, 'tied')
assert_array_almost_equal(log_prob, log_prob_naive)
# spherical
covars_spherical = covars_diag.mean(axis=1)
precs_spherical = 1. / np.sqrt(covars_diag.mean(axis=1))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[[k] * n_features for k in
covars_spherical])
log_prob = _estimate_log_gaussian_prob(X, means,
precs_spherical, 'spherical')
assert_array_almost_equal(log_prob, log_prob_naive)
# skip tests on weighted_log_probabilities, log_weights
def test_gaussian_mixture_estimate_log_prob_resp():
# test whether responsibilities are normalized
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=5)
n_samples = rand_data.n_samples
n_features = rand_data.n_features
n_components = rand_data.n_components
X = rng.rand(n_samples, n_features)
for covar_type in COVARIANCE_TYPE:
weights = rand_data.weights
means = rand_data.means
precisions = rand_data.precisions[covar_type]
g = GaussianMixture(n_components=n_components, random_state=rng,
weights_init=weights, means_init=means,
precisions_init=precisions,
covariance_type=covar_type)
g.fit(X)
resp = g.predict_proba(X)
assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples))
assert_array_equal(g.weights_init, weights)
assert_array_equal(g.means_init, means)
assert_array_equal(g.precisions_init, precisions)
def test_gaussian_mixture_predict_predict_proba():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
Y = rand_data.Y
g = GaussianMixture(n_components=rand_data.n_components,
random_state=rng, weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions[covar_type],
covariance_type=covar_type)
        # Check that a NotFittedError is raised if predict is called before fit
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this estimator.", g.predict, X)
g.fit(X)
Y_pred = g.predict(X)
Y_pred_proba = g.predict_proba(X).argmax(axis=1)
assert_array_equal(Y_pred, Y_pred_proba)
assert adjusted_rand_score(Y, Y_pred) > .95
@pytest.mark.filterwarnings("ignore:.*did not converge.*")
@pytest.mark.parametrize('seed, max_iter, tol', [
(0, 2, 1e-7), # strict non-convergence
(1, 2, 1e-1), # loose non-convergence
(3, 300, 1e-7), # strict convergence
(4, 300, 1e-1), # loose convergence
])
def test_gaussian_mixture_fit_predict(seed, max_iter, tol):
rng = np.random.RandomState(seed)
rand_data = RandomData(rng)
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
Y = rand_data.Y
g = GaussianMixture(n_components=rand_data.n_components,
random_state=rng, weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions[covar_type],
covariance_type=covar_type,
max_iter=max_iter, tol=tol)
# check if fit_predict(X) is equivalent to fit(X).predict(X)
f = copy.deepcopy(g)
Y_pred1 = f.fit(X).predict(X)
Y_pred2 = g.fit_predict(X)
assert_array_equal(Y_pred1, Y_pred2)
assert adjusted_rand_score(Y, Y_pred2) > .95
def test_gaussian_mixture_fit_predict_n_init():
# Check that fit_predict is equivalent to fit.predict, when n_init > 1
X = np.random.RandomState(0).randn(1000, 5)
gm = GaussianMixture(n_components=5, n_init=5, random_state=0)
y_pred1 = gm.fit_predict(X)
y_pred2 = gm.predict(X)
assert_array_equal(y_pred1, y_pred2)
def test_gaussian_mixture_fit():
# recover the ground truth
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_features = rand_data.n_features
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=20,
reg_covar=0, random_state=rng,
covariance_type=covar_type)
g.fit(X)
# needs more data to pass the test with rtol=1e-7
assert_allclose(np.sort(g.weights_), np.sort(rand_data.weights),
rtol=0.1, atol=1e-2)
arg_idx1 = g.means_[:, 0].argsort()
arg_idx2 = rand_data.means[:, 0].argsort()
assert_allclose(g.means_[arg_idx1], rand_data.means[arg_idx2],
rtol=0.1, atol=1e-2)
if covar_type == 'full':
prec_pred = g.precisions_
prec_test = rand_data.precisions['full']
elif covar_type == 'tied':
prec_pred = np.array([g.precisions_] * n_components)
prec_test = np.array([rand_data.precisions['tied']] * n_components)
elif covar_type == 'spherical':
prec_pred = np.array([np.eye(n_features) * c
for c in g.precisions_])
prec_test = np.array([np.eye(n_features) * c for c in
rand_data.precisions['spherical']])
elif covar_type == 'diag':
prec_pred = np.array([np.diag(d) for d in g.precisions_])
prec_test = np.array([np.diag(d) for d in
rand_data.precisions['diag']])
arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort()
arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort()
for k, h in zip(arg_idx1, arg_idx2):
ecov = EmpiricalCovariance()
ecov.covariance_ = prec_test[h]
            # the accuracy depends on the amount of data and the randomness of rng
assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.15)
def test_gaussian_mixture_fit_best_params():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
n_init = 10
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
ll = []
for _ in range(n_init):
g.fit(X)
ll.append(g.score(X))
ll = np.array(ll)
g_best = GaussianMixture(n_components=n_components,
n_init=n_init, reg_covar=0, random_state=rng,
covariance_type=covar_type)
g_best.fit(X)
assert_almost_equal(ll.min(), g_best.score(X))
def test_gaussian_mixture_fit_convergence_warning():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=1)
n_components = rand_data.n_components
max_iter = 1
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=max_iter, reg_covar=0, random_state=rng,
covariance_type=covar_type)
assert_warns_message(ConvergenceWarning,
'Initialization %d did not converge. '
'Try different init parameters, '
'or increase max_iter, tol '
'or check for degenerate data.'
% max_iter, g.fit, X)
def test_multiple_init():
    # Test that multiple inits do not perform much worse than a single one
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
for cv_type in COVARIANCE_TYPE:
train1 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=0).fit(X).score(X)
train2 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=0, n_init=5).fit(X).score(X)
assert train2 >= train1
def test_gaussian_mixture_n_parameters():
# Test that the right number of parameters is estimated
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng).fit(X)
assert g._n_parameters() == n_params[cv_type]
def test_bic_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
rng = np.random.RandomState(0)
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
bic_full = GaussianMixture(n_components=n_components,
covariance_type='full',
random_state=rng).fit(X).bic(X)
for covariance_type in ['tied', 'diag', 'spherical']:
bic = GaussianMixture(n_components=n_components,
covariance_type=covariance_type,
random_state=rng).fit(X).bic(X)
assert_almost_equal(bic_full, bic)
def test_gaussian_mixture_aic_bic():
# Test the aic and bic criteria
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 3, 2
X = rng.randn(n_samples, n_features)
# standard gaussian entropy
sgh = 0.5 * (fast_logdet(np.cov(X.T, bias=1)) +
n_features * (1 + np.log(2 * np.pi)))
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng, max_iter=200)
g.fit(X)
aic = 2 * n_samples * sgh + 2 * g._n_parameters()
bic = (2 * n_samples * sgh +
np.log(n_samples) * g._n_parameters())
bound = n_features / np.sqrt(n_samples)
assert (g.aic(X) - aic) / n_samples < bound
assert (g.bic(X) - bic) / n_samples < bound
def test_gaussian_mixture_verbose():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=1)
h = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
h.fit(X)
finally:
sys.stdout = old_stdout
@pytest.mark.filterwarnings('ignore:.*did not converge.*')
@pytest.mark.parametrize("seed", (0, 1, 2))
def test_warm_start(seed):
random_state = seed
rng = np.random.RandomState(random_state)
n_samples, n_features, n_components = 500, 2, 2
X = rng.rand(n_samples, n_features)
    # Assert that warm_start gives the same result for the same number of iterations
g = GaussianMixture(n_components=n_components, n_init=1, max_iter=2,
reg_covar=0, random_state=random_state,
warm_start=False)
h = GaussianMixture(n_components=n_components, n_init=1, max_iter=1,
reg_covar=0, random_state=random_state,
warm_start=True)
g.fit(X)
score1 = h.fit(X).score(X)
score2 = h.fit(X).score(X)
assert_almost_equal(g.weights_, h.weights_)
assert_almost_equal(g.means_, h.means_)
assert_almost_equal(g.precisions_, h.precisions_)
assert score2 > score1
# Assert that by using warm_start we can converge to a good solution
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=False, tol=1e-6)
h = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=True, tol=1e-6)
g.fit(X)
assert not g.converged_
h.fit(X)
    # depending on the data there is large variability in the number of
    # refits necessary to converge due to the complete randomness of the
    # data
for _ in range(1000):
h.fit(X)
if h.converged_:
break
assert h.converged_
@ignore_warnings(category=ConvergenceWarning)
def test_convergence_detected_with_warm_start():
# We check that convergence is detected when warm_start=True
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X['full']
for max_iter in (1, 2, 50):
gmm = GaussianMixture(n_components=n_components, warm_start=True,
max_iter=max_iter, random_state=rng)
for _ in range(100):
gmm.fit(X)
if gmm.converged_:
break
assert gmm.converged_
assert max_iter >= gmm.n_iter_
def test_score():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm1 = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, reg_covar=0, random_state=rng,
covariance_type=covar_type)
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this estimator.", gmm1.score, X)
# Check score value
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
gmm1.fit(X)
gmm_score = gmm1.score(X)
gmm_score_proba = gmm1.score_samples(X).mean()
assert_almost_equal(gmm_score, gmm_score_proba)
    # Check that the score increases
gmm2 = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng,
covariance_type=covar_type).fit(X)
assert gmm2.score(X) > gmm1.score(X)
def test_score_samples():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this estimator.", gmm.score_samples, X)
gmm_score_samples = gmm.fit(X).score_samples(X)
assert gmm_score_samples.shape[0] == rand_data.n_samples
def test_monotonic_likelihood():
    # We check that each step of EM without regularization improves the
    # training set likelihood monotonically
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, reg_covar=0,
warm_start=True, max_iter=1, random_state=rng,
tol=1e-7)
current_log_likelihood = -np.infty
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(600):
prev_log_likelihood = current_log_likelihood
current_log_likelihood = gmm.fit(X).score(X)
assert current_log_likelihood >= prev_log_likelihood
if gmm.converged_:
break
assert gmm.converged_
def test_regularisation():
    # We train the GaussianMixture on degenerate data by defining two clusters
    # with zero covariance.
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = np.vstack((np.ones((n_samples // 2, n_features)),
np.zeros((n_samples // 2, n_features))))
for covar_type in COVARIANCE_TYPE:
gmm = GaussianMixture(n_components=n_samples, reg_covar=0,
covariance_type=covar_type, random_state=rng)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert_raise_message(ValueError,
"Fitting the mixture model failed because "
"some components have ill-defined empirical "
"covariance (for instance caused by "
"singleton or collapsed samples). Try to "
"decrease the number of components, or "
"increase reg_covar.", gmm.fit, X)
gmm.set_params(reg_covar=1e-6).fit(X)
def test_property():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, random_state=rng,
n_init=5)
gmm.fit(X)
if covar_type == 'full':
for prec, covar in zip(gmm.precisions_, gmm.covariances_):
assert_array_almost_equal(linalg.inv(prec), covar)
elif covar_type == 'tied':
assert_array_almost_equal(linalg.inv(gmm.precisions_),
gmm.covariances_)
else:
assert_array_almost_equal(gmm.precisions_, 1. / gmm.covariances_)
def test_sample():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7, n_components=3)
n_features, n_components = rand_data.n_features, rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, random_state=rng)
        # Sampling requires a fitted GaussianMixture
assert_raise_message(NotFittedError, "This GaussianMixture instance "
"is not fitted", gmm.sample, 0)
gmm.fit(X)
assert_raise_message(ValueError, "Invalid value for 'n_samples",
gmm.sample, 0)
# Just to make sure the class samples correctly
n_samples = 20000
X_s, y_s = gmm.sample(n_samples)
for k in range(n_components):
if covar_type == 'full':
assert_array_almost_equal(gmm.covariances_[k],
np.cov(X_s[y_s == k].T), decimal=1)
elif covar_type == 'tied':
assert_array_almost_equal(gmm.covariances_,
np.cov(X_s[y_s == k].T), decimal=1)
elif covar_type == 'diag':
assert_array_almost_equal(gmm.covariances_[k],
np.diag(np.cov(X_s[y_s == k].T)),
decimal=1)
else:
assert_array_almost_equal(
gmm.covariances_[k], np.var(X_s[y_s == k] - gmm.means_[k]),
decimal=1)
means_s = np.array([np.mean(X_s[y_s == k], 0)
for k in range(n_components)])
assert_array_almost_equal(gmm.means_, means_s, decimal=1)
# Check shapes of sampled data, see
# https://github.com/scikit-learn/scikit-learn/issues/7701
assert X_s.shape == (n_samples, n_features)
for sample_size in range(1, 100):
X_s, _ = gmm.sample(sample_size)
assert X_s.shape == (sample_size, n_features)
@ignore_warnings(category=ConvergenceWarning)
def test_init():
    # We check that increasing n_init leads to an equal or better solution
for random_state in range(15):
rand_data = RandomData(np.random.RandomState(random_state),
n_samples=50, scale=1)
n_components = rand_data.n_components
X = rand_data.X['full']
gmm1 = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, random_state=random_state).fit(X)
gmm2 = GaussianMixture(n_components=n_components, n_init=10,
max_iter=1, random_state=random_state).fit(X)
assert gmm2.lower_bound_ >= gmm1.lower_bound_
|
bsd-3-clause
|
birdsarah/bokeh
|
examples/plotting/server/burtin.py
|
42
|
4826
|
# The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import figure, show, output_server
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
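# Rough check of the mapping above (values are approximate): rad(0.001) is
# about 290 (the outer radius) and rad(1000) is about 90 (the inner radius),
# so lower MIC values (stronger antibiotics) reach further out on the chart.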
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
x = np.zeros(len(df))
y = np.zeros(len(df))
output_server("burtin")
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color="black",
background_fill="#f0e1d2", border_fill="#f0e1d2")
p.line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(x, y, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(x, y, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(x, y, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and labels
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(x, y, radius=radii, fill_color=None, line_color="white")
p.text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(x, y, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()),
text_font_size="9pt", text_align="left", text_baseline="middle")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
show(p)
|
bsd-3-clause
|
giorgiop/scikit-learn
|
examples/cross_decomposition/plot_compare_cross_decomposition.py
|
55
|
4761
|
"""
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
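# Illustrative follow-up (not part of the original example): the per-component
# correlation of the CCA-transformed test scores can be printed directly.
print("CCA test correlations for components 1 and 2")
print(np.round([np.corrcoef(X_test_r[:, k], Y_test_r[:, k])[0, 1]
                for k in range(2)], 2))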
|
bsd-3-clause
|
zhenwendai/RGP
|
autoreg/rnn_encoder.py
|
1
|
12640
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 23 18:51:22 2017
@author: grigoral
"""
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn as nn
from GPy.core import Model, Parameterized, Param
import numpy as np
import sys
import matplotlib.pyplot as plt
#_ = torch.manual_seed(1)
class Mean_var_rnn(nn.Module):
def __init__(self, p_input_dim, p_output_dim, p_hidden_dim, rnn_type='rnn', with_input_variance = True, bidirectional=False):
super(Mean_var_rnn, self).__init__()
if rnn_type == 'rnn':
self.rnn = nn.RNN(input_size=p_input_dim, hidden_size=p_hidden_dim, num_layers=1, bidirectional=bidirectional)
# input: ( seq_len, batch, input_size)
elif rnn_type == 'lstm':
self.rnn = nn.LSTM(input_size=p_input_dim, hidden_size=p_hidden_dim, num_layers=1, bidirectional=bidirectional)
# input: (seq_len, batch, input_size)
elif rnn_type == 'gru':
self.rnn = nn.GRU(input_size=p_input_dim, hidden_size=p_hidden_dim, num_layers=1, bidirectional=bidirectional)
else:
            raise ValueError("Unknown rnn type")
self.rnn_type = rnn_type
self.bidirectional=bidirectional
self.with_input_variance = with_input_variance
# in_features, out_features
self.dir_number = 1 if bidirectional==False else 2
self.linear_mean = nn.Linear(in_features=p_hidden_dim*self.dir_number, out_features=p_output_dim)
self.linear_var = nn.Linear(in_features=p_hidden_dim*self.dir_number, out_features=p_output_dim)
self.soft_plus = nn.Softplus()
def forward(self, mean_input, h_0, var_input=None, c_0=None):
"""
Input:
------------
c_0 - lstm init cell state
"""
#import pdb; pdb.set_trace()
if self.with_input_variance:
#import pdb; pdb.set_trace()
comb_input = torch.cat( (mean_input, var_input), dim=2 )
else:
comb_input = mean_input
#comb_input = torch.cat( (mean_input, torch.zeros(mean_input.size()).double() ), dim=2 )
#import pdb; pdb.set_trace()
if self.rnn_type=='lstm':
rnn_outputs,_ = self.rnn( comb_input, (h_0, c_0) ) # (seq_len, batch, hidden_size * num_directions)
else:
rnn_outputs,_ = self.rnn( comb_input, h_0) # (seq_len, batch, hidden_size * num_directions)
# Linear input: (N,∗,in_features)
self.mean_out = self.linear_mean(rnn_outputs) #(N,∗,out_features)
#if self.with_input_variance:
self.var_out = self.soft_plus( self.linear_var(rnn_outputs) )
#else:
# self.var_out = None
# maybe transform shapes
return self.mean_out, self.var_out
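# Minimal shape sketch (an assumption for illustration; this helper is not
# called anywhere in the module): a single Mean_var_rnn maps an input sequence
# of shape (seq_len, batch, input_dim) to per-step means and softplus-positive
# variances of size output_dim.
def _demo_mean_var_rnn_shapes(seq_len=7, batch=4, p_in=3, p_out=2, p_hid=5):
    layer = Mean_var_rnn(p_in, p_out, p_hid, rnn_type='rnn',
                         with_input_variance=False).double()
    mean_input = Variable(torch.randn(seq_len, batch, p_in).double())
    h_0 = Variable(torch.zeros(1, batch, p_hid).double())
    mean_out, var_out = layer(mean_input, h_0)
    return mean_out.size(), var_out.size()  # both are (seq_len, batch, p_out)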
class Mean_var_multilayer(nn.Module):
def __init__(self, p_layer_num, p_input_dims, p_output_dims, p_hidden_dim, rnn_type='rnn', h_0_type='zero',
bidirectional=False):
"""
"""
super(Mean_var_multilayer, self).__init__()
#import pdb; pdb.set_trace()
assert p_layer_num == len(p_input_dims), "Layer num must be correct"
assert len(p_input_dims) == len(p_output_dims), " Dim lengths must match"
self.layer_num = p_layer_num
self.input_dims = [ (ss if i==0 else ss*2) for (i, ss) in enumerate(p_input_dims) ] # lower layers first
self.output_dims = p_output_dims #[ ss*2 for ss in p_output_dims] # lower layers first
        self.hidden_dim = p_hidden_dim # assume that the hidden dim of all layers is equal
self.rnn_type = rnn_type
self.bidirectional=bidirectional
if h_0_type=='zero':
#self.h_0 = Variable( 0.1*torch.eye(self.hidden_dim).double() )
self.h_0 = np.zeros((p_hidden_dim,) )
else:
            raise NotImplementedError("Other initialization is not currently implemented")
if (rnn_type=='lstm'):
c_0_type = h_0_type
self.c_0 = np.zeros((p_hidden_dim,) )
self.layers = []
for l in range(self.layer_num): # layer 0 is observed, layer 1 is the next after observed etc.
# layers are created separately in order to make python references to the hidden layers outputs
with_input_variance = False if (l==0) else True # input variance
layer = Mean_var_rnn(self.input_dims[l], self.output_dims[l],
self.hidden_dim, rnn_type=rnn_type, with_input_variance = with_input_variance, bidirectional=bidirectional)
setattr(self, 'layer_' + str(l), layer)
self.layers.append(layer)
def forward(self, inp_l0):
#import pdb; pdb.set_trace()
# prepare h_0 ->
h_0 = Variable( torch.from_numpy( np.broadcast_to(self.h_0, ( 2 if self.bidirectional else 1,inp_l0.size()[1],self.h_0.shape[0]) ) ).double() )
if self.rnn_type =='lstm':
c_0 = Variable( torch.from_numpy( np.broadcast_to(self.c_0, ( 2 if self.bidirectional else 1,inp_l0.size()[1],self.h_0.shape[0]) ) ).double() )
else:
c_0 = None
#import pdb; pdb.set_trace()
# prepare h_0 <-
self.out_means = []
self.out_vars = []
out_mean, out_var = inp_l0, None
for l in range(self.layer_num):
layer = self.layers[l]
#import pdb; pdb.set_trace()
out_mean, out_var = layer( out_mean, h_0, var_input=out_var, c_0=c_0)
# Store outputs
self.out_means.append(out_mean )
self.out_vars.append( out_var)
return self.out_means, self.out_vars
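# Sketch of a two-layer forward pass (sizes are illustrative assumptions; this
# helper is not used elsewhere in the module). Layer 0 consumes only the
# observed sequence; layer 1 receives the mean and variance produced by
# layer 0, concatenated along the feature dimension.
def _demo_mean_var_multilayer(seq_len=6, batch=3):
    enc = Mean_var_multilayer(2, [2, 3], [3, 4], 5, rnn_type='rnn').double()
    inp = Variable(torch.randn(seq_len, batch, 2).double())
    means, variances = enc(inp)
    # means[0] is (seq_len, batch, 3), means[1] is (seq_len, batch, 4)
    return [m.size() for m in means], [v.size() for v in variances]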
class seq_encoder(Parameterized):
def __init__(self, p_layer_num, p_input_dims, p_output_dims, p_hidden_dim, h_0_type='zero', rnn_type='rnn', bidirectional=False,
name='seq_encoder'):
"""
"""
super(seq_encoder, self).__init__(name=name)
#import pdb; pdb.set_trace()
self.encoder = Mean_var_multilayer(p_layer_num, p_input_dims, p_output_dims, p_hidden_dim, h_0_type=h_0_type, rnn_type=rnn_type,
bidirectional=bidirectional).double()
#self.encoder.double() # convert all the parameters to float64
self.params_dict= {}
self.encoder_param_names_dics = {} # inverse transform from pytorch to gpy
for ee in self.encoder.named_parameters():
            param_name = ee[0].replace('.','_') # transform param name from pytorch to gpy
self.encoder_param_names_dics[param_name] = ee[0]
tt = ee[1].data.numpy().copy()
param = Param( param_name, tt )
setattr(self, param_name, param )
self.params_dict[param_name] = getattr(self, param_name)
self.link_parameters(param)
pass
def _zero_grads(self,):
self.encoder.zero_grad()
def _params_from_gpy(self,):
"""
Copy parameters from GPy to pytorch
"""
for p_name, p_val in self.params_dict.iteritems():
gpy_param = getattr(self, p_name).values.copy()
# if p_name == 'layer_0_linear_var_bias':
# gpy_param[:] = 32
# import pdb; pdb.set_trace()
self.encoder.state_dict()[ self.encoder_param_names_dics[p_name] ].copy_( torch.from_numpy(gpy_param) ) # this seems to work
#setattr( self.encoder, self.encoder_param_names_dics[p_name], Variable( torch.from_numpy(gpy_param) ) )
#import pdb; pdb.set_trace()
def gradients_to_gpy(self,):
"""
Sets the gradients of encoder parameters to the computed values.
        This function must be called after all samples in the minibatch are
processed.
"""
#import pdb; pdb.set_trace()
params_dict = {ii[0]:ii[1] for ii in self.encoder.named_parameters()}
for p_name, p_val in self.params_dict.iteritems():
pytorch_param = params_dict[ self.encoder_param_names_dics[p_name] ]
pytorch_param_grad = pytorch_param.grad.data.numpy()
gpy_param = getattr(self, p_name)
assert gpy_param.gradient.shape == pytorch_param_grad.shape, "Shapes must be equal"
gpy_param.gradient = pytorch_param_grad.copy()
def forward_computation(self, l0_input, l0_input2=None):
"""
Given the parameters of the neural networks computes outputs of each layer
Input:
------------------
l0_input: list
list of size batch size, in each element the ndarray of shape (seq_len, input_dim)
l0_input2: list
            Another 0-th layer input (usually controls). They are concatenated
            to the first input.
"""
#import pdb; pdb.set_trace()
batch_size = l0_input.shape[1]
self._zero_grads()
self._params_from_gpy()
l0_input = torch.from_numpy(l0_input) # right shape: (seq_len, batch, input_dim)
if l0_input2 is not None:
assert batch_size == l0_input2.shape[1], "Batch size must be the same"
assert l0_input.size()[0] == l0_input2.shape[0], "Sequaence lengths must be the same."
l0_input = torch.cat( (l0_input, torch.from_numpy(l0_input2) ), dim=2 )
l0_input = Variable( l0_input )
        # computation from bottom to top. Lists of computed means and vars from the layers.
self.forward_means_list, self.forward_vars_list = self.encoder.forward( l0_input )
#import pdb; pdb.set_trace()
# Transformation to the required output form: list of lists of (sample size, dimensions). First list is
# over layers (starting from the one after the output), second list is over batch
out_means_list = [ [ ll.squeeze(axis=1) for ll in np.split( pp.data.numpy().copy(), batch_size, axis=1) ] for pp in self.forward_means_list ]
out_vars_list = [ [ ll.squeeze(axis=1) for ll in np.split( pp.data.numpy().copy(), batch_size, axis=1) ] for pp in self.forward_vars_list ]
        # return values are a list of layer outputs starting from the lowest one (the observed layer is excluded since it is only the input)
return out_means_list, out_vars_list
def backward_computation(self, input_gradient_list ):
"""
Computes the gradient of parameters given the gradients of outputs of each layer.
Input:
---------------
input_gradient_list: list
Contains gradients of X means and variances. First gradients of means, then gradients of variances,
in order from lower layer to the top. (lowest is the one after the output layer).
"""
#import pdb; pdb.set_trace()
input_gradient_list = [ torch.from_numpy(gg) for gg in input_gradient_list]
torch.autograd.backward( variables=self.forward_means_list + self.forward_vars_list,
grad_variables = input_gradient_list, retain_graph=False )
self.gradients_to_gpy()
        # Reset the computational graph
self.forward_means_list = None
self.forward_vars_list = None
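# Note on the GPy/PyTorch bridge above (descriptive, based on the code):
# forward_computation() copies the current GPy parameter values into the torch
# encoder before running it, and backward_computation() copies the accumulated
# torch gradients back onto the corresponding GPy Param objects, so the encoder
# can be trained inside a GPy optimization loop.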
def test_graph():
y = Variable( torch.from_numpy( np.array((1,2.0)) ) )
w1 = Variable( torch.from_numpy( np.array( (2.0,) ) ), requires_grad=True )
w2 = Variable( torch.from_numpy( np.array( (3.0,) ) ), requires_grad=True )
x1 = y*w1
x2 = x1 * w2
torch.autograd.backward( variables =(x1,x2), grad_variables = (torch.from_numpy( np.array((1,1.0)) ), torch.from_numpy( np.array((1,1.0)) ) ), retain_graph=True )
raise ValueError("sdfb")
#torch.autograd.backward( variables =(x2,), grad_variables = (torch.from_numpy( np.array((1,1.0)) ), ) )
globals().update(locals());
if __name__ == '__main__':
#rnn = nn.RNN(input_size=5, hidden_size=10, num_layers=1, batch_first=True, bidirectional=False)
test_graph()
#tt = Mean_var_multilayer(2, [2,3], [3,4], 5, h_0_type='unit', rnn_type='rnn')
|
bsd-3-clause
|
chandlercr/aima-python
|
submissions/Ottenlips/myKMeans.py
|
3
|
2874
|
from sklearn import datasets
from sklearn.cluster import KMeans
# import numpy
import traceback
from submissions.Ottenlips import billionaires
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
bill = DataFrame()
list_of_billionaire = billionaires.get_billionaires()
def billtarget(num):
    # bucket billionaires by rank, testing the larger threshold first so that
    # every branch is reachable
    if num > 500:
        return 1
    if num > 200:
        return 0
    return 2
for billionaire in list_of_billionaire:
    # print(billionaire['wealth']['type'])
    # print(billionaire)
    bill.target.append(billtarget(float(billionaire['rank'])))
    # bill.target.append(billionaire['wealth']['how']['inherited'])
    bill.data.append([
        billionaire['wealth']['worth in billions'],
        float(billionaire['demographics']['age']),
        float(billionaire['location']['gdp']),
    ])
bill.feature_names = [
# 'age',
'wealth',
'age',
'gdp of origin country',
# 'gdp of origin country',
# 'rank',
]
bill.target_names = [
'high rank',
'med rank',
'lowest rank',
]
'''
Make a custom classifier.
'''
km = KMeans(
n_clusters=12,
# max_iter=1,
# n_init=12,
# init='',
# algorithm='auto',
precompute_distances='auto',
# tol=1e-4,
# n_jobs=-1,
# random_state=numpy.RandomState,
# verbose=1,
# copy_x=True,
)
km2 = KMeans(
n_clusters=2,
# max_iter=20,
n_init=50,
init='random',
algorithm='auto',
# precompute_distances='auto',
# tol=1e-4,
# n_jobs=-1,
# random_state=numpy.RandomState,
# verbose=1,
# copy_x=True,
)
billScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
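# Example of the min/max scaling above (values are illustrative):
# setupScales([[1.0, 10.0], [3.0, 30.0]]) records min=[1.0, 10.0] and
# max=[3.0, 30.0], after which scaleGrid([[2.0, 20.0]]) returns [[0.5, 0.5]].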
setupScales(bill.data)
billScaled.data = scaleGrid(bill.data)
billScaled.feature_names = bill.feature_names
billScaled.target = bill.target
billScaled.target_names = bill.target_names
Examples = {
'Billdefault': {
'frame': bill,
},
'BillKMClassifier': {
'frame': bill,
'kmeans': km,
},
'BillKMClassifierScaled': {
'frame': billScaled,
'kmeans': km2,
},
}
|
mit
|
elkingtonmcb/h2o-2
|
py/testdir_single_jvm/test_GLM2_basic_cmp.py
|
9
|
7620
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_exec, h2o_glm, h2o_jobs
import h2o_print as h2p
SCIPY_INSTALLED = True
try:
import scipy as sp
import numpy as np
import sklearn as sk
print "numpy, scipy and sklearn are installed. Will do extra checks"
except ImportError:
print "numpy, scipy or sklearn is not installed. Will just do h2o stuff"
SCIPY_INSTALLED = False
#*********************************************************************************
def do_scipy_glm(self, bucket, csvPathname, L, family='binomial'):
h2p.red_print("Now doing sklearn")
h2p.red_print("\nsee http://scikit-learn.org/0.11/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression")
import numpy as np
import scipy as sp
from sklearn.linear_model import LogisticRegression
from numpy import loadtxt
csvPathnameFull = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
# make sure it does fp divide
C = 1/(L+0.0)
print "C regularization:", C
dataset = np.loadtxt(
open(csvPathnameFull,'r'),
skiprows=1, # skip the header
delimiter=',',
dtype='float');
print "\ncsv read for training, done"
n_features = len(dataset[0]) - 1;
print "n_features:", n_features
# don't want ID (col 0) or CAPSULE (col 1)
# get CAPSULE
target = [x[1] for x in dataset]
# slice off the first 2
train = np.array ( [x[2:] for x in dataset] )
n_samples, n_features = train.shape
print "n_samples:", n_samples, "n_features:", n_features
print "histogram of target"
print sp.histogram(target,3)
print "len(train):", len(train)
print "len(target):", len(target)
print "dataset shape:", dataset.shape
if family!='binomial':
raise Exception("Only have binomial logistic for scipy")
print "\nTrying l2"
clf2 = LogisticRegression(
C=C,
dual=False,
fit_intercept=True,
intercept_scaling=1,
penalty='l2',
tol=0.0001);
# train the classifier
start = time.time()
clf2.fit(train, target)
print "L2 fit took", time.time() - start, "seconds"
# print "coefficients:", clf2.coef_
cstring = "".join([("%.5e " % c) for c in clf2.coef_[0]])
h2p.green_print("sklearn L2 C", C)
h2p.green_print("sklearn coefficients:", cstring)
h2p.green_print("sklearn intercept:", "%.5e" % clf2.intercept_[0])
h2p.green_print("sklearn score:", clf2.score(train,target))
print "\nTrying l1"
clf1 = LogisticRegression(
C=C,
dual=False,
fit_intercept=True,
intercept_scaling=1,
penalty='l1',
tol=0.0001);
# train the classifier
start = time.time()
clf1.fit(train, target)
print "L1 fit took", time.time() - start, "seconds"
# print "coefficients:", clf1.coef_
cstring = "".join([("%.5e " % c) for c in clf1.coef_[0]])
h2p.green_print("sklearn L1 C", C)
h2p.green_print("sklearn coefficients:", cstring)
h2p.green_print("sklearn intercept:", "%.5e" % clf1.intercept_[0])
h2p.green_print("sklearn score:", clf1.score(train,target))
# attributes are accessed in the normal python way
dx = clf1.__dict__
dx.keys()
## ['loss', 'C', 'dual', 'fit_intercept', 'class_weight_label', 'label_',
## 'penalty', 'multi_class', 'raw_coef_', 'tol', 'class_weight',
## 'intercept_scaling']
#*********************************************************************************
def do_h2o_glm(self, bucket, csvPathname, L, family='binomial'):
h2p.red_print("\nNow doing h2o")
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local', timeoutSecs=180)
# save the resolved pathname for use in the sklearn csv read below
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print inspect
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
x = 'ID'
y = 'CAPSULE'
family = family
alpha = '0'
lambda_ = L
nfolds = '0'
f = 'prostate'
modelKey = 'GLM_' + f
kwargs = {
'response' : y,
'ignored_cols' : x,
'family' : family,
'lambda' : lambda_,
'alpha' : alpha,
'n_folds' : nfolds, # passes if 0, fails otherwise
'destination_key' : modelKey,
}
timeoutSecs = 60
start = time.time()
glmResult = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
# this stuff was left over from when we got the result after polling the jobs list
# okay to do it again
# GLM2: when it redirects to the model view, we no longer have the job_key! (unlike the first response and polling)
(warnings, clist, intercept) = h2o_glm.simpleCheckGLM(self, glmResult, None, **kwargs)
cstring = "".join([("%.5e " % c) for c in clist])
h2p.green_print("h2o alpha ", alpha)
h2p.green_print("h2o lambda ", lambda_)
h2p.green_print("h2o coefficient list:", cstring)
h2p.green_print("h2o intercept", "%.5e " % intercept)
# other stuff in the json response
glm_model = glmResult['glm_model']
_names = glm_model['_names']
coefficients_names = glm_model['coefficients_names']
# the first submodel is the right one, if only one lambda is provided as a parameter above
submodels = glm_model['submodels'][0]
beta = submodels['beta']
h2p.red_print("beta:", beta)
norm_beta = submodels['norm_beta']
iteration = submodels['iteration']
validation = submodels['validation']
auc = validation['auc']
aic = validation['aic']
null_deviance = validation['null_deviance']
residual_deviance = validation['residual_deviance']
print '_names', _names
print 'coefficients_names', coefficients_names
# did beta get shortened? the simple check confirms names/beta/norm_beta are same length
print 'beta', beta
print 'iteration', iteration
print 'auc', auc
#*********************************************************************************
# the actual test that will run both
#*********************************************************************************
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1, java_heap_GB=10)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_basic_cmp(self):
bucket = 'smalldata'
importFolderPath = "logreg"
csvFilename = 'prostate.csv'
csvPathname = importFolderPath + "/" + csvFilename
# use L for lambda in h2o, C=1/L in sklearn
family = 'binomial'
L = 1e-4
do_h2o_glm(self, bucket, csvPathname, L, family)
if SCIPY_INSTALLED:
do_scipy_glm(self, bucket, csvPathname, L, family)
# since we invert for C, can't use 0 (infinity)
L = 1e-13
# C in sklearn specifies the inverse of regularization strength:
# the smaller C is, the stronger the regularization.
# We set it to 1/L.
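# For example, L = 1e-4 gives C = 1/1e-4 = 1e4, and the L = 1e-13 used below
# gives C = 1e13, i.e. essentially unregularized on the sklearn side.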
do_h2o_glm(self, bucket, csvPathname, L, family)
if SCIPY_INSTALLED:
do_scipy_glm(self, bucket, csvPathname, L, family)
if __name__ == '__main__':
h2o.unit_main()
|
apache-2.0
|
google-research/google-research
|
aloe/aloe/common/plot_2d.py
|
1
|
2275
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
def plot_heatmap(pdf_func, out_name, size=3):
w = 100
x = np.linspace(-size, size, w)
y = np.linspace(-size, size, w)
xx, yy = np.meshgrid(x, y)
coords = np.stack([xx.flatten(), yy.flatten()]).transpose()
scores = pdf_func(coords)
a = scores.reshape((w, w))
plt.imshow(a)
plt.axis('equal')
plt.axis('off')
plt.savefig(out_name, bbox_inches='tight')
plt.close()
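# A minimal usage sketch (hypothetical density, not part of the original file):
# pdf_func receives an (N, 2) array of grid coordinates and must return N scores, e.g.
# plot_heatmap(lambda xy: np.exp(-np.sum(xy ** 2, axis=-1)), 'gauss_heatmap.png')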
def plot_samples(samples, out_name, lim=None, axis=True):
plt.scatter(samples[:, 0], samples[:, 1], marker='.')
plt.axis('equal')
if lim is not None:
plt.xlim(-lim, lim)
plt.ylim(-lim, lim)
if not axis:
plt.axis('off')
plt.savefig(out_name, bbox_inches='tight')
plt.close()
def plot_joint(dataset, samples, out_name):
x = np.max(dataset)
y = np.max(-dataset)
z = np.ceil(max((x, y)))
plt.scatter(dataset[:, 0], dataset[:, 1], c='r', marker='x')
plt.scatter(samples[:, 0], samples[:, 1], c='b', marker='.')
plt.legend(['training data', 'ADE sampled'])
plt.axis('equal')
plt.xlim(-z, z)
plt.ylim(-z, z)
plt.savefig(out_name, bbox_inches='tight')
plt.close()
fname = out_name.split('/')[-1]
out_name = '/'.join(out_name.split('/')[:-1]) + '/none-' + fname
plt.figure(figsize=(8, 8))
plt.scatter(dataset[:, 0], dataset[:, 1], c='r', marker='x')
plt.scatter(samples[:, 0], samples[:, 1], c='b', marker='.')
plt.axis('equal')
plt.xlim(-z, z)
plt.ylim(-z, z)
plt.savefig(out_name, bbox_inches='tight')
plt.close()
|
apache-2.0
|
mwv/scikit-learn
|
examples/neighbors/plot_classification.py
|
287
|
1790
|
"""
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
|
bsd-3-clause
|
bssrdf/pmtk3
|
python/demos/ch02/robustDemo.py
|
7
|
1150
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pylab as pl
from scipy.stats import t, laplace, norm
a = np.random.randn(30)
outliers = np.array([8, 8.75, 9.5])
pl.hist(a, 7, weights=[1.0 / 30] * 30, rwidth=0.8)
#fit without outliers
x = np.linspace(-5, 10, 500)
loc, scale = norm.fit(a)
n = norm.pdf(x, loc=loc, scale=scale)
loc, scale = laplace.fit(a)
l = laplace.pdf(x, loc=loc, scale=scale)
fd, loc, scale = t.fit(a)
s = t.pdf(x, fd, loc=loc, scale=scale)
pl.plot(x, n, 'k>',
x, s, 'r-',
x, l, 'b--')
pl.legend(('Gauss', 'Student', 'Laplace'))
pl.savefig('robustDemo_without_outliers.png')
#add the outliers
pl.figure()
pl.hist(a, 7, weights=[1.0 / 33] * 30, rwidth=0.8)
pl.hist(outliers, 3, weights=[1.0 / 33] * 3, rwidth=0.8)
aa = np.hstack((a, outliers))
loc, scale = norm.fit(aa)
n = norm.pdf(x, loc=loc, scale=scale)
loc, scale = laplace.fit(aa)
l = laplace.pdf(x, loc=loc, scale=scale)
fd, loc, scale = t.fit(aa)
s = t.pdf(x, fd, loc=loc, scale=scale)  # reuse 's' instead of shadowing scipy.stats.t
pl.plot(x, n, 'k:',
x, s, 'r-',
x, l, 'b--')
pl.legend(('Gauss', 'Student', 'Laplace'))
pl.savefig('robustDemo_with_outliers.png')
pl.show()
|
mit
|
johnowhitaker/bobibabber
|
sklearn/metrics/scorer.py
|
5
|
12582
|
"""
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from warnings import warn
import numpy as np
from . import (r2_score, mean_absolute_error, mean_squared_error,
accuracy_score, f1_score, roc_auc_score,
average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
return self._sign * self._score_func(y_true, y_pred, **self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False, loss_func=None,
score_func=None, score_overrides_loss=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = not (scoring is None and loss_func is None and
score_func is None)
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif hasattr(estimator, 'predict') and has_scoring:
scorer = None
if loss_func is not None or score_func is not None:
if loss_func is not None:
warn("Passing a loss function is "
"deprecated and will be removed in 0.15. "
"Either use strings or score objects. "
"The relevant new parameter is called ''scoring''. ",
category=DeprecationWarning, stacklevel=2)
scorer = make_scorer(loss_func, greater_is_better=False)
if score_func is not None:
warn("Passing function as ``score_func`` is "
"deprecated and will be removed in 0.15. "
"Either use strings or score objects. "
"The relevant new parameter is called ''scoring''.",
category=DeprecationWarning, stacklevel=2)
if loss_func is None or score_overrides_loss:
scorer = make_scorer(score_func)
else:
scorer = get_scorer(scoring)
return scorer
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif not has_scoring:
if allow_none:
return None
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
else:
raise TypeError(
"The estimator passed should have a 'score' or a 'predict' "
"method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_score`` or ``average_precision``
and returns a callable that scores an estimator's output.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, f1=f1_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
precision=precision_scorer, recall=recall_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
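# A minimal usage sketch (not part of the original module; it relies only on
# names defined above):
#
# scorer = get_scorer("accuracy")  # same object as accuracy_scorer
# neg_mse = make_scorer(mean_squared_error, greater_is_better=False)
# # Both are called as scorer(estimator, X, y) and return a float;
# # neg_mse sign-flips MSE so that "greater is better" still holds.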
|
mit
|
FrederikDiehl/apsis
|
code/apsis/assistants/lab_assistant.py
|
2
|
15663
|
__author__ = 'Frederik Diehl'
import json
import os
import time
import uuid
import apsis.models.experiment as experiment
from apsis.assistants.experiment_assistant import ExperimentAssistant
from apsis.utilities.file_utils import ensure_directory_exists
from apsis.utilities.logging_utils import get_logger
# These are the colours supported by the plot.
COLORS = ["g", "r", "c", "b", "m", "y"]
class LabAssistant(object):
"""
This is used to control multiple experiments at once.
This is done by abstracting a dict of named experiment assistants.
Attributes
----------
_exp_assistants : dict of ExperimentAssistants.
The dictionary of experiment assistants this LabAssistant uses.
_write_dir : String, optional
The directory to write all the results and plots to.
_logger : logging.logger
The logger for this class.
"""
_exp_assistants = None
_write_dir = None
_global_start_date = None
_logger = None
def __init__(self, write_dir=None):
"""
Initializes the lab assistant.
Parameters
----------
write_dir: string, optional
Sets the write directory for the lab assistant. If None (default),
nothing will be written.
"""
self._logger = get_logger(self)
self._logger.info("Initializing lab assistant.")
self._logger.info("\tWriting results to %s" %write_dir)
self._write_dir = write_dir
self._exp_assistants = {}
reloading_possible = True
try:
if self._write_dir:
with open(self._write_dir + "/lab_assistant.json", "r"):
pass
else:
self._logger.debug("\tReloading impossible due to no "
"_write_dir specified.")
reloading_possible = False
except IOError:
self._logger.debug("\tReloading impossible due to IOError - "
"probably no lab_assistant existing.")
reloading_possible = False
if not reloading_possible:
self._global_start_date = time.time()
else:
# set the correct path.
with open(self._write_dir + "/lab_assistant.json", 'r') as infile:
lab_assistant_json = json.load(infile)
self._global_start_date = lab_assistant_json["global_start_date"]
for p in lab_assistant_json["exp_assistants"].values():
self._load_exp_assistant_from_path(p)
self._logger.debug("\tReloaded all exp_assistants.")
self._write_state_to_file()
self._logger.info("lab assistant successfully initialized.")
def init_experiment(self, name, optimizer, param_defs, exp_id=None,
notes=None, optimizer_arguments=None,
minimization=True):
"""
Initializes an experiment.
Parameters
----------
name : string
name of the experiment.
optimizer : string
String representation of the optimizer.
param_defs : dict of parameter definitions
Dictionary of parameter definition classes.
optimizer_arguments : dict, optional
A dictionary defining the operation of the optimizer. See the
respective documentation of the optimizers.
Default is None, which are default values.
exp_id : string or None, optional
The id of the experiment, which will be used to reference it.
Should be a proper uuid, and especially has to be unique. If it is
not, an error may be returned.
notes : jsonable object or None, optional
Any note that you'd like to put in the experiment. Could be used
to provide some details on the experiment, on the start time or the
user starting it.
minimization : bool, optional
Whether the problem is one of minimization. Defaults to True.
Returns
-------
exp_id : string
String representing the id of the experiment or "failed" if failed.
Raises
------
ValueError :
Iff there already is an experiment with the exp_id for this lab
assistant. Does not occur if no exp_id is given.
"""
self._logger.debug("Initializing new experiment. Parameters: "
"name: %s, optimizer: %s, param_defs: %s, "
"exp_id: %s, notes: %s, optimizer_arguments: %s, "
"minimization: %s" %(name, optimizer, param_defs,
exp_id, notes,
optimizer_arguments,
minimization))
if exp_id in self._exp_assistants.keys():
raise ValueError("Already an experiment with id %s registered."
%exp_id)
if exp_id is None:
while True:
exp_id = uuid.uuid4().hex
if exp_id not in self._exp_assistants.keys():
break
self._logger.debug("\tGenerated new exp_id: %s" %exp_id)
if not self._write_dir:
exp_assistant_write_directory = None
else:
exp_assistant_write_directory = os.path.join(self._write_dir,
exp_id)
ensure_directory_exists(exp_assistant_write_directory)
self._logger.debug("\tExp_ass directory: %s"
%exp_assistant_write_directory)
exp = experiment.Experiment(name,
param_defs,
exp_id,
notes,
minimization)
exp_ass = ExperimentAssistant(optimizer,
experiment=exp,
optimizer_arguments=optimizer_arguments,
write_dir=exp_assistant_write_directory)
self._exp_assistants[exp_id] = exp_ass
self._logger.info("Experiment initialized successfully with id %s."
%exp_id)
self._write_state_to_file()
return exp_id
def _load_exp_assistant_from_path(self, path):
"""
This loads a complete exp_assistant from path.
Specifically, it looks for exp_assistant.json in the path and restores
optimizer_class, optimizer_arguments and write_dir from this. It then
loads the experiment from the write_dir/experiment.json, then
initializes both.
Parameters
----------
path : string
The path from which to initialize. This must contain an
exp_assistant.json as specified.
"""
self._logger.debug("Loading Exp_assistant from path %s" %path)
with open(path + "/exp_assistant.json", 'r') as infile:
exp_assistant_json = json.load(infile)
optimizer_class = exp_assistant_json["optimizer_class"]
optimizer_arguments = exp_assistant_json["optimizer_arguments"]
exp_ass_write_dir = exp_assistant_json["write_dir"]
ensure_directory_exists(exp_ass_write_dir)
self._logger.debug("\tLoaded exp_parameters: "
"optimizer_class: %s, optimizer_arguments: %s,"
"write_dir: %s" %(optimizer_class,
optimizer_arguments,
exp_ass_write_dir))
exp = self._load_experiment(path)
self._logger.debug("\tLoaded Experiment. %s" %exp.to_dict())
exp_ass = ExperimentAssistant(optimizer_class=optimizer_class,
experiment=exp,
optimizer_arguments=optimizer_arguments,
write_dir=exp_ass_write_dir)
if exp_ass.exp_id in self._exp_assistants:
raise ValueError("Loaded exp_id is duplicated in experiment! id "
"is %s" %exp_ass.exp_id)
self._exp_assistants[exp_ass.exp_id] = exp_ass
self._logger.info("Successfully loaded experiment from %s." %path)
def _load_experiment(self, path):
"""
Loads an experiment from path.
Looks for experiment.json in path.
Parameters
----------
path : string
The path where experiment.json is located.
"""
self._logger.debug("Loading experiment.")
with open(path + "/experiment.json", 'r') as infile:
exp_json = json.load(infile)
exp = experiment.from_dict(exp_json)
self._logger.debug("\tLoaded experiment, %s" %exp.to_dict())
return exp
def _write_state_to_file(self):
"""
Writes the state of this lab assistant to a file.
Iff _write_dir is not None, it will collate global_start_date and a
dictionary of every experiment assistant, and dump this to
self._write_dir/lab_assistant.json.
"""
self._logger.debug("Writing lab_assistant state to file %s"
%self._write_dir)
if not self._write_dir:
return
state = {"global_start_date": self._global_start_date,
"exp_assistants": {x.exp_id: x.write_dir for x
in self._exp_assistants.values()}}
self._logger.debug("\tState is %s" %state)
with open(self._write_dir + '/lab_assistant.json', 'w') as outfile:
json.dump(state, outfile)
def get_candidates(self, experiment_id):
"""
Returns all candidates for a specific experiment.
Parameters
----------
experiment_id : string
The id of the experiment for which to return the candidates.
Returns
-------
result : dict
A dictionary of three lists with the keys finished, pending and
working, with the corresponding candidates.
"""
self._logger.debug("Returning candidates for exp %s" %experiment_id)
candidates = self._exp_assistants[experiment_id].get_candidates()
self._logger.debug("\tCandidates are %s" %candidates)
return candidates
def get_next_candidate(self, experiment_id):
"""
Returns the next candidates for a specific experiment.
Parameters
----------
experiment_id : string
The id of the experiment for which to return the next candidate.
Returns
-------
next_candidate : Candidate or None
The Candidate object that should be evaluated next. May be None,
which is equivalent to no candidate generated.
"""
self._logger.debug("Returning next candidate for id %s" %experiment_id)
next_cand = self._exp_assistants[experiment_id].get_next_candidate()
self._logger.debug("\tNext candidate is %s" %next_cand)
return next_cand
def get_best_candidate(self, experiment_id):
"""
Returns the best candidates for a specific experiment.
Parameters
----------
experiment_id : string
The id of the experiment for which to return the best candidate.
Returns
-------
best_candidate : Candidate or None
The Candidate object that has performed best. May be None,
which is equivalent to no candidate being evaluated.
"""
self._logger.debug("Returning best candidate for id %s" %experiment_id)
best_cand = self._exp_assistants[experiment_id].get_best_candidate()
self._logger.debug("\tBest candidate is %s" %best_cand)
return best_cand
def update(self, experiment_id, status, candidate):
"""
Updates the specified experiment with the status of an experiment
evaluation.
Parameters
----------
experiment_id : string
The id of the experiment for which to return the best candidate.
candidate : Candidate
The Candidate object whose status is updated.
status : {"finished", "pausing", "working"}
A string defining the status change. Can be one of the following:
- finished: The Candidate is now finished.
- pausing: The evaluation of Candidate has been paused and can be
resumed by another worker.
- working: The Candidate is now being worked on by a worker.
"""
self._logger.debug("Updating exp_id %s with candidate %s with status"
"%s." %(experiment_id, candidate, status))
self._exp_assistants[experiment_id].update(status=status,
candidate=candidate)
def get_experiment_as_dict(self, exp_id):
"""
Returns the specified experiment as dictionary.
Parameters
----------
exp_id : string
The id of the experiment.
Returns
-------
exp_dict : dict
The experiment dictionary as defined by Experiment.to_dict().
"""
self._logger.debug("Returning experiment %s as dict." %exp_id)
exp_dict = self._exp_assistants[exp_id].get_experiment_as_dict()
self._logger.debug("\tDict is %s" %exp_dict)
return exp_dict
def get_plot_result_per_step(self, exp_id):
"""
Returns the figure for the result of each step.
Parameters
----------
exp_id : string
The id of the experiment.
Result
------
fig : matplotlib.figure
The figure containing the result of each step.
"""
self._logger.debug("Returning plot of results per step for %s."
%exp_id)
fig = self._exp_assistants[exp_id].plot_result_per_step()
self._logger.debug("Figure is %s" %fig)
return fig
def contains_id(self, exp_id):
"""
Tests whether this lab assistant has an experiment with id.
Parameters
----------
exp_id : string
The ID to be tested.
Returns
-------
contains : bool
True iff this lab assistant contains an experiment with this id.
"""
self._logger.debug("Testing whether this contains id %s" %exp_id)
if exp_id in self._exp_assistants:
self._logger.debug("exp_id %s is contained." %exp_id)
return True
self._logger.debug("exp_id %s is not contained." %exp_id)
return False
def get_ids(self):
"""
Returns all known ids for this lab assistant.
Returns
-------
exp_ids : list of strings
All ids this lab assistant knows.
"""
self._logger.debug("Requested all exp_ids.")
exp_ids = self._exp_assistants.keys()
self._logger.debug("All exp_ids: %s" %exp_ids)
return exp_ids
def set_exit(self):
"""
Exits this assistant.
Currently, all that is done is exiting all exp_assistants.
"""
self._logger.info("Shutting down lab assistant: Setting exit.")
for exp in self._exp_assistants.values():
exp.set_exit()
self._logger.info("Shut down all experiment assistants.")
|
mit
|
dandxy89/ExperiKeras
|
Spark_Course_EDX/week2_apache_log.py
|
1
|
48711
|
# coding: utf-8
# version 1.0.0
# # + 
# # **Web Server Log Analysis with Apache Spark**
#
# ####This lab will demonstrate how easy it is to perform web server log analysis with Apache Spark.
#
# ####Server log analysis is an ideal use case for Spark. It's a very large, common data source and contains a rich set of information. Spark allows you to store your logs in files on disk cheaply, while still providing a quick and simple way to perform data analysis on them. This homework will show you how to use Apache Spark on real-world text-based production logs and fully harness the power of that data. Log data comes from many sources, such as web, file, and compute servers, application logs, user-generated content, and can be used for monitoring servers, improving business and customer intelligence, building recommendation systems, fraud detection, and much more.
# ### How to complete this assignment
#
# ####This assignment is broken up into sections with bite-sized examples for demonstrating Spark functionality for log processing. For each problem, you should start by thinking about the algorithm that you will use to *efficiently* process the log in a parallel, distributed manner. This means using the various [RDD](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD) operations along with [`lambda` functions](https://docs.python.org/2/tutorial/controlflow.html#lambda-expressions) that are applied at each worker.
#
# ####This assignment consists of 4 parts:
# #### *Part 1*: Apache Web Server Log file format
# #### *Part 2*: Sample Analyses on the Web Server Log File
# #### *Part 3*: Analyzing Web Server Log File
# #### *Part 4*: Exploring 404 Response Codes
# ### **Part 1: Apache Web Server Log file format**
# ####The log files that we use for this assignment are in the [Apache Common Log Format (CLF)](http://httpd.apache.org/docs/1.3/logs.html#common). The log file entries produced in CLF will look something like this:
# `127.0.0.1 - - [01/Aug/1995:00:00:01 -0400] "GET /images/launch-logo.gif HTTP/1.0" 200 1839`
#
# ####Each part of this log entry is described below.
# * `127.0.0.1`
# ####This is the IP address (or host name, if available) of the client (remote host) which made the request to the server.
#
# * `-`
# ####The "hyphen" in the output indicates that the requested piece of information (user identity from remote machine) is not available.
#
# * `-`
# ####The "hyphen" in the output indicates that the requested piece of information (user identity from local logon) is not available.
#
# * `[01/Aug/1995:00:00:01 -0400]`
# ####The time that the server finished processing the request. The format is:
# `[day/month/year:hour:minute:second timezone]`
# * ####day = 2 digits
# * ####month = 3 letters
# * ####year = 4 digits
# * ####hour = 2 digits
# * ####minute = 2 digits
# * ####second = 2 digits
# * ####zone = (\+ | \-) 4 digits
#
# * `"GET /images/launch-logo.gif HTTP/1.0"`
# ####This is the first line of the request string from the client. It consists of three components: the request method (e.g., `GET`, `POST`, etc.), the endpoint (a [Uniform Resource Identifier](http://en.wikipedia.org/wiki/Uniform_resource_identifier)), and the client protocol version.
#
# * `200`
# ####This is the status code that the server sends back to the client. This information is very valuable, because it reveals whether the request resulted in a successful response (codes beginning in 2), a redirection (codes beginning in 3), an error caused by the client (codes beginning in 4), or an error in the server (codes beginning in 5). The full list of possible status codes can be found in the HTTP specification ([RFC 2616](https://www.ietf.org/rfc/rfc2616.txt) section 10).
#
# * `1839`
# ####The last entry indicates the size of the object returned to the client, not including the response headers. If no content was returned to the client, this value will be "-" (or sometimes 0).
#
# ####Note that log files contain information supplied directly by the client, without escaping. Therefore, it is possible for malicious clients to insert control-characters in the log files, *so care must be taken in dealing with raw logs.*
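#
# ####As a worked example, the sample line at the top of this section breaks down into nine fields: host `127.0.0.1`, remote identity `-`, local user identity `-`, timestamp `[01/Aug/1995:00:00:01 -0400]`, request method `GET`, endpoint `/images/launch-logo.gif`, protocol `HTTP/1.0`, status code `200`, and content size `1839` bytes.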
#
# ### NASA-HTTP Web Server Log
# ####For this assignment, we will use a data set from NASA Kennedy Space Center WWW server in Florida. The full data set is freely available (http://ita.ee.lbl.gov/html/contrib/NASA-HTTP.html) and contains two months of HTTP requests. We are using a subset that only contains several days' worth of requests.
# ### **(1a) Parsing Each Log Line**
# ####Using the CLF as defined above, we create a regular expression pattern to extract the nine fields of the log line using the Python regular expression [`search` function](https://docs.python.org/2/library/re.html#regular-expression-objects). The function returns a pair consisting of a Row object and 1. If the log line fails to match the regular expression, the function returns a pair consisting of the log line string and 0. A '-' value in the content size field is cleaned up by substituting it with 0. The function converts the log line's date string into a Python `datetime` object using the given `parse_apache_time` function.
# In[1]:
import re
import datetime
from pyspark.sql import Row
month_map = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7,
'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
def parse_apache_time(s):
""" Convert Apache time format into a Python datetime object
Args:
s (str): date and time in Apache time format
Returns:
datetime: datetime object (ignore timezone for now)
"""
return datetime.datetime(int(s[7:11]),
month_map[s[3:6]],
int(s[0:2]),
int(s[12:14]),
int(s[15:17]),
int(s[18:20]))
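# For example, parse_apache_time('01/Aug/1995:00:00:01 -0400') returns
# datetime.datetime(1995, 8, 1, 0, 0, 1); the timezone offset is ignored.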
def parseApacheLogLine(logline):
""" Parse a line in the Apache Common Log format
Args:
logline (str): a line of text in the Apache Common Log format
Returns:
tuple: either a dictionary containing the parts of the Apache Access Log and 1,
or the original invalid log line and 0
"""
match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)
if match is None:
return (logline, 0)
size_field = match.group(9)
if size_field == '-':
size = long(0)
else:
size = long(match.group(9))
return (Row(
host=match.group(1),
client_identd=match.group(2),
user_id=match.group(3),
date_time=parse_apache_time(match.group(4)),
method=match.group(5),
endpoint=match.group(6),
protocol=match.group(7),
response_code=int(match.group(8)),
content_size=size
), 1)
# In[2]:
# A regular expression pattern to extract fields from the log line
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)" (\d{3}) (\S+)'
# ### **(1b) Configuration and Initial RDD Creation**
# ####We are ready to specify the input log file and create an RDD containing the parsed log file data. The log file has already been downloaded for you.
#
# ####To create the primary RDD that we'll use in the rest of this assignment, we first load the text file using [`sc.textfile(logFile)`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext.textFile) to convert each line of the file into an element in an RDD.
# ####Next, we use [`map(parseApacheLogLine)`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.map) to apply the parse function to each element (that is, a line from the log file) in the RDD and turn each line into a pair [`Row` object](http://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.Row).
# ####Finally, we cache the RDD in memory since we'll use it throughout this notebook.
# In[4]:
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab2', 'apache.access.log.PROJECT')
logFile = os.path.join(baseDir, inputPath)
# In[16]:
# Report the on-disk size of the log file.
statinfo = os.stat(logFile).st_size
print statinfo
def parseLogs():
""" Read and parse log file """
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid logline: %d' % failed_logs.count()
for line in failed_logs.take(20):
print 'Invalid logline: %s' % line
print 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (parsed_logs.count(), access_logs.count(), failed_logs.count())
return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
# ### **(1c) Data Cleaning**
# #### Notice that there are a large number of log lines that failed to parse. Examine the sample of invalid lines and compare them to the correctly parsed line; an example is included below. Based on your observations, alter the `APACHE_ACCESS_LOG_PATTERN` regular expression below so that the failed lines will correctly parse, and press `Shift-Enter` to rerun `parseLogs()`.
#
# `127.0.0.1 - - [01/Aug/1995:00:00:01 -0400] "GET /images/launch-logo.gif HTTP/1.0" 200 1839`
#
# #### If you are not familiar with the Python regular expression [`search` function](https://docs.python.org/2/library/re.html#regular-expression-objects), now would be a good time to check up on the [documentation](https://developers.google.com/edu/python/regular-expressions). One tip that might be useful is to use an online tester like http://pythex.org or http://www.pythonregex.com. To use it, copy and paste the regular expression string below (located between the single quotes ') and test it against one of the 'Invalid logline' above.
# In[5]:
# TODO: Replace <FILL IN> with appropriate code
# This was originally '^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+)
# (\S+)\s*(\S*)" (\d{3}) (\S+)'
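# The final group inside the quotes is now the non-greedy '(.*?)' instead of
# '(\S*)', so request strings containing extra spaces (e.g. endpoints with
# embedded spaces) are captured up to the closing quote rather than failing
# to parse.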
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(.*?)" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
# In[6]:
# TEST Data cleaning (1c)
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(
parsed_logs.count(),
1043177,
'incorrect parsed_logs.count()')
Test.assertEquals(
access_logs.count(),
parsed_logs.count(),
'incorrect access_logs.count()')
# ### **Part 2: Sample Analyses on the Web Server Log File**
#
# ####Now that we have an RDD containing the log file as a set of Row objects, we can perform various analyses.
#
# #### **(2a) Example: Content Size Statistics**
#
# ####Let's compute some statistics about the sizes of content being returned by the web server. In particular, we'd like to know what are the average, minimum, and maximum content sizes.
#
# ####We can compute the statistics by applying a `map` to the `access_logs` RDD. The `lambda` function we want for the map is to extract the `content_size` field from the RDD. The map produces a new RDD containing only the `content_sizes` (one element for each Row object in the `access_logs` RDD). To compute the minimum and maximum statistics, we can use [`min()`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.min) and [`max()`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.max) functions on the new RDD. We can compute the average statistic by using the [`reduce`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.reduce) function with a `lambda` function that sums the two inputs, which represent two elements from the new RDD that are being reduced together. The result of the `reduce()` is the total content size from the log and it is to be divided by the number of requests as determined using the [`count()`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.count) function on the new RDD.
# In[7]:
# Calculate statistics based on the content size.
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: %i, Min: %i, Max: %s' % (
content_sizes.reduce(lambda a, b: a + b) / content_sizes.count(),
content_sizes.min(),
content_sizes.max())
# #### **(2b) Example: Response Code Analysis**
# ####Next, lets look at the response codes that appear in the log. As with the content size analysis, first we create a new RDD by using a `lambda` function to extract the `response_code` field from the `access_logs` RDD. The difference here is that we will use a [pair tuple](https://docs.python.org/2/tutorial/datastructures.html?highlight=tuple#tuples-and-sequences) instead of just the field itself. Using a pair tuple consisting of the response code and 1 will let us count how many records have a particular response code. Using the new RDD, we perform a [`reduceByKey`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.reduceByKey) function. `reduceByKey` performs a reduce on a per-key basis by applying the `lambda` function to each element, pairwise with the same key. We use the simple `lambda` function of adding the two values. Then, we cache the resulting RDD and create a list by using the [`take`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.take) function.
# In[8]:
# Response Code to Count
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b: a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found %d response codes' % len(responseCodeToCountList)
print 'Response Code Counts: %s' % responseCodeToCountList
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [
(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
# #### **(2c) Example: Response Code Graphing with `matplotlib`**
# ####Now, lets visualize the results from the last example. We can visualize the results from the last example using [`matplotlib`](http://matplotlib.org/). First we need to extract the labels and fractions for the graph. We do this with two separate `map` functions with a `lambda` functions. The first `map` function extracts a list of of the response code values, and the second `map` function extracts a list of the per response code counts divided by the total size of the access logs. Next, we create a figure with `figure()` constructor and use the `pie()` method to create the pie plot.
# In[9]:
labels = responseCodeToCount.map(lambda x_y9: x_y9[0]).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(
lambda x_y10: (
float(
x_y10[1]) /
count)).collect()
print fracs
# In[10]:
import matplotlib.pyplot as plt
def pie_pct_format(value):
""" Determine the appropriate format string for the pie chart percentage label
Args:
value: value of the pie slice
Returns:
str: formated string label; if the slice is too small to fit, returns an empty string for label
"""
return '' if value < 7 else '%.0f%%' % value
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = [
'yellowgreen',
'lightskyblue',
'gold',
'purple',
'lightcoral',
'yellow',
'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
# If the slice is too small to fit, don't show a text label
text.set_text('')
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
pass
# #### **(2d) Example: Frequent Hosts**
# ####Let's look at hosts that have accessed the server multiple times (e.g., more than ten times). As with the response code analysis in (2b), first we create a new RDD by using a `lambda` function to extract the `host` field from the `access_logs` RDD using a pair tuple consisting of the host and 1 which will let us count how many records were created by a particular host's request. Using the new RDD, we perform a `reduceByKey` function with a `lambda` function that adds the two values. We then filter the result based on the count of accesses by each host (the second element of each pair) being greater than ten. Next, we extract the host name by performing a `map` with a `lambda` function that returns the first element of each pair. Finally, we extract 20 elements from the resulting RDD - *note that the choice of which elements are returned is not guaranteed to be deterministic.*
# In[11]:
# Any hosts that have accessed the server more than 10 times.
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b: a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more than 10 times: %s' % hostsPick20
# An example: [u'204.120.34.185', u'204.243.249.9',
# u'slip1-32.acs.ohio-state.edu', u'lapdog-14.baylor.edu', u'199.77.67.3',
# u'gs1.cs.ttu.edu', u'haskell.limbex.com', u'alfred.uib.no',
# u'146.129.66.31', u'manaus.bologna.maraut.it',
# u'dialup98-110.swipnet.se', u'slip-ppp02.feldspar.com',
# u'ad03-053.compuserve.com', u'srawlin.opsys.nwa.com', u'199.202.200.52',
# u'ix-den7-23.ix.netcom.com', u'151.99.247.114', u'w20-575-104.mit.edu',
# u'205.25.227.20', u'ns.rmc.com']
# #### **(2e) Example: Visualizing Endpoints**
# ####Now, lets visualize the number of hits to endpoints (URIs) in the log. To perform this task, we first create a new RDD by using a `lambda` function to extract the `endpoint` field from the `access_logs` RDD using a pair tuple consisting of the endpoint and 1 which will let us count how many records were created by a particular host's request. Using the new RDD, we perform a `reduceByKey` function with a `lambda` function that adds the two values. We then cache the results.
#
# ####Next we visualize the results using `matplotlib`. We previously imported the `matplotlib.pyplot` library, so we do not need to import it again. We perform two separate `map` functions with `lambda` functions. The first `map` function extracts a list of endpoint values, and the second `map` function extracts a list of the visits per endpoint values. Next, we create a figure with `figure()` constructor, set various features of the plot (axis limits, grid lines, and labels), and use the `plot()` method to create the line plot.
# In[12]:
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b: a + b)
.cache())
ends = endpoints.map(lambda x_y11: x_y11[0]).collect()
counts = endpoints.map(lambda x_y12: x_y12[1]).collect()
fig = plt.figure(figsize=(8, 4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
pass
# #### **(2f) Example: Top Endpoints**
# ####For the final example, we'll look at the top endpoints (URIs) in the log. To determine them, we first create a new RDD by using a `lambda` function to extract the `endpoint` field from the `access_logs` RDD using a pair tuple consisting of the endpoint and 1 which will let us count how many records were created by a particular host's request. Using the new RDD, we perform a `reduceByKey` function with a `lambda` function that adds the two values. We then extract the top ten endpoints by performing a [`takeOrdered`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.takeOrdered) with a value of 10 and a `lambda` function that multiplies the count (the second element of each pair) by -1 to create a sorted list with the top endpoints at the bottom.
# In[13]:
# Top Endpoints
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b: a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: %s' % topEndpoints
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277),
(u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20292)], 'incorrect Top Ten Endpoints'
# ### **Part 3: Analyzing Web Server Log File**
#
# ####Now it is your turn to perform analyses on web server log files.
# #### **(3a) Exercise: Top Ten Error Endpoints**
# ####What are the top ten endpoints which did not have return code 200? Create a sorted list containing top ten endpoints and the number of times that they were accessed with non-200 return code.
#
# ####Think about the steps that you need to perform to determine which endpoints did not have a 200 return code, how you will uniquely count those endpoints, and sort the list.
#
# ####You might want to refer back to the previous Lab (Lab 1 Word Count) for insights.
# In[14]:
# TODO: Replace <FILL IN> with appropriate code
# HINT: Each of these <FILL IN> below could be completed with a single transformation or action.
# You are welcome to structure your solution in a different way, so long as
# you ensure the variables used in the next Test section are defined (ie.
# endpointSum, topTenErrURLs).
not200 = access_logs.filter(lambda log: log.response_code != 200)
endpointCountPairTuple = not200.map(lambda log: (log.endpoint, 1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a, b: a + b)
topTenErrURLs = endpointSum.takeOrdered(10, key=lambda x_y: -x_y[1])
print 'Top Ten failed URLs: %s' % topTenErrURLs
# In[15]:
# TEST Top ten error endpoints (3a)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs,
[(u'/images/NASA-logosmall.gif',
8761),
(u'/images/KSC-logosmall.gif',
7236),
(u'/images/MOSAIC-logosmall.gif',
5197),
(u'/images/USA-logosmall.gif',
5157),
(u'/images/WORLD-logosmall.gif',
5020),
(u'/images/ksclogo-medium.gif',
4728),
(u'/history/apollo/images/apollo-logo1.gif',
2907),
(u'/images/launch-logo.gif',
2811),
(u'/',
2199),
(u'/images/ksclogosmall.gif',
1622)],
'incorrect Top Ten failed URLs (topTenErrURLs)')
# #### **(3b) Exercise: Number of Unique Hosts**
# ####How many unique hosts are there in the entire log?
#
# ####Think about the steps that you need to perform to count the number of different hosts in the log.
# In[17]:
# TODO: Replace <FILL IN> with appropriate code
# HINT: Do you recall the tips from (3a)? Each of these <FILL IN> could be
# an transformation or action.
hosts = access_logs.map(lambda log: (log.host))
uniqueHosts = hosts.distinct()
uniqueHostCount = uniqueHosts.count()
print 'Unique hosts: %d' % uniqueHostCount
# In[18]:
# TEST Number of unique hosts (3b)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
# #### **(3c) Exercise: Number of Unique Daily Hosts**
# ####For an advanced exercise, let's determine the number of unique hosts in the entire log on a day-by-day basis. This computation will give us counts of the number of unique daily hosts. We'd like a list sorted by increasing day of the month which includes the day of the month and the associated number of unique hosts for that day. Make sure you cache the resulting RDD `dailyHosts` so that we can reuse it in the next exercise.
#
# ####Think about the steps that you need to perform to count the number of different hosts that make requests *each* day.
# ####*Since the log only covers a single month, you can ignore the month.*
# In[38]:
# TODO: Replace <FILL IN> with appropriate code
dayToHostPairTuple = (access_logs
.map(lambda log: ((log.date_time.day, log.host), 1))
)
dayGroupedHosts = (dayToHostPairTuple
.reduceByKey(lambda v1, v2: v1 + v2)
.map(lambda k_v: k_v[0]))
dayHostCount = (dayGroupedHosts
.map(lambda k_v1: (k_v1[0], 1))
.reduceByKey(lambda v1, v2: v1 + v2))
dailyHosts = (dayHostCount
.sortByKey()
.cache())
dailyHostsList = dailyHosts.take(30)
print 'Unique hosts per day: %s' % dailyHostsList
# In[39]:
# TEST Number of unique daily hosts (3c)
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (
13, 2650), (14, 4454), (15, 4214), (16, 4340), (17, 4385), (18, 4168), (19, 2550), (20, 2560), (21, 4134), (22, 4456)], 'incorrect dailyHostsList')
Test.assertTrue(dailyHosts.is_cached, 'incorrect dailyHosts.is_cached')
# #### **(3d) Exercise: Visualizing the Number of Unique Daily Hosts**
# ####Using the results from the previous exercise, use `matplotlib` to plot a "Line" graph of the unique hosts requests by day.
# #### `daysWithHosts` should be a list of days and `hosts` should be a list of number of unique hosts for each corresponding day.
# #### * How could you convert a RDD into a list? See the [`collect()` method](http://spark.apache.org/docs/latest/api/python/pyspark.html?highlight=collect#pyspark.RDD.collect)*
# In[40]:
# TODO: Replace <FILL IN> with appropriate code
# daysWithHosts = dailyHosts.<FILL IN>
daysWithHosts = (dailyHosts
.map(lambda k_v2: k_v2[0])
.collect())
# hosts = dailyHosts.<FILL IN>
hosts = (dailyHosts
.map(lambda k_v3: k_v3[1])
.collect())
# In[41]:
# TEST Visualizing unique daily hosts (3d)
test_days = range(1, 23)
test_days.remove(2)
Test.assertEquals(daysWithHosts, test_days, 'incorrect days')
Test.assertEquals(hosts,
                  [2582, 3222, 4190, 2502, 2537, 4106, 4406, 4317, 4523, 4346,
                   2864, 2650, 4454, 4214, 4340, 4385, 4168, 2550, 2560, 4134,
                   4456],
                  'incorrect hosts')
# In[42]:
fig = plt.figure(figsize=(8, 4.5), facecolor='white', edgecolor='white')
plt.axis([min(daysWithHosts), max(daysWithHosts), 0, max(hosts) + 500])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.plot(daysWithHosts, hosts)
pass
# #### **(3e) Exercise: Average Number of Daily Requests per Hosts**
# ####Next, let's determine the average number of requests on a day-by-day basis. We'd like a list by increasing day of the month and the associated average number of requests per host for that day. Make sure you cache the resulting RDD `avgDailyReqPerHost` so that we can reuse it in the next exercise.
# ####To compute the average number of requests per host, get the total number of requests across all hosts and divide that by the number of unique hosts.
# ####*Since the log only covers a single month, you can skip checking for the month.*
# ####*Also to keep it simple, when calculating the approximate average use the integer value - you do not need to upcast to float*
# In[43]:
# dayAndHostTuple = access_logs.<FILL IN>
dayAndHostTuple = (access_logs
.map(lambda log: ((log.date_time.day, log.host), 1))
.reduceByKey(lambda v1, v2: v1 + v2)
)
# groupedByDay = dayAndHostTuple.<FILL IN>
groupedByDay = (dayAndHostTuple
.map(lambda k_v_cnt: (k_v_cnt[0][0], (1, k_v_cnt[1])))
.reduceByKey(lambda v1, v2: (v1[0] + v2[0], v1[1] + v2[1]))
)
# sortedByDay = groupedByDay.<FILL IN>
sortedByDay = (groupedByDay
.sortByKey()
)
avgDailyReqPerHost = (sortedByDay
.map(lambda k_v4: (k_v4[0], k_v4[1][1] / k_v4[1][0]))
.cache()
)
avgDailyReqPerHostList = avgDailyReqPerHost.take(30)
print 'Average number of daily requests per Hosts is %s' % avgDailyReqPerHostList
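# An equivalent sketch (assuming the cached `dailyHosts` RDD from (3c) is
# still available): join the daily request totals with the daily unique-host
# counts and divide, using the same integer division as the solution above.
dailyRequests = (access_logs
                 .map(lambda log: (log.date_time.day, 1))
                 .reduceByKey(lambda v1, v2: v1 + v2))
avgDailyReqPerHostAlt = (dailyRequests
                         .join(dailyHosts)
                         .map(lambda day_counts: (day_counts[0],
                                                  day_counts[1][0] / day_counts[1][1]))
                         .sortByKey())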
# In[44]:
# TEST Average number of daily requests per hosts (3e)
Test.assertEquals(avgDailyReqPerHostList,
                  [(1, 13), (3, 12), (4, 14), (5, 12), (6, 12), (7, 13),
                   (8, 13), (9, 14), (10, 13), (11, 14), (12, 13), (13, 13),
                   (14, 13), (15, 13), (16, 13), (17, 13), (18, 13), (19, 12),
                   (20, 12), (21, 13), (22, 12)],
                  'incorrect avgDailyReqPerHostList')
Test.assertTrue(avgDailyReqPerHost.is_cached,
'incorrect avgDailyReqPerHost.is_cache')
# #### **(3f) Exercise: Visualizing the Average Daily Requests per Unique Host**
# ####Using the result `avgDailyReqPerHost` from the previous exercise, use `matplotlib` to plot a "Line" graph of the average daily requests per unique host by day.
# #### `daysWithAvg` should be a list of days and `avgs` should be a list of average daily requests per unique host for each corresponding day.
# In[45]:
# TODO: Replace <FILL IN> with appropriate code
# daysWithAvg = avgDailyReqPerHost.<FILL IN>
daysWithAvg = avgDailyReqPerHost.map(lambda k_v13: k_v13[0]).take(30)
# avgs = avgDailyReqPerHost.<FILL IN>
avgs = avgDailyReqPerHost.map(lambda k_v14: k_v14[1]).take(30)
# In[46]:
# TEST Average Daily Requests per Unique Host (3f)
Test.assertEquals(daysWithAvg,
                  [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
                   19, 20, 21, 22],
                  'incorrect days')
Test.assertEquals(avgs,
                  [13, 12, 14, 12, 12, 13, 13, 14, 13, 14, 13, 13, 13, 13, 13,
                   13, 13, 12, 12, 13, 12],
                  'incorrect avgs')
# In[47]:
fig = plt.figure(figsize=(8, 4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithAvg), 0, max(avgs) + 2])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Average')
plt.plot(daysWithAvg, avgs)
pass
# ### **Part 4: Exploring 404 Response Codes**
#
# ####Let's drill down and explore the error 404 response code records. 404 errors are returned when an endpoint is not found by the server (i.e., a missing page or object).
# #### **(4a) Exercise: Counting 404 Response Codes**
# #### Create an RDD containing only log records with a 404 response code. Make sure you `cache()` the RDD `badRecords` as we will use it in the rest of this exercise.
#
# #### How many 404 records are in the log?
# In[48]:
# TODO: Replace <FILL IN> with appropriate code
# badRecords = (access_logs
# <FILL IN>)
badRecords = (access_logs
.filter(lambda log: log.response_code == 404)
.cache()
)
print 'Found %d 404 URLs' % badRecords.count()
# In[49]:
# TEST Counting 404 (4a)
Test.assertEquals(badRecords.count(), 6185, 'incorrect badRecords.count()')
Test.assertTrue(badRecords.is_cached, 'incorrect badRecords.is_cached')
# #### **(4b) Exercise: Listing 404 Response Code Records**
# ####Using the RDD containing only log records with a 404 response code that you cached in part (4a), print out a list of up to 40 **distinct** endpoints that generate 404 errors - *no endpoint should appear more than once in your list.*
# In[56]:
# TODO: Replace <FILL IN> with appropriate code
# badEndpoints = badRecords.<FILL IN>
badEndpoints = badRecords.map(lambda log: (log.endpoint, 1))
# badUniqueEndpoints = badEndpoints.<FILL IN>
badUniqueEndpoints = (badEndpoints
.reduceByKey(lambda v1, v2: 1)
.map(lambda k_v5: k_v5[0])
)
badUniqueEndpointsPick40 = badUniqueEndpoints.take(40)
print '404 URLS: %s' % badUniqueEndpointsPick40
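# A shorter sketch of the same de-duplication: distinct() on the endpoint
# field avoids the reduceByKey(lambda v1, v2: 1) trick used above.
badUniqueEndpointsAlt = badRecords.map(lambda log: log.endpoint).distinct()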
# In[57]:
# TEST Listing 404 records (4b)
badUniqueEndpointsSet40 = set(badUniqueEndpointsPick40)
Test.assertEquals(len(badUniqueEndpointsSet40), 40,
'badUniqueEndpointsPick40 not distinct')
# #### **(4c) Exercise: Listing the Top Twenty 404 Response Code Endpoints**
# ####Using the RDD containing only log records with a 404 response code that you cached in part (4a), print out a list of the top twenty endpoints that generate the most 404 errors.
# ####*Remember, top endpoints should be in sorted order*
# In[58]:
# TODO: Replace <FILL IN> with appropriate code
# badEndpointsCountPairTuple = badRecords.<FILL IN>
badEndpointsCountPairTuple = badRecords.map(lambda log: (log.endpoint, 1))
# badEndpointsSum = badEndpointsCountPairTuple.<FILL IN>
badEndpointsSum = (badEndpointsCountPairTuple
.reduceByKey(lambda v1, v2: v1 + v2)
)
badEndpointsTop20 = badEndpointsSum.takeOrdered(20, lambda k_v6: -1 * k_v6[1])
print 'Top Twenty 404 URLs: %s' % badEndpointsTop20
# In[59]:
# TEST Top twenty 404 URLs (4c)
Test.assertEquals(badEndpointsTop20,
                  [(u'/pub/winvn/readme.txt', 633),
                   (u'/pub/winvn/release.txt', 494),
                   (u'/shuttle/missions/STS-69/mission-STS-69.html', 431),
                   (u'/images/nasa-logo.gif', 319),
                   (u'/elv/DELTA/uncons.htm', 178),
                   (u'/shuttle/missions/sts-68/ksc-upclose.gif', 156),
                   (u'/history/apollo/sa-1/sa-1-patch-small.gif', 146),
                   (u'/images/crawlerway-logo.gif', 120),
                   (u'/://spacelink.msfc.nasa.gov', 117),
                   (u'/history/apollo/pad-abort-test-1/pad-abort-test-1-patch-small.gif', 100),
                   (u'/history/apollo/a-001/a-001-patch-small.gif', 97),
                   (u'/images/Nasa-logo.gif', 85),
                   (u'/shuttle/resources/orbiters/atlantis.gif', 64),
                   (u'/history/apollo/images/little-joe.jpg', 62),
                   (u'/images/lf-logo.gif', 59),
                   (u'/shuttle/resources/orbiters/discovery.gif', 56),
                   (u'/shuttle/resources/orbiters/challenger.gif', 54),
                   (u'/robots.txt', 53),
                   (u'/elv/new01.gif>', 43),
                   (u'/history/apollo/pad-abort-test-2/pad-abort-test-2-patch-small.gif', 38)],
                  'incorrect badEndpointsTop20')
# #### **(4d) Exercise: Listing the Top Twenty-five 404 Response Code Hosts**
# ####Instead of looking at the endpoints that generated 404 errors, let's look at the hosts that encountered 404 errors. Using the RDD containing only log records with a 404 response code that you cached in part (4a), print out a list of the top twenty-five hosts that generate the most 404 errors.
# In[60]:
# TODO: Replace <FILL IN> with appropriate code
# errHostsCountPairTuple = badRecords.<FILL IN>
errHostsCountPairTuple = badRecords.map(lambda log: (log.host, 1))
# errHostsSum = errHostsCountPairTuple.<FILL IN>
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda v1, v2: v1 + v2)
errHostsTop25 = errHostsSum.takeOrdered(25, lambda k_v7: -1 * k_v7[1])
print 'Top 25 hosts that generated errors: %s' % errHostsTop25
# In[61]:
# TEST Top twenty-five 404 response code hosts (4d)
Test.assertEquals(len(errHostsTop25), 25, 'length of errHostsTop25 is not 25')
Test.assertEquals(len(set(errHostsTop25) - set(
                  [(u'maz3.maz.net', 39), (u'piweba3y.prodigy.com', 39),
                   (u'gate.barr.com', 38), (u'm38-370-9.mit.edu', 37),
                   (u'ts8-1.westwood.ts.ucla.edu', 37),
                   (u'nexus.mlckew.edu.au', 37), (u'204.62.245.32', 33),
                   (u'163.206.104.34', 27), (u'spica.sci.isas.ac.jp', 27),
                   (u'www-d4.proxy.aol.com', 26), (u'www-c4.proxy.aol.com', 25),
                   (u'203.13.168.24', 25), (u'203.13.168.17', 25),
                   (u'internet-gw.watson.ibm.com', 24),
                   (u'scooter.pa-x.dec.com', 23), (u'crl5.crl.com', 23),
                   (u'piweba5y.prodigy.com', 23), (u'onramp2-9.onr.com', 22),
                   (u'slip145-189.ut.nl.ibm.net', 22),
                   (u'198.40.25.102.sap2.artic.edu', 21),
                   (u'gn2.getnet.com', 20), (u'msp1-16.nas.mr.net', 20),
                   (u'isou24.vilspa.esa.es', 19), (u'dial055.mbnet.mb.ca', 19),
                   (u'tigger.nashscene.com', 19)])),
                  0,
                  'incorrect errHostsTop25')
# #### **(4e) Exercise: Listing 404 Response Codes per Day**
# ####Let's explore the 404 records temporally. Break down the 404 requests by day (`cache()` the RDD `errDateSorted`) and get the daily counts sorted by day as a list.
# ####*Since the log only covers a single month, you can ignore the month in your checks.*
# In[62]:
# TODO: Replace <FILL IN> with appropriate code
# errDateCountPairTuple = badRecords.<FILL IN>
errDateCountPairTuple = badRecords.map(lambda log: (log.date_time.day, 1))
# errDateSum = errDateCountPairTuple.<FILL IN>
errDateSum = errDateCountPairTuple.reduceByKey(lambda v1, v2: v1 + v2)
# errDateSorted = (errDateSum.<FILL IN>)
errDateSorted = (errDateSum
.sortByKey()
.cache())
# errByDate = errDateSorted.<FILL IN>
errByDate = errDateSorted.take(30)
print '404 Errors by day: %s' % errByDate
# In[63]:
# TEST 404 response codes per day (4e)
Test.assertEquals(errByDate,
                  [(1, 243), (3, 303), (4, 346), (5, 234), (6, 372), (7, 532),
                   (8, 381), (9, 279), (10, 314), (11, 263), (12, 195),
                   (13, 216), (14, 287), (15, 326), (16, 258), (17, 269),
                   (18, 255), (19, 207), (20, 312), (21, 305), (22, 288)],
                  'incorrect errByDate')
Test.assertTrue(errDateSorted.is_cached, 'incorrect errDateSorted.is_cached')
# #### **(4f) Exercise: Visualizing the 404 Response Codes by Day**
# ####Using the results from the previous exercise, use `matplotlib` to plot a "Line" or "Bar" graph of the 404 response codes by day.
# In[64]:
# TODO: Replace <FILL IN> with appropriate code
# daysWithErrors404 = errDateSorted.<FILL IN>
daysWithErrors404 = errDateSorted.map(lambda k_v15: k_v15[0]).take(30)
# errors404ByDay = errDateSorted.<FILL IN>
errors404ByDay = errDateSorted.map(lambda k_v16: k_v16[1]).take(30)
# In[65]:
# TEST Visualizing the 404 Response Codes by Day (4f)
Test.assertEquals(daysWithErrors404,
                  [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
                   19, 20, 21, 22],
                  'incorrect daysWithErrors404')
Test.assertEquals(errors404ByDay,
                  [243, 303, 346, 234, 372, 532, 381, 279, 314, 263, 195, 216,
                   287, 326, 258, 269, 255, 207, 312, 305, 288],
                  'incorrect errors404ByDay')
# In[66]:
fig = plt.figure(figsize=(8, 4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass
# #### **(4g) Exercise: Top Five Days for 404 Response Codes**
# ####Using the RDD `errDateSorted` you cached in part (4e), what are the top five days for 404 response codes and the corresponding counts of 404 response codes?
# In[67]:
# TODO: Replace <FILL IN> with appropriate code
# topErrDate = errDateSorted.<FILL IN>
topErrDate = errDateSorted.takeOrdered(5, lambda k_v8: -1 * k_v8[1])
print 'Top Five dates for 404 requests: %s' % topErrDate
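# An equivalent sketch using a full descending sort instead of takeOrdered,
# assuming the cached `errDateSorted` RDD is still available:
topErrDateAlt = (errDateSorted
                 .sortBy(lambda day_count: day_count[1], ascending=False)
                 .take(5))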
# In[68]:
# TEST Five dates for 404 requests (4g)
Test.assertEquals(
topErrDate, [
(7, 532), (8, 381), (6, 372), (4, 346), (15, 326)], 'incorrect topErrDate')
# #### **(4h) Exercise: Hourly 404 Response Codes**
# ####Using the RDD `badRecords` you cached in part (4a), create an RDD containing how many requests had a 404 response code for each hour of the day, sorted by increasing hour. Cache the resulting RDD `hourRecordsSorted` and print it as a list.
# In[71]:
# TODO: Replace <FILL IN> with appropriate code
# hourCountPairTuple = badRecords.<FILL IN>
hourCountPairTuple = badRecords.map(lambda log: (log.date_time.hour, 1))
# hourRecordsSum = hourCountPairTuple.<FILL IN>
hourRecordsSum = hourCountPairTuple.reduceByKey(lambda v1, v2: v1 + v2)
# hourRecordsSorted = (hourRecordsSum
# <FILL IN>)
hourRecordsSorted = (hourRecordsSum
.sortByKey()
.cache())
# errHourList = hourRecordsSorted.<FILL IN>
errHourList = hourRecordsSorted.take(24)
print 'Top hours for 404 requests: %s' % errHourList
# In[72]:
# TEST Hourly 404 response codes (4h)
Test.assertEquals(errHourList,
                  [(0, 175), (1, 171), (2, 422), (3, 272), (4, 102), (5, 95),
                   (6, 93), (7, 122), (8, 199), (9, 185), (10, 329), (11, 263),
                   (12, 438), (13, 397), (14, 318), (15, 347), (16, 373),
                   (17, 330), (18, 268), (19, 269), (20, 270), (21, 241),
                   (22, 234), (23, 272)],
                  'incorrect errHourList')
Test.assertTrue(hourRecordsSorted.is_cached,
'incorrect hourRecordsSorted.is_cached')
# #### **(4i) Exercise: Visualizing the 404 Response Codes by Hour**
# ####Using the results from the previous exercise, use `matplotlib` to plot a "Line" or "Bar" graph of the 404 response codes by hour.
# In[73]:
# TODO: Replace <FILL IN> with appropriate code
# hoursWithErrors404 = hourRecordsSorted.<FILL IN>
hoursWithErrors404 = hourRecordsSorted.map(lambda k_v17: k_v17[0]).take(24)
# errors404ByHours = hourRecordsSorted.<FILL IN>
errors404ByHours = hourRecordsSorted.map(lambda k_v18: k_v18[1]).take(24)
# In[74]:
# TEST Visualizing the 404 Response Codes by Hour (4i)
Test.assertEquals(hoursWithErrors404,
                  [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                   17, 18, 19, 20, 21, 22, 23],
                  'incorrect hoursWithErrors404')
Test.assertEquals(errors404ByHours,
                  [175, 171, 422, 272, 102, 95, 93, 122, 199, 185, 329, 263,
                   438, 397, 318, 347, 373, 330, 268, 269, 270, 241, 234, 272],
                  'incorrect errors404ByHours')
# In[75]:
fig = plt.figure(figsize=(8, 4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(hoursWithErrors404), 0, max(errors404ByHours)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.plot(hoursWithErrors404, errors404ByHours)
pass
# In[ ]:
|
mit
|
cbertinato/pandas
|
pandas/tests/groupby/test_value_counts.py
|
3
|
2351
|
"""
these are systematically testing all of the args to value_counts
with different size combinations. This is to ensure stability of the sorting
and proper parameter handling
"""
from itertools import product
import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Series, date_range
from pandas.util import testing as tm
# our starting frame
def seed_df(seed_nans, n, m):
np.random.seed(1234)
days = date_range('2015-08-24', periods=10)
frame = DataFrame({
'1st': np.random.choice(
list('abcd'), n),
'2nd': np.random.choice(days, n),
'3rd': np.random.randint(1, m + 1, n)
})
if seed_nans:
frame.loc[1::11, '1st'] = np.nan
frame.loc[3::17, '2nd'] = np.nan
frame.loc[7::19, '3rd'] = np.nan
frame.loc[8::19, '3rd'] = np.nan
frame.loc[9::19, '3rd'] = np.nan
return frame
# create input df, keys, and the bins
binned = []
ids = []
for seed_nans in [True, False]:
for n, m in product((100, 1000), (5, 20)):
df = seed_df(seed_nans, n, m)
bins = None, np.arange(0, max(5, df['3rd'].max()) + 1, 2)
keys = '1st', '2nd', ['1st', '2nd']
for k, b in product(keys, bins):
binned.append((df, k, b, n, m))
ids.append("{}-{}-{}".format(k, n, m))
@pytest.mark.slow
@pytest.mark.parametrize("df, keys, bins, n, m", binned, ids=ids)
def test_series_groupby_value_counts(df, keys, bins, n, m):
def rebuild_index(df):
arr = list(map(df.index.get_level_values, range(df.index.nlevels)))
df.index = MultiIndex.from_arrays(arr, names=df.index.names)
return df
for isort, normalize, sort, ascending, dropna \
in product((False, True), repeat=5):
kwargs = dict(normalize=normalize, sort=sort,
ascending=ascending, dropna=dropna, bins=bins)
gr = df.groupby(keys, sort=isort)
left = gr['3rd'].value_counts(**kwargs)
gr = df.groupby(keys, sort=isort)
right = gr['3rd'].apply(Series.value_counts, **kwargs)
right.index.names = right.index.names[:-1] + ['3rd']
# have to sort on index because of unstable sort on values
left, right = map(rebuild_index, (left, right)) # xref GH9212
tm.assert_series_equal(left.sort_index(), right.sort_index())
|
bsd-3-clause
|
bigdataelephants/scikit-learn
|
examples/applications/plot_out_of_core_classification.py
|
255
|
13919
|
"""
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, which will be fed batches of examples. To guarantee
that the feature space remains the same over time, we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
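# Because the hashing vectorizer is stateless, any batch of raw text is
# projected into the same 2 ** 18-dimensional space without a fit step; a
# small illustrative check (not required by the example itself):
_probe = vectorizer.transform([u'any text maps into the same feature space'])
assert _probe.shape[1] == 2 ** 18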
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
    Note: size counts documents drawn before excluding invalid docs (those
    with no topics assigned).
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create an iterator that yields mini-batches of parsed documents from the
# data stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
|
bsd-3-clause
|
he7d3r/revscoring
|
revscoring/scoring/models/model.py
|
2
|
9160
|
"""
All scoring models are an implementation of :class:`revscoring.Model`.
.. autoclass:: revscoring.scoring.models.Learned
:members:
.. autoclass:: revscoring.scoring.models.Classifier
:members:
"""
import bz2
import logging
import pickle
from multiprocessing import Pool, cpu_count
import yamlconf
from sklearn.model_selection import KFold
from sklearn.preprocessing import RobustScaler
from ..environment import Environment
from ..model_info import ModelInfo
logger = logging.getLogger(__name__)
class Model:
SCORE_SCHEMA = NotImplemented
def __init__(self, features, version=None, environment=None,
statistics=None, additional_info=None):
"""
A model used to score things
:Parameters:
features : `list`(`Feature`)
A list of `Feature`s that the model expects to be provided.
version : `str`
A string describing the version of the model.
"""
logger.debug("Initializing Model with {0}".format(features))
self.features = tuple(features)
self.params = {}
self.version = version
self.info = ModelInfo()
"""
A :class:`revscoring.scoring.ModelInfo` instance that implements
:func:`~revscoring.scoring.ModelInfo.lookup` and
:func:`~revscoring.scoring.ModelInfo.format` -- both of which
act as an index into information about a model.
"""
self.info['type'] = self.__class__.__name__
self.info['version'] = version
self.info['params'] = self.params
for key, value in (additional_info or {}):
self.info[key] = value
self.info['environment'] = environment or Environment()
if statistics is not None:
self.info['statistics'] = statistics
def score(self, feature_values):
"""
Make a prediction or otherwise use the model to generate a score.
:Parameters:
feature_values : collection(`mixed`)
an ordered collection of values that correspond to the
`Feature` s provided to the constructor
:Returns:
A `dict` of statistics
"""
raise NotImplementedError()
def test(self, values_labels):
"""
Tests the model against a labeled data.
:Parameters:
values_labels : `iterable` (( `<feature_values>`, `<label>` ))
                an iterable of labeled data where <feature_values> is an
                ordered collection of predictive values that correspond to the
                `Feature` s provided to the constructor
:Returns:
A dictionary of test results.
"""
# Score all of the observations
score_labels = [(self.score(values), label)
for values, label in values_labels]
# Fit builtin statistics engine
self.info['statistics'].fit(score_labels)
return self.info['statistics']
@classmethod
def load(cls, f, error_on_env_check=False):
"""
Reads serialized model information from a file.
"""
if hasattr(f, 'buffer'):
model = pickle.load(f.buffer)
else:
model = pickle.load(f)
model.info['environment'].check(raise_exception=error_on_env_check)
return model
def dump(self, f):
"""
Writes serialized model information to a file.
"""
if hasattr(f, 'buffer'):
return pickle.dump(self, f.buffer)
else:
return pickle.dump(self, f)
@classmethod
def from_config(cls, config, name, section_key='scorer_models'):
section = config[section_key][name]
if 'module' in section:
return yamlconf.import_module(section['module'])
elif 'class' in section:
class_path = section['class']
Class = yamlconf.import_module(class_path)
if 'model_file' in section:
# TODO: Cache the model file for reuse across workers?
with open_file(section['model_file']) as stream:
return Class.load(stream)
else:
return Class(**{k: v for k, v in section.items()
if k != "class"})
def open_file(path):
if path[-4:] == ".bz2" or path[-6:] == ".bzip2":
return bz2.open(path, 'rb')
else:
return open(path, 'rb')
class Learned(Model):
def __init__(self, *args, scale=False, center=False, **kwargs):
"""
        A machine learned model. Beyond :class:`revscoring.Model`, "Learned"
        models implement
        :func:`~revscoring.scoring.models.Learned.train` and
:func:`~revscoring.scoring.models.Learned.cross_validate`.
"""
super().__init__(*args, **kwargs)
self.trained = None
if scale or center:
self.scaler = RobustScaler(with_centering=center,
with_scaling=scale)
else:
self.scaler = None
self.params.update({
'scale': scale,
'center': center
})
def train(self, values_labels):
"""
Fits the model using labeled data by learning its shape.
:Parameters:
values_labels : [( `<feature_values>`, `<label>` )]
                an iterable of labeled data where <feature_values> is an
                ordered collection of predictive values that correspond to the
                :class:`revscoring.Feature` s provided to the constructor
"""
raise NotImplementedError()
def fit_scaler_and_transform(self, fv_vectors):
"""
Fits the internal scale to labeled data.
:Parameters:
fv_vectors : `iterable` (( `<feature_values>`, `<label>` ))
an iterable of labeled data Where <values_labels> is an ordered
collection of predictive values that correspond to the
`Feature` s provided to the constructor
:Returns:
A dictionary of model statistics.
"""
if self.scaler is not None:
return self.scaler.fit_transform(fv_vectors)
else:
return fv_vectors
def apply_scaling(self, fv_vector):
if self.scaler is not None:
if not hasattr(self.scaler, "center_") and \
not hasattr(self.scaler, "scale_"):
raise RuntimeError("Cannot scale a vector before " +
"training the scaler")
fv_vector = self.scaler.transform([fv_vector])[0]
return fv_vector
def _clean_copy(self):
raise NotImplementedError()
def cross_validate(self, values_labels, folds=10, processes=1):
"""
        Trains and tests the model against folds of labeled data.
        :Parameters:
            values_labels : [( `<feature_values>`, `<label>` )]
                an iterable of labeled data where <feature_values> is an
                ordered collection of predictive values that correspond to the
                `Feature` s provided to the constructor
            folds : `int`
                the number of folds to split the labeled data into
            processes : `int`
                When set to 1, cross-validation will run in the parent thread.
                When set to 2 or greater, a :class:`multiprocessing.pool.Pool`
                will be created.
"""
folds_i = KFold(n_splits=folds, shuffle=True,
random_state=0)
if processes == 1:
mapper = map
else:
pool = Pool(processes=processes or cpu_count())
mapper = pool.map
results = mapper(self._cross_score,
((i, [values_labels[i] for i in train_i],
[values_labels[i] for i in test_i])
for i, (train_i, test_i) in enumerate(
folds_i.split(values_labels))))
agg_score_labels = []
for score_labels in results:
agg_score_labels.extend(score_labels)
self.info['statistics'].fit(agg_score_labels)
return self.info['statistics']
def _cross_score(self, i_train_test):
i, train_set, test_set = i_train_test
logger.info("Performing cross-validation {0}...".format(i + 1))
model = self._clean_copy()
logger.debug("Training cross-validation for {0}...".format(i + 1))
model.train(train_set)
logger.debug("Scoring cross-validation for {0}...".format(i + 1))
feature_values, labels = map(list, zip(*test_set))
docs = model.score_many(feature_values)
return list(zip(docs, labels))
class Classifier(Learned):
def __init__(self, features, labels, multilabel=False,
population_rates=None, **kwargs):
self.labels = labels
self.multilabel = multilabel
self.population_rates = population_rates
super().__init__(features, **kwargs)
self.params.update({
'labels': labels,
'multilabel': multilabel,
'population_rates': population_rates
})
|
mit
|
altair-viz/altair
|
altair/examples/select_detail.py
|
1
|
1944
|
"""
Selection Detail Example
========================
This example shows a selection that links two views of data: the left panel
contains one point per object, and the right panel contains one line per
object. Clicking on either the points or lines will select the corresponding
objects in both views of the data.
The challenge lies in expressing such hierarchical data in a way that Altair
can handle. We do this by merging the data into a "long form" dataframe, and
aggregating identical metadata for the final plot.
"""
# category: interactive charts
import altair as alt
import pandas as pd
import numpy as np
np.random.seed(0)
n_objects = 20
n_times = 50
# Create one (x, y) pair of metadata per object
locations = pd.DataFrame({
'id': range(n_objects),
'x': np.random.randn(n_objects),
'y': np.random.randn(n_objects)
})
# Create a 50-element time-series for each object
timeseries = pd.DataFrame(np.random.randn(n_times, n_objects).cumsum(0),
columns=locations['id'],
index=pd.RangeIndex(0, n_times, name='time'))
# Melt the wide-form timeseries into a long-form view
timeseries = timeseries.reset_index().melt('time')
# Merge the (x, y) metadata into the long-form view
timeseries['id'] = timeseries['id'].astype(int) # make merge not complain
data = pd.merge(timeseries, locations, on='id')
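# A quick illustrative sanity check on the merge: each of the 20 objects
# contributes one row per time step, so the long-form frame has
# n_objects * n_times rows.
assert data.shape[0] == n_objects * n_times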
# Data is prepared, now make a chart
selector = alt.selection_single(empty='all', fields=['id'])
base = alt.Chart(data).properties(
width=250,
height=250
).add_selection(selector)
points = base.mark_point(filled=True, size=200).encode(
x='mean(x)',
y='mean(y)',
color=alt.condition(selector, 'id:O', alt.value('lightgray'), legend=None),
)
timeseries = base.mark_line().encode(
x='time',
y=alt.Y('value', scale=alt.Scale(domain=(-15, 15))),
color=alt.Color('id:O', legend=None)
).transform_filter(
selector
)
points | timeseries
|
bsd-3-clause
|
rishikksh20/scikit-learn
|
sklearn/decomposition/tests/test_dict_learning.py
|
46
|
9267
|
import numpy as np
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0,
n_jobs=-1)
with ignore_warnings(category=ConvergenceWarning):
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only,
decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_input():
n_components = 100
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
Xf = check_array(X, order='F')
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
a = sparse_encode(X, V, algorithm=algo)
b = sparse_encode(Xf, V, algorithm=algo)
assert_array_almost_equal(a, b)
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
|
bsd-3-clause
|
florian-f/sklearn
|
sklearn/externals/joblib/__init__.py
|
4
|
4467
|
""" Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs. In addition, Joblib
can also be used to provide a light-weight make replacement or caching
solution.
    * **Avoid computing the same thing twice**: code is often rerun over and
      over, for instance when prototyping computationally heavy jobs (as in
      scientific development), but hand-crafted solutions to alleviate this
      issue are error-prone and often lead to unreproducible results
    * **Persist to disk transparently**: efficiently persisting
      arbitrary objects containing large data is hard. Using
      joblib's caching mechanism avoids hand-written persistence and
      implicitly links the file on disk to the execution context of
      the original Python object. As a result, joblib's persistence is
      good for resuming an application status or computational job, e.g.
      after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
   display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.7.0d'
from .memory import Memory
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
|
bsd-3-clause
|
voxlol/scikit-learn
|
examples/linear_model/plot_theilsen.py
|
232
|
3615
|
"""
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept are then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
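# As the docstring above notes, larger problems can cap the number of candidate
# subsamples; an illustrative (unused here) configuration:
theil_sen_capped = TheilSenRegressor(max_subpopulation=int(1e4), random_state=42)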
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
|
bsd-3-clause
|
oew1v07/scikit-image
|
doc/examples/plot_watershed.py
|
7
|
2336
|
"""
======================
Watershed segmentation
======================
The watershed is a classical algorithm used for **segmentation**, that
is, for separating different objects in an image.
Starting from user-defined markers, the watershed algorithm treats
pixels values as a local topography (elevation). The algorithm floods
basins from the markers, until basins attributed to different markers
meet on watershed lines. In many cases, markers are chosen as local
minima of the image, from which basins are flooded.
In the example below, two overlapping circles are to be separated. To
do so, one computes an image that is the distance to the
background. The maxima of this distance (i.e., the minima of the
opposite of the distance) are chosen as markers, and the flooding of
basins from such markers separates the two circles along a watershed
line.
See Wikipedia_ for more details on the algorithm.
.. _Wikipedia: http://en.wikipedia.org/wiki/Watershed_(image_processing)
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage.morphology import watershed
from skimage.feature import peak_local_max
# Generate an initial image with two overlapping circles
x, y = np.indices((80, 80))
x1, y1, x2, y2 = 28, 28, 44, 52
r1, r2 = 16, 20
mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
image = np.logical_or(mask_circle1, mask_circle2)
# Now we want to separate the two objects in image
# Generate the markers as local maxima of the distance to the background
distance = ndi.distance_transform_edt(image)
local_maxi = peak_local_max(distance, indices=False, footprint=np.ones((3, 3)),
labels=image)
markers = ndi.label(local_maxi)[0]
labels = watershed(-distance, markers, mask=image)
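# An illustrative aside: the number of separated objects can be read off the
# label image (background is 0); for the two overlapping circles above it is
# expected to be 2.
n_segments = labels.max()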
fig, axes = plt.subplots(ncols=3, figsize=(8, 2.7))
ax0, ax1, ax2 = axes
ax0.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax0.set_title('Overlapping objects')
ax1.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
ax1.set_title('Distances')
ax2.imshow(labels, cmap=plt.cm.spectral, interpolation='nearest')
ax2.set_title('Separated objects')
for ax in axes:
ax.axis('off')
fig.subplots_adjust(hspace=0.01, wspace=0.01, top=0.9, bottom=0, left=0,
right=1)
plt.show()
|
bsd-3-clause
|
Nelca/buildMLSystem
|
ch11/demo_corr.py
|
3
|
2395
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from matplotlib import pylab
import numpy as np
import scipy
from scipy.stats import norm, pearsonr
DATA_DIR = os.path.join("..", "data")
CHART_DIR = os.path.join("..", "charts")
def _plot_correlation_func(x, y):
r, p = pearsonr(x, y)
title = "Cor($X_1$, $X_2$) = %.3f" % r
pylab.scatter(x, y)
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
f1 = scipy.poly1d(scipy.polyfit(x, y, 1))
pylab.plot(x, f1(x), "r--", linewidth=2)
# pylab.xticks([w*7*24 for w in [0,1,2,3,4]], ['week %i'%(w+1) for w in
# [0,1,2,3,4]])
def plot_correlation_demo():
np.random.seed(0) # to reproduce the data later on
pylab.clf()
pylab.figure(num=None, figsize=(8, 8))
x = np.arange(0, 10, 0.2)
pylab.subplot(221)
y = 0.5 * x + norm.rvs(1, loc=0, scale=.01, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(222)
y = 0.5 * x + norm.rvs(1, loc=0, scale=.1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(223)
y = 0.5 * x + norm.rvs(1, loc=0, scale=1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(224)
y = norm.rvs(1, loc=0, scale=10, size=len(x))
_plot_correlation_func(x, y)
pylab.autoscale(tight=True)
pylab.grid(True)
filename = "corr_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
pylab.clf()
pylab.figure(num=None, figsize=(8, 8))
x = np.arange(-5, 5, 0.2)
pylab.subplot(221)
y = 0.5 * x ** 2 + norm.rvs(1, loc=0, scale=.01, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(222)
y = 0.5 * x ** 2 + norm.rvs(1, loc=0, scale=.1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(223)
y = 0.5 * x ** 2 + norm.rvs(1, loc=0, scale=1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(224)
y = 0.5 * x ** 2 + norm.rvs(1, loc=0, scale=10, size=len(x))
_plot_correlation_func(x, y)
pylab.autoscale(tight=True)
pylab.grid(True)
filename = "corr_demo_2.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_correlation_demo()
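# Hedged aside (not part of the book's demo): the Pearson coefficient shown
# in each subplot title can also be computed directly from its definition
# r = cov(X1, X2) / (std(X1) * std(X2)); the helper below is illustrative
# only and is not used by the demo.
def _pearson_by_hand(x, y):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    x_c = x - x.mean()
    y_c = y - y.mean()
    return (x_c * y_c).sum() / np.sqrt((x_c ** 2).sum() * (y_c ** 2).sum())
# For any of the (x, y) pairs above, this agrees with
# scipy.stats.pearsonr(x, y)[0] up to floating point error.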
|
mit
|
pianomania/scikit-learn
|
sklearn/covariance/robust_covariance.py
|
105
|
29653
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
det : float
    Log-determinant of the robust covariance estimate, as computed by
    ``fast_logdet``.
support : array-like, shape (n_samples,)
    A mask for the `n_support` observations whose scatter matrix has
    minimum determinant.
dist : array-like, shape (n_samples,)
    Mahalanobis distances of all observations with respect to the
    returned location and covariance estimates.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
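# Illustrative sketch only (not part of scikit-learn): a minimal call to
# `c_step` on a toy contaminated dataset.  The helper name and the data are
# invented for demonstration.
def _c_step_toy_example():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 2)          # well-behaved observations
    X[:10] += 10.                  # a few gross outliers
    location, covariance, det, support, dist = c_step(
        X, n_support=60, random_state=rng)
    # `support` flags the 60 observations whose scatter matrix had the
    # smallest determinant reached by this single C-step run.
    return location, covariance, support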
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
    (Two iterations are usually enough to get close to the final solution;
    in practice more than 20 are never needed.)
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
    The `select` best supports found in the data set (`X`).
best_ds : array-like, shape (select, n_samples)
    The Mahalanobis distances associated with the `select` best supports.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
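# Illustrative sketch only (not part of scikit-learn): running several random
# C-step trials through `select_candidates` and keeping the two best
# candidates.  The helper name and the data are invented for demonstration.
def _select_candidates_toy_example():
    rng = np.random.RandomState(42)
    X = rng.randn(200, 3)
    X[:20] += 8.                   # contaminate 10% of the samples
    best_locs, best_covs, best_supports, best_dists = select_candidates(
        X, n_support=120, n_trials=30, select=2, n_iter=5, random_state=rng)
    # best_locs has shape (2, 3): the two location estimates whose
    # associated covariance determinants were smallest.
    return best_locs, best_covs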
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
    `[n_samples + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets before
pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
    A mask of the observations that have been used to compute
    the robust location and covariance estimates of the data set.
dist : array-like, shape (n_samples,)
    Mahalanobis distances of the observations computed with the raw
    robust estimates.
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): shrink the number of candidates kept per
            # subset before allocating the covariance buffer again.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
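# Illustrative sketch only (not part of scikit-learn): `fast_mcd` returns the
# raw (uncorrected, unreweighted) MCD estimates.  The helper name and the
# data are invented for demonstration.
def _fast_mcd_toy_example():
    rng = np.random.RandomState(0)
    X = rng.multivariate_normal(mean=[0., 0.],
                                cov=[[1., .7], [.7, 1.]], size=300)
    X[:30] += 12.                  # 10% of the points pushed far away
    location, covariance, support, dist = fast_mcd(X, random_state=rng)
    # Most of the shifted points should end up excluded from the support.
    n_outliers_kept = int(support[:30].sum())
    return location, covariance, n_outliers_kept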
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
    Useful when working with data whose mean is almost, but not exactly,
    zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
    [n_samples + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
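# Usage sketch (comments only, not part of this module): fitting MinCovDet
# and reading the reweighted estimates through the public scikit-learn API.
# The toy data below are invented.
#
#     import numpy as np
#     from sklearn.covariance import MinCovDet
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(200, 2)
#     X[:20] += 6.                   # contaminated samples
#     mcd = MinCovDet(random_state=0).fit(X)
#     mcd.location_                  # robust location estimate
#     mcd.covariance_                # reweighted robust covariance
#     mcd.support_                   # mask of observations used
#     mcd.dist_                      # Mahalanobis distances of X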
|
bsd-3-clause
|
shoyer/xarray
|
xarray/tests/test_units.py
|
1
|
179545
|
import functools
import operator
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray.core import formatting
from xarray.core.npcompat import IS_NEP18_ACTIVE
from xarray.testing import assert_allclose, assert_identical
from .test_variable import _PAD_XR_NP_ARGS, VariableSubclassobjects
pint = pytest.importorskip("pint")
DimensionalityError = pint.errors.DimensionalityError
# make sure scalars are converted to 0d arrays so quantities can
# always be treated like ndarrays
unit_registry = pint.UnitRegistry(force_ndarray=True)
Quantity = unit_registry.Quantity
pytestmark = [
pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NUMPY_EXPERIMENTAL_ARRAY_FUNCTION is not enabled"
),
# TODO: remove this once pint has a released version with __array_function__
pytest.mark.skipif(
not hasattr(unit_registry.Quantity, "__array_function__"),
reason="pint does not implement __array_function__ yet",
),
# pytest.mark.filterwarnings("ignore:::pint[.*]"),
]
def is_compatible(unit1, unit2):
def dimensionality(obj):
if isinstance(obj, (unit_registry.Quantity, unit_registry.Unit)):
unit_like = obj
else:
unit_like = unit_registry.dimensionless
return unit_like.dimensionality
return dimensionality(unit1) == dimensionality(unit2)
def compatible_mappings(first, second):
return {
key: is_compatible(unit1, unit2)
for key, (unit1, unit2) in merge_mappings(first, second)
}
def array_extract_units(obj):
if isinstance(obj, (xr.Variable, xr.DataArray, xr.Dataset)):
obj = obj.data
try:
return obj.units
except AttributeError:
return None
def array_strip_units(array):
try:
return array.magnitude
except AttributeError:
return array
def array_attach_units(data, unit):
if isinstance(data, Quantity):
raise ValueError(f"cannot attach unit {unit} to quantity {data}")
try:
quantity = data * unit
except np.core._exceptions.UFuncTypeError:
if isinstance(unit, unit_registry.Unit):
raise
quantity = data
return quantity
def extract_units(obj):
if isinstance(obj, xr.Dataset):
vars_units = {
name: array_extract_units(value) for name, value in obj.data_vars.items()
}
coords_units = {
name: array_extract_units(value) for name, value in obj.coords.items()
}
units = {**vars_units, **coords_units}
elif isinstance(obj, xr.DataArray):
vars_units = {obj.name: array_extract_units(obj)}
coords_units = {
name: array_extract_units(value) for name, value in obj.coords.items()
}
units = {**vars_units, **coords_units}
elif isinstance(obj, xr.Variable):
vars_units = {None: array_extract_units(obj.data)}
units = {**vars_units}
elif isinstance(obj, Quantity):
vars_units = {None: array_extract_units(obj)}
units = {**vars_units}
else:
units = {}
return units
def strip_units(obj):
if isinstance(obj, xr.Dataset):
data_vars = {
strip_units(name): strip_units(value)
for name, value in obj.data_vars.items()
}
coords = {
strip_units(name): strip_units(value) for name, value in obj.coords.items()
}
new_obj = xr.Dataset(data_vars=data_vars, coords=coords)
elif isinstance(obj, xr.DataArray):
data = array_strip_units(obj.data)
coords = {
strip_units(name): (
(value.dims, array_strip_units(value.data))
if isinstance(value.data, Quantity)
else value # to preserve multiindexes
)
for name, value in obj.coords.items()
}
new_obj = xr.DataArray(
name=strip_units(obj.name), data=data, coords=coords, dims=obj.dims
)
elif isinstance(obj, xr.Variable):
data = array_strip_units(obj.data)
new_obj = obj.copy(data=data)
elif isinstance(obj, unit_registry.Quantity):
new_obj = obj.magnitude
elif isinstance(obj, (list, tuple)):
return type(obj)(strip_units(elem) for elem in obj)
else:
new_obj = obj
return new_obj
def attach_units(obj, units):
if not isinstance(obj, (xr.DataArray, xr.Dataset, xr.Variable)):
units = units.get("data", None) or units.get(None, None) or 1
return array_attach_units(obj, units)
if isinstance(obj, xr.Dataset):
data_vars = {
name: attach_units(value, units) for name, value in obj.data_vars.items()
}
coords = {
name: attach_units(value, units) for name, value in obj.coords.items()
}
new_obj = xr.Dataset(data_vars=data_vars, coords=coords, attrs=obj.attrs)
elif isinstance(obj, xr.DataArray):
# try the array name, "data" and None, then fall back to dimensionless
data_units = (
units.get(obj.name, None)
or units.get("data", None)
or units.get(None, None)
or 1
)
data = array_attach_units(obj.data, data_units)
coords = {
name: (
(value.dims, array_attach_units(value.data, units.get(name) or 1))
if name in units
# to preserve multiindexes
else value
)
for name, value in obj.coords.items()
}
dims = obj.dims
attrs = obj.attrs
new_obj = xr.DataArray(
name=obj.name, data=data, coords=coords, attrs=attrs, dims=dims
)
else:
data_units = units.get("data", None) or units.get(None, None) or 1
data = array_attach_units(obj.data, data_units)
new_obj = obj.copy(data=data)
return new_obj
def convert_units(obj, to):
# preprocess
to = {
key: None if not isinstance(value, unit_registry.Unit) else value
for key, value in to.items()
}
if isinstance(obj, xr.Dataset):
data_vars = {
name: convert_units(array.variable, {None: to.get(name)})
for name, array in obj.data_vars.items()
}
coords = {
name: convert_units(array.variable, {None: to.get(name)})
for name, array in obj.coords.items()
}
new_obj = xr.Dataset(data_vars=data_vars, coords=coords, attrs=obj.attrs)
elif isinstance(obj, xr.DataArray):
name = obj.name
new_units = (
to.get(name, None) or to.get("data", None) or to.get(None, None) or None
)
data = convert_units(obj.variable, {None: new_units})
coords = {
name: (array.dims, convert_units(array.variable, {None: to.get(name)}))
for name, array in obj.coords.items()
if name != obj.name
}
new_obj = xr.DataArray(
name=name, data=data, coords=coords, attrs=obj.attrs, dims=obj.dims
)
elif isinstance(obj, xr.Variable):
new_data = convert_units(obj.data, to)
new_obj = obj.copy(data=new_data)
elif isinstance(obj, unit_registry.Quantity):
units = to.get(None)
new_obj = obj.to(units) if units is not None else obj
else:
new_obj = obj
return new_obj
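# Hedged aside (not one of the actual test helpers): a tiny round trip
# showing how the helpers above are meant to compose.  The names below are
# invented for illustration.
def _units_roundtrip_sketch():
    da = xr.DataArray(np.linspace(0, 1, 4) * unit_registry.m, dims="x")
    units = extract_units(da)             # maps the array name (None here) to its unit
    bare = strip_units(da)                # plain numpy-backed DataArray
    restored = attach_units(bare, units)  # quantities re-attached
    in_mm = convert_units(restored, {da.name: unit_registry.mm})
    return units, bare, restored, in_mm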
def assert_units_equal(a, b):
__tracebackhide__ = True
assert extract_units(a) == extract_units(b)
def assert_equal_with_units(a, b):
# works like xr.testing.assert_equal, but also explicitly checks units
# so, it is more like assert_identical
__tracebackhide__ = True
if isinstance(a, xr.Dataset) or isinstance(b, xr.Dataset):
a_units = extract_units(a)
b_units = extract_units(b)
a_without_units = strip_units(a)
b_without_units = strip_units(b)
assert a_without_units.equals(b_without_units), formatting.diff_dataset_repr(
a, b, "equals"
)
assert a_units == b_units
else:
a = a if not isinstance(a, (xr.DataArray, xr.Variable)) else a.data
b = b if not isinstance(b, (xr.DataArray, xr.Variable)) else b.data
assert type(a) == type(b) or (
isinstance(a, Quantity) and isinstance(b, Quantity)
)
# workaround until pint implements allclose in __array_function__
if isinstance(a, Quantity) or isinstance(b, Quantity):
assert (
hasattr(a, "magnitude") and hasattr(b, "magnitude")
) and np.allclose(a.magnitude, b.magnitude, equal_nan=True)
assert (hasattr(a, "units") and hasattr(b, "units")) and a.units == b.units
else:
assert np.allclose(a, b, equal_nan=True)
@pytest.fixture(params=[float, int])
def dtype(request):
return request.param
def merge_mappings(*mappings):
for key in set(mappings[0]).intersection(*mappings[1:]):
yield key, tuple(m[key] for m in mappings)
def merge_args(default_args, new_args):
from itertools import zip_longest
fill_value = object()
return [
second if second is not fill_value else first
for first, second in zip_longest(default_args, new_args, fillvalue=fill_value)
]
class method:
""" wrapper class to help with passing methods via parametrize
    This works a bit like using `partial(Class.method, arg, kwarg)`
"""
def __init__(self, name, *args, **kwargs):
self.name = name
self.args = args
self.kwargs = kwargs
def __call__(self, obj, *args, **kwargs):
from collections.abc import Callable
from functools import partial
all_args = merge_args(self.args, args)
all_kwargs = {**self.kwargs, **kwargs}
func = getattr(obj, self.name, None)
if func is None or not isinstance(func, Callable):
# fall back to module level numpy functions if not a xarray object
if not isinstance(obj, (xr.Variable, xr.DataArray, xr.Dataset)):
numpy_func = getattr(np, self.name)
func = partial(numpy_func, obj)
# remove typical xarray args like "dim"
exclude_kwargs = ("dim", "dims")
all_kwargs = {
key: value
for key, value in all_kwargs.items()
if key not in exclude_kwargs
}
else:
raise AttributeError(f"{obj} has no method named '{self.name}'")
return func(*all_args, **all_kwargs)
def __repr__(self):
return f"method_{self.name}"
class function:
""" wrapper class for numpy functions
Same as method, but the name is used for referencing numpy functions
"""
def __init__(self, name_or_function, *args, function_label=None, **kwargs):
if callable(name_or_function):
self.name = (
function_label
if function_label is not None
else name_or_function.__name__
)
self.func = name_or_function
else:
self.name = name_or_function if function_label is None else function_label
self.func = getattr(np, name_or_function)
if self.func is None:
raise AttributeError(
f"module 'numpy' has no attribute named '{self.name}'"
)
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
all_args = merge_args(self.args, args)
all_kwargs = {**self.kwargs, **kwargs}
return self.func(*all_args, **all_kwargs)
def __repr__(self):
return f"function_{self.name}"
def test_apply_ufunc_dataarray(dtype):
func = functools.partial(
xr.apply_ufunc, np.mean, input_core_dims=[["x"]], kwargs={"axis": -1}
)
array = np.linspace(0, 10, 20).astype(dtype) * unit_registry.m
x = np.arange(20) * unit_registry.s
data_array = xr.DataArray(data=array, dims="x", coords={"x": x})
expected = attach_units(func(strip_units(data_array)), extract_units(data_array))
actual = func(data_array)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
def test_apply_ufunc_dataset(dtype):
func = functools.partial(
xr.apply_ufunc, np.mean, input_core_dims=[["x"]], kwargs={"axis": -1}
)
array1 = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * unit_registry.m
array2 = np.linspace(0, 10, 5).astype(dtype) * unit_registry.m
x = np.arange(5) * unit_registry.s
y = np.arange(10) * unit_registry.m
ds = xr.Dataset(
data_vars={"a": (("x", "y"), array1), "b": ("x", array2)},
coords={"x": x, "y": y},
)
expected = attach_units(func(strip_units(ds)), extract_units(ds))
actual = func(ds)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
ids=repr,
)
@pytest.mark.parametrize(
"variant",
(
"data",
pytest.param("dims", marks=pytest.mark.xfail(reason="indexes strip units")),
"coords",
),
)
@pytest.mark.parametrize("fill_value", (10, np.nan))
def test_align_dataarray(fill_value, variant, unit, error, dtype):
original_unit = unit_registry.m
variants = {
"data": (unit, original_unit, original_unit),
"dims": (original_unit, unit, original_unit),
"coords": (original_unit, original_unit, unit),
}
data_unit, dim_unit, coord_unit = variants.get(variant)
array1 = np.linspace(0, 10, 2 * 5).reshape(2, 5).astype(dtype) * original_unit
array2 = np.linspace(0, 8, 2 * 5).reshape(2, 5).astype(dtype) * data_unit
x = np.arange(2) * original_unit
y1 = np.arange(5) * original_unit
y2 = np.arange(2, 7) * dim_unit
y_a1 = np.array([3, 5, 7, 8, 9]) * original_unit
y_a2 = np.array([7, 8, 9, 11, 13]) * coord_unit
coords1 = {"x": x, "y": y1}
coords2 = {"x": x, "y": y2}
if variant == "coords":
coords1["y_a"] = ("y", y_a1)
coords2["y_a"] = ("y", y_a2)
data_array1 = xr.DataArray(data=array1, coords=coords1, dims=("x", "y"))
data_array2 = xr.DataArray(data=array2, coords=coords2, dims=("x", "y"))
fill_value = fill_value * data_unit
func = function(xr.align, join="outer", fill_value=fill_value)
if error is not None and not (
np.isnan(fill_value) and not isinstance(fill_value, Quantity)
):
with pytest.raises(error):
func(data_array1, data_array2)
return
stripped_kwargs = {
key: strip_units(
convert_units(value, {None: original_unit if data_unit != 1 else None})
)
for key, value in func.kwargs.items()
}
units_a = extract_units(data_array1)
units_b = extract_units(data_array2)
expected_a, expected_b = func(
strip_units(data_array1),
strip_units(convert_units(data_array2, units_a)),
**stripped_kwargs,
)
expected_a = attach_units(expected_a, units_a)
if isinstance(array2, Quantity):
expected_b = convert_units(attach_units(expected_b, units_a), units_b)
else:
expected_b = attach_units(expected_b, units_b)
actual_a, actual_b = func(data_array1, data_array2)
assert_units_equal(expected_a, actual_a)
assert_allclose(expected_a, actual_a)
assert_units_equal(expected_b, actual_b)
assert_allclose(expected_b, actual_b)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
ids=repr,
)
@pytest.mark.parametrize(
"variant",
(
"data",
pytest.param("dims", marks=pytest.mark.xfail(reason="indexes strip units")),
"coords",
),
)
@pytest.mark.parametrize("fill_value", (np.float64(10), np.float64(np.nan)))
def test_align_dataset(fill_value, unit, variant, error, dtype):
original_unit = unit_registry.m
variants = {
"data": (unit, original_unit, original_unit),
"dims": (original_unit, unit, original_unit),
"coords": (original_unit, original_unit, unit),
}
data_unit, dim_unit, coord_unit = variants.get(variant)
array1 = np.linspace(0, 10, 2 * 5).reshape(2, 5).astype(dtype) * original_unit
array2 = np.linspace(0, 10, 2 * 5).reshape(2, 5).astype(dtype) * data_unit
x = np.arange(2) * original_unit
y1 = np.arange(5) * original_unit
y2 = np.arange(2, 7) * dim_unit
y_a1 = np.array([3, 5, 7, 8, 9]) * original_unit
y_a2 = np.array([7, 8, 9, 11, 13]) * coord_unit
coords1 = {"x": x, "y": y1}
coords2 = {"x": x, "y": y2}
if variant == "coords":
coords1["y_a"] = ("y", y_a1)
coords2["y_a"] = ("y", y_a2)
ds1 = xr.Dataset(data_vars={"a": (("x", "y"), array1)}, coords=coords1)
ds2 = xr.Dataset(data_vars={"a": (("x", "y"), array2)}, coords=coords2)
fill_value = fill_value * data_unit
func = function(xr.align, join="outer", fill_value=fill_value)
if error is not None and not (
np.isnan(fill_value) and not isinstance(fill_value, Quantity)
):
with pytest.raises(error):
func(ds1, ds2)
return
stripped_kwargs = {
key: strip_units(
convert_units(value, {None: original_unit if data_unit != 1 else None})
)
for key, value in func.kwargs.items()
}
units_a = extract_units(ds1)
units_b = extract_units(ds2)
expected_a, expected_b = func(
strip_units(ds1), strip_units(convert_units(ds2, units_a)), **stripped_kwargs
)
expected_a = attach_units(expected_a, units_a)
if isinstance(array2, Quantity):
expected_b = convert_units(attach_units(expected_b, units_a), units_b)
else:
expected_b = attach_units(expected_b, units_b)
actual_a, actual_b = func(ds1, ds2)
assert_units_equal(expected_a, actual_a)
assert_allclose(expected_a, actual_a)
assert_units_equal(expected_b, actual_b)
assert_allclose(expected_b, actual_b)
def test_broadcast_dataarray(dtype):
array1 = np.linspace(0, 10, 2) * unit_registry.Pa
array2 = np.linspace(0, 10, 3) * unit_registry.Pa
a = xr.DataArray(data=array1, dims="x")
b = xr.DataArray(data=array2, dims="y")
units_a = extract_units(a)
units_b = extract_units(b)
expected_a, expected_b = xr.broadcast(strip_units(a), strip_units(b))
expected_a = attach_units(expected_a, units_a)
expected_b = convert_units(attach_units(expected_b, units_a), units_b)
actual_a, actual_b = xr.broadcast(a, b)
assert_units_equal(expected_a, actual_a)
assert_identical(expected_a, actual_a)
assert_units_equal(expected_b, actual_b)
assert_identical(expected_b, actual_b)
def test_broadcast_dataset(dtype):
array1 = np.linspace(0, 10, 2) * unit_registry.Pa
array2 = np.linspace(0, 10, 3) * unit_registry.Pa
x1 = np.arange(2)
y1 = np.arange(3)
x2 = np.arange(2, 4)
y2 = np.arange(3, 6)
ds = xr.Dataset(
data_vars={"a": ("x", array1), "b": ("y", array2)}, coords={"x": x1, "y": y1}
)
other = xr.Dataset(
data_vars={
"a": ("x", array1.to(unit_registry.hPa)),
"b": ("y", array2.to(unit_registry.hPa)),
},
coords={"x": x2, "y": y2},
)
units_a = extract_units(ds)
units_b = extract_units(other)
expected_a, expected_b = xr.broadcast(strip_units(ds), strip_units(other))
expected_a = attach_units(expected_a, units_a)
expected_b = attach_units(expected_b, units_b)
actual_a, actual_b = xr.broadcast(ds, other)
assert_units_equal(expected_a, actual_a)
assert_identical(expected_a, actual_a)
assert_units_equal(expected_b, actual_b)
assert_identical(expected_b, actual_b)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
ids=repr,
)
@pytest.mark.parametrize(
"variant",
(
"data",
pytest.param("dims", marks=pytest.mark.xfail(reason="indexes strip units")),
"coords",
),
)
def test_combine_by_coords(variant, unit, error, dtype):
original_unit = unit_registry.m
variants = {
"data": (unit, original_unit, original_unit),
"dims": (original_unit, unit, original_unit),
"coords": (original_unit, original_unit, unit),
}
data_unit, dim_unit, coord_unit = variants.get(variant)
array1 = np.zeros(shape=(2, 3), dtype=dtype) * original_unit
array2 = np.zeros(shape=(2, 3), dtype=dtype) * original_unit
x = np.arange(1, 4) * 10 * original_unit
y = np.arange(2) * original_unit
z = np.arange(3) * original_unit
other_array1 = np.ones_like(array1) * data_unit
other_array2 = np.ones_like(array2) * data_unit
other_x = np.arange(1, 4) * 10 * dim_unit
other_y = np.arange(2, 4) * dim_unit
other_z = np.arange(3, 6) * coord_unit
ds = xr.Dataset(
data_vars={"a": (("y", "x"), array1), "b": (("y", "x"), array2)},
coords={"x": x, "y": y, "z": ("x", z)},
)
other = xr.Dataset(
data_vars={"a": (("y", "x"), other_array1), "b": (("y", "x"), other_array2)},
coords={"x": other_x, "y": other_y, "z": ("x", other_z)},
)
if error is not None:
with pytest.raises(error):
xr.combine_by_coords([ds, other])
return
units = extract_units(ds)
expected = attach_units(
xr.combine_by_coords(
[strip_units(ds), strip_units(convert_units(other, units))]
),
units,
)
actual = xr.combine_by_coords([ds, other])
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
ids=repr,
)
@pytest.mark.parametrize(
"variant",
(
"data",
pytest.param("dims", marks=pytest.mark.xfail(reason="indexes strip units")),
"coords",
),
)
def test_combine_nested(variant, unit, error, dtype):
original_unit = unit_registry.m
variants = {
"data": (unit, original_unit, original_unit),
"dims": (original_unit, unit, original_unit),
"coords": (original_unit, original_unit, unit),
}
data_unit, dim_unit, coord_unit = variants.get(variant)
array1 = np.zeros(shape=(2, 3), dtype=dtype) * original_unit
array2 = np.zeros(shape=(2, 3), dtype=dtype) * original_unit
x = np.arange(1, 4) * 10 * original_unit
y = np.arange(2) * original_unit
z = np.arange(3) * original_unit
ds1 = xr.Dataset(
data_vars={"a": (("y", "x"), array1), "b": (("y", "x"), array2)},
coords={"x": x, "y": y, "z": ("x", z)},
)
ds2 = xr.Dataset(
data_vars={
"a": (("y", "x"), np.ones_like(array1) * data_unit),
"b": (("y", "x"), np.ones_like(array2) * data_unit),
},
coords={
"x": np.arange(3) * dim_unit,
"y": np.arange(2, 4) * dim_unit,
"z": ("x", np.arange(-3, 0) * coord_unit),
},
)
ds3 = xr.Dataset(
data_vars={
"a": (("y", "x"), np.zeros_like(array1) * np.nan * data_unit),
"b": (("y", "x"), np.zeros_like(array2) * np.nan * data_unit),
},
coords={
"x": np.arange(3, 6) * dim_unit,
"y": np.arange(4, 6) * dim_unit,
"z": ("x", np.arange(3, 6) * coord_unit),
},
)
ds4 = xr.Dataset(
data_vars={
"a": (("y", "x"), -1 * np.ones_like(array1) * data_unit),
"b": (("y", "x"), -1 * np.ones_like(array2) * data_unit),
},
coords={
"x": np.arange(6, 9) * dim_unit,
"y": np.arange(6, 8) * dim_unit,
"z": ("x", np.arange(6, 9) * coord_unit),
},
)
func = function(xr.combine_nested, concat_dim=["x", "y"])
if error is not None:
with pytest.raises(error):
func([[ds1, ds2], [ds3, ds4]])
return
units = extract_units(ds1)
convert_and_strip = lambda ds: strip_units(convert_units(ds, units))
expected = attach_units(
func(
[
[strip_units(ds1), convert_and_strip(ds2)],
[convert_and_strip(ds3), convert_and_strip(ds4)],
]
),
units,
)
actual = func([[ds1, ds2], [ds3, ds4]])
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
ids=repr,
)
@pytest.mark.parametrize(
"variant",
(
"data",
pytest.param("dims", marks=pytest.mark.xfail(reason="indexes strip units")),
),
)
def test_concat_dataarray(variant, unit, error, dtype):
original_unit = unit_registry.m
variants = {"data": (unit, original_unit), "dims": (original_unit, unit)}
data_unit, dims_unit = variants.get(variant)
array1 = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m
array2 = np.linspace(-5, 0, 5).astype(dtype) * data_unit
x1 = np.arange(5, 15) * original_unit
x2 = np.arange(5) * dims_unit
arr1 = xr.DataArray(data=array1, coords={"x": x1}, dims="x")
arr2 = xr.DataArray(data=array2, coords={"x": x2}, dims="x")
if error is not None:
with pytest.raises(error):
xr.concat([arr1, arr2], dim="x")
return
units = extract_units(arr1)
expected = attach_units(
xr.concat(
[strip_units(arr1), strip_units(convert_units(arr2, units))], dim="x"
),
units,
)
actual = xr.concat([arr1, arr2], dim="x")
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
ids=repr,
)
@pytest.mark.parametrize(
"variant",
(
"data",
pytest.param("dims", marks=pytest.mark.xfail(reason="indexes strip units")),
),
)
def test_concat_dataset(variant, unit, error, dtype):
original_unit = unit_registry.m
variants = {"data": (unit, original_unit), "dims": (original_unit, unit)}
data_unit, dims_unit = variants.get(variant)
array1 = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m
array2 = np.linspace(-5, 0, 5).astype(dtype) * data_unit
x1 = np.arange(5, 15) * original_unit
x2 = np.arange(5) * dims_unit
ds1 = xr.Dataset(data_vars={"a": ("x", array1)}, coords={"x": x1})
ds2 = xr.Dataset(data_vars={"a": ("x", array2)}, coords={"x": x2})
if error is not None:
with pytest.raises(error):
xr.concat([ds1, ds2], dim="x")
return
units = extract_units(ds1)
expected = attach_units(
xr.concat([strip_units(ds1), strip_units(convert_units(ds2, units))], dim="x"),
units,
)
actual = xr.concat([ds1, ds2], dim="x")
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
ids=repr,
)
@pytest.mark.parametrize(
"variant",
(
"data",
pytest.param("dims", marks=pytest.mark.xfail(reason="indexes strip units")),
"coords",
),
)
def test_merge_dataarray(variant, unit, error, dtype):
original_unit = unit_registry.m
variants = {
"data": (unit, original_unit, original_unit),
"dims": (original_unit, unit, original_unit),
"coords": (original_unit, original_unit, unit),
}
data_unit, dim_unit, coord_unit = variants.get(variant)
array1 = np.linspace(0, 1, 2 * 3).reshape(2, 3).astype(dtype) * original_unit
x1 = np.arange(2) * original_unit
y1 = np.arange(3) * original_unit
u1 = np.linspace(10, 20, 2) * original_unit
v1 = np.linspace(10, 20, 3) * original_unit
array2 = np.linspace(1, 2, 2 * 4).reshape(2, 4).astype(dtype) * data_unit
x2 = np.arange(2, 4) * dim_unit
z2 = np.arange(4) * original_unit
u2 = np.linspace(20, 30, 2) * coord_unit
w2 = np.linspace(10, 20, 4) * original_unit
array3 = np.linspace(0, 2, 3 * 4).reshape(3, 4).astype(dtype) * data_unit
y3 = np.arange(3, 6) * dim_unit
z3 = np.arange(4, 8) * dim_unit
v3 = np.linspace(10, 20, 3) * coord_unit
w3 = np.linspace(10, 20, 4) * coord_unit
arr1 = xr.DataArray(
name="a",
data=array1,
coords={"x": x1, "y": y1, "u": ("x", u1), "v": ("y", v1)},
dims=("x", "y"),
)
arr2 = xr.DataArray(
name="a",
data=array2,
coords={"x": x2, "z": z2, "u": ("x", u2), "w": ("z", w2)},
dims=("x", "z"),
)
arr3 = xr.DataArray(
name="a",
data=array3,
coords={"y": y3, "z": z3, "v": ("y", v3), "w": ("z", w3)},
dims=("y", "z"),
)
if error is not None:
with pytest.raises(error):
xr.merge([arr1, arr2, arr3])
return
units = {name: original_unit for name in list("axyzuvw")}
convert_and_strip = lambda arr: strip_units(convert_units(arr, units))
expected_units = {
"a": original_unit,
"u": original_unit,
"v": original_unit,
"w": original_unit,
"x": original_unit,
"y": original_unit,
"z": original_unit,
}
expected = convert_units(
attach_units(
xr.merge(
[
convert_and_strip(arr1),
convert_and_strip(arr2),
convert_and_strip(arr3),
]
),
units,
),
expected_units,
)
actual = xr.merge([arr1, arr2, arr3])
assert_units_equal(expected, actual)
assert_allclose(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
ids=repr,
)
@pytest.mark.parametrize(
"variant",
(
"data",
pytest.param("dims", marks=pytest.mark.xfail(reason="indexes strip units")),
"coords",
),
)
def test_merge_dataset(variant, unit, error, dtype):
original_unit = unit_registry.m
variants = {
"data": (unit, original_unit, original_unit),
"dims": (original_unit, unit, original_unit),
"coords": (original_unit, original_unit, unit),
}
data_unit, dim_unit, coord_unit = variants.get(variant)
array1 = np.zeros(shape=(2, 3), dtype=dtype) * original_unit
array2 = np.zeros(shape=(2, 3), dtype=dtype) * original_unit
x = np.arange(11, 14) * original_unit
y = np.arange(2) * original_unit
z = np.arange(3) * original_unit
ds1 = xr.Dataset(
data_vars={"a": (("y", "x"), array1), "b": (("y", "x"), array2)},
coords={"x": x, "y": y, "u": ("x", z)},
)
ds2 = xr.Dataset(
data_vars={
"a": (("y", "x"), np.ones_like(array1) * data_unit),
"b": (("y", "x"), np.ones_like(array2) * data_unit),
},
coords={
"x": np.arange(3) * dim_unit,
"y": np.arange(2, 4) * dim_unit,
"u": ("x", np.arange(-3, 0) * coord_unit),
},
)
ds3 = xr.Dataset(
data_vars={
"a": (("y", "x"), np.full_like(array1, np.nan) * data_unit),
"b": (("y", "x"), np.full_like(array2, np.nan) * data_unit),
},
coords={
"x": np.arange(3, 6) * dim_unit,
"y": np.arange(4, 6) * dim_unit,
"u": ("x", np.arange(3, 6) * coord_unit),
},
)
func = function(xr.merge)
if error is not None:
with pytest.raises(error):
func([ds1, ds2, ds3])
return
units = extract_units(ds1)
convert_and_strip = lambda ds: strip_units(convert_units(ds, units))
expected_units = {name: original_unit for name in list("abxyzu")}
expected = convert_units(
attach_units(
func(
[convert_and_strip(ds1), convert_and_strip(ds2), convert_and_strip(ds3)]
),
units,
),
expected_units,
)
actual = func([ds1, ds2, ds3])
assert_units_equal(expected, actual)
assert_allclose(expected, actual)
@pytest.mark.parametrize("func", (xr.zeros_like, xr.ones_like))
def test_replication_dataarray(func, dtype):
array = np.linspace(0, 10, 20).astype(dtype) * unit_registry.s
data_array = xr.DataArray(data=array, dims="x")
numpy_func = getattr(np, func.__name__)
units = extract_units(numpy_func(data_array))
expected = attach_units(func(data_array), units)
actual = func(data_array)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize("func", (xr.zeros_like, xr.ones_like))
def test_replication_dataset(func, dtype):
array1 = np.linspace(0, 10, 20).astype(dtype) * unit_registry.s
array2 = np.linspace(5, 10, 10).astype(dtype) * unit_registry.Pa
x = np.arange(20).astype(dtype) * unit_registry.m
y = np.arange(10).astype(dtype) * unit_registry.m
z = y.to(unit_registry.mm)
ds = xr.Dataset(
data_vars={"a": ("x", array1), "b": ("y", array2)},
coords={"x": x, "y": y, "z": ("y", z)},
)
numpy_func = getattr(np, func.__name__)
units = extract_units(ds.map(numpy_func))
expected = attach_units(func(strip_units(ds)), units)
actual = func(ds)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.xfail(
reason=(
"pint is undecided on how `full_like` should work, so incorrect errors "
"may be expected: hgrecco/pint#882"
)
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.m, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.ms, None, id="compatible_unit"),
pytest.param(unit_registry.s, None, id="identical_unit"),
),
ids=repr,
)
def test_replication_full_like_dataarray(unit, error, dtype):
array = np.linspace(0, 5, 10) * unit_registry.s
data_array = xr.DataArray(data=array, dims="x")
fill_value = -1 * unit
if error is not None:
with pytest.raises(error):
xr.full_like(data_array, fill_value=fill_value)
return
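    # the result's data takes the unit of the fill value (no unit for a plain scalar)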
units = {**extract_units(data_array), **{None: unit if unit != 1 else None}}
expected = attach_units(
xr.full_like(strip_units(data_array), fill_value=strip_units(fill_value)), units
)
actual = xr.full_like(data_array, fill_value=fill_value)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.xfail(
reason=(
"pint is undecided on how `full_like` should work, so incorrect errors "
"may be expected: hgrecco/pint#882"
)
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.m, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.ms, None, id="compatible_unit"),
pytest.param(unit_registry.s, None, id="identical_unit"),
),
ids=repr,
)
def test_replication_full_like_dataset(unit, error, dtype):
array1 = np.linspace(0, 10, 20).astype(dtype) * unit_registry.s
array2 = np.linspace(5, 10, 10).astype(dtype) * unit_registry.Pa
x = np.arange(20).astype(dtype) * unit_registry.m
y = np.arange(10).astype(dtype) * unit_registry.m
z = y.to(unit_registry.mm)
ds = xr.Dataset(
data_vars={"a": ("x", array1), "b": ("y", array2)},
coords={"x": x, "y": y, "z": ("y", z)},
)
fill_value = -1 * unit
if error is not None:
with pytest.raises(error):
xr.full_like(ds, fill_value=fill_value)
return
units = {
**extract_units(ds),
**{name: unit if unit != 1 else None for name in ds.data_vars},
}
expected = attach_units(
xr.full_like(strip_units(ds), fill_value=strip_units(fill_value)), units
)
actual = xr.full_like(ds, fill_value=fill_value)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
ids=repr,
)
@pytest.mark.parametrize("fill_value", (np.nan, 10.2))
def test_where_dataarray(fill_value, unit, error, dtype):
array = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m
x = xr.DataArray(data=array, dims="x")
cond = x < 5 * unit_registry.m
fill_value = fill_value * unit
if error is not None and not (
np.isnan(fill_value) and not isinstance(fill_value, Quantity)
):
with pytest.raises(error):
xr.where(cond, x, fill_value)
return
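    # convert the fill value to the array's unit before computing the unitless reference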
expected = attach_units(
xr.where(
cond,
strip_units(x),
strip_units(convert_units(fill_value, {None: unit_registry.m})),
),
extract_units(x),
)
actual = xr.where(cond, x, fill_value)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
ids=repr,
)
@pytest.mark.parametrize("fill_value", (np.nan, 10.2))
def test_where_dataset(fill_value, unit, error, dtype):
array1 = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m
array2 = np.linspace(-5, 0, 10).astype(dtype) * unit_registry.m
x = np.arange(10) * unit_registry.s
ds = xr.Dataset(data_vars={"a": ("x", array1), "b": ("x", array2)}, coords={"x": x})
cond = x < 5 * unit_registry.s
fill_value = fill_value * unit
if error is not None and not (
np.isnan(fill_value) and not isinstance(fill_value, Quantity)
):
with pytest.raises(error):
xr.where(cond, ds, fill_value)
return
expected = attach_units(
xr.where(
cond,
strip_units(ds),
strip_units(convert_units(fill_value, {None: unit_registry.m})),
),
extract_units(ds),
)
actual = xr.where(cond, ds, fill_value)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
def test_dot_dataarray(dtype):
array1 = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype)
* unit_registry.m
/ unit_registry.s
)
array2 = (
np.linspace(10, 20, 10 * 20).reshape(10, 20).astype(dtype) * unit_registry.s
)
data_array = xr.DataArray(data=array1, dims=("x", "y"))
other = xr.DataArray(data=array2, dims=("y", "z"))
expected = attach_units(
xr.dot(strip_units(data_array), strip_units(other)), {None: unit_registry.m}
)
actual = xr.dot(data_array, other)
assert_units_equal(expected, actual)
assert_identical(expected, actual)
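# class decorator that disables inherited test methods by overwriting them with None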
def delete_attrs(*to_delete):
def wrapper(cls):
for item in to_delete:
setattr(cls, item, None)
return cls
return wrapper
@delete_attrs(
"test_getitem_with_mask",
"test_getitem_with_mask_nd_indexer",
"test_index_0d_string",
"test_index_0d_datetime",
"test_index_0d_timedelta64",
"test_0d_time_data",
"test_index_0d_not_a_time",
"test_datetime64_conversion",
"test_timedelta64_conversion",
"test_pandas_period_index",
"test_1d_math",
"test_1d_reduce",
"test_array_interface",
"test___array__",
"test_copy_index",
"test_concat_number_strings",
"test_concat_fixed_len_str",
"test_concat_mixed_dtypes",
"test_pandas_datetime64_with_tz",
"test_pandas_data",
"test_multiindex",
)
class TestVariable(VariableSubclassobjects):
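    # every variable created by this test class wraps its data in a pint Quantity (metres)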
@staticmethod
def cls(dims, data, *args, **kwargs):
return xr.Variable(
dims, unit_registry.Quantity(data, unit_registry.m), *args, **kwargs
)
def example_1d_objects(self):
for data in [
range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
np.array(["a", "b", "c"], dtype=object),
]:
yield (self.cls("x", data), data)
@pytest.mark.parametrize(
"func",
(
method("all"),
method("any"),
method("argmax"),
method("argmin"),
method("argsort"),
method("cumprod"),
method("cumsum"),
method("max"),
method("mean"),
method("median"),
method("min"),
pytest.param(
method("prod"),
marks=pytest.mark.xfail(reason="not implemented by pint"),
),
method("std"),
method("sum"),
method("var"),
),
ids=repr,
)
def test_aggregation(self, func, dtype):
array = np.linspace(0, 1, 10).astype(dtype) * (
unit_registry.m if func.name != "cumprod" else unit_registry.dimensionless
)
variable = xr.Variable("x", array)
units = extract_units(func(array))
expected = attach_units(func(strip_units(variable)), units)
actual = func(variable)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("astype", np.float32),
method("conj"),
method("conjugate"),
method("clip", min=2, max=7),
),
ids=repr,
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_numpy_methods(self, func, unit, error, dtype):
array = np.linspace(0, 1, 10).astype(dtype) * unit_registry.m
variable = xr.Variable("x", array)
args = [
item * unit if isinstance(item, (int, float, list)) else item
for item in func.args
]
kwargs = {
key: value * unit if isinstance(value, (int, float, list)) else value
for key, value in func.kwargs.items()
}
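        # only `clip` (and `searchsorted`) compare the arguments against the data,
        # so only those can raise on incompatible units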
if error is not None and func.name in ("searchsorted", "clip"):
with pytest.raises(error):
func(variable, *args, **kwargs)
return
converted_args = [
strip_units(convert_units(item, {None: unit_registry.m})) for item in args
]
converted_kwargs = {
key: strip_units(convert_units(value, {None: unit_registry.m}))
for key, value in kwargs.items()
}
units = extract_units(func(array, *args, **kwargs))
expected = attach_units(
func(strip_units(variable), *converted_args, **converted_kwargs), units
)
actual = func(variable, *args, **kwargs)
assert_units_equal(expected, actual)
xr.testing.assert_allclose(expected, actual)
@pytest.mark.parametrize(
"func", (method("item", 5), method("searchsorted", 5)), ids=repr
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_raw_numpy_methods(self, func, unit, error, dtype):
array = np.linspace(0, 1, 10).astype(dtype) * unit_registry.m
variable = xr.Variable("x", array)
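        # `item` takes plain integer positions, so its arguments never get units
        # attached and it cannot raise a unit error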
args = [
item * unit
if isinstance(item, (int, float, list)) and func.name != "item"
else item
for item in func.args
]
kwargs = {
key: value * unit
if isinstance(value, (int, float, list)) and func.name != "item"
else value
for key, value in func.kwargs.items()
}
if error is not None and func.name != "item":
with pytest.raises(error):
func(variable, *args, **kwargs)
return
converted_args = [
strip_units(convert_units(item, {None: unit_registry.m}))
if func.name != "item"
else item
for item in args
]
converted_kwargs = {
key: strip_units(convert_units(value, {None: unit_registry.m}))
if func.name != "item"
else value
for key, value in kwargs.items()
}
units = extract_units(func(array, *args, **kwargs))
expected = attach_units(
func(strip_units(variable), *converted_args, **converted_kwargs), units
)
actual = func(variable, *args, **kwargs)
assert_units_equal(expected, actual)
np.testing.assert_allclose(expected, actual)
@pytest.mark.parametrize(
"func", (method("isnull"), method("notnull"), method("count")), ids=repr
)
def test_missing_value_detection(self, func):
array = (
np.array(
[
[1.4, 2.3, np.nan, 7.2],
[np.nan, 9.7, np.nan, np.nan],
[2.1, np.nan, np.nan, 4.6],
[9.9, np.nan, 7.2, 9.1],
]
)
* unit_registry.degK
)
variable = xr.Variable(("x", "y"), array)
expected = func(strip_units(variable))
actual = func(variable)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_missing_value_fillna(self, unit, error):
value = 10
array = (
np.array(
[
[1.4, 2.3, np.nan, 7.2],
[np.nan, 9.7, np.nan, np.nan],
[2.1, np.nan, np.nan, 4.6],
[9.9, np.nan, 7.2, 9.1],
]
)
* unit_registry.m
)
variable = xr.Variable(("x", "y"), array)
fill_value = value * unit
if error is not None:
with pytest.raises(error):
variable.fillna(value=fill_value)
return
expected = attach_units(
strip_units(variable).fillna(
value=fill_value.to(unit_registry.m).magnitude
),
extract_units(variable),
)
actual = variable.fillna(value=fill_value)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
            pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"convert_data",
(
pytest.param(False, id="no_conversion"),
pytest.param(True, id="with_conversion"),
),
)
@pytest.mark.parametrize(
"func",
(
method("equals"),
pytest.param(
method("identical"),
marks=pytest.mark.skip(reason="behavior of identical is undecided"),
),
),
ids=repr,
)
def test_comparisons(self, func, unit, convert_data, dtype):
array = np.linspace(0, 1, 9).astype(dtype)
quantity1 = array * unit_registry.m
variable = xr.Variable("x", quantity1)
if convert_data and is_compatible(unit_registry.m, unit):
quantity2 = convert_units(array * unit_registry.m, {None: unit})
else:
quantity2 = array * unit
other = xr.Variable("x", quantity2)
expected = func(
strip_units(variable),
strip_units(
convert_units(other, extract_units(variable))
if is_compatible(unit_registry.m, unit)
else other
),
)
if func.name == "identical":
expected &= extract_units(variable) == extract_units(other)
else:
expected &= all(
compatible_mappings(
extract_units(variable), extract_units(other)
).values()
)
actual = func(variable, other)
assert expected == actual
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
def test_broadcast_equals(self, unit, dtype):
base_unit = unit_registry.m
left_array = np.ones(shape=(2, 2), dtype=dtype) * base_unit
value = (
(1 * base_unit).to(unit).magnitude if is_compatible(unit, base_unit) else 1
)
right_array = np.full(shape=(2,), fill_value=value, dtype=dtype) * unit
left = xr.Variable(("x", "y"), left_array)
right = xr.Variable("x", right_array)
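        # when the units are incompatible, skip the conversion (map to None) and
        # force the expected result to False via `is_compatible`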
units = {
**extract_units(left),
**({} if is_compatible(unit, base_unit) else {None: None}),
}
expected = strip_units(left).broadcast_equals(
strip_units(convert_units(right, units))
) & is_compatible(unit, base_unit)
actual = left.broadcast_equals(right)
assert expected == actual
@pytest.mark.parametrize(
"indices",
(
pytest.param(4, id="single index"),
pytest.param([5, 2, 9, 1], id="multiple indices"),
),
)
def test_isel(self, indices, dtype):
array = np.linspace(0, 5, 10).astype(dtype) * unit_registry.s
variable = xr.Variable("x", array)
expected = attach_units(
strip_units(variable).isel(x=indices), extract_units(variable)
)
actual = variable.isel(x=indices)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"func",
(
function(lambda x, *_: +x, function_label="unary_plus"),
function(lambda x, *_: -x, function_label="unary_minus"),
function(lambda x, *_: abs(x), function_label="absolute"),
function(lambda x, y: x + y, function_label="sum"),
function(lambda x, y: y + x, function_label="commutative_sum"),
function(lambda x, y: x * y, function_label="product"),
function(lambda x, y: y * x, function_label="commutative_product"),
),
ids=repr,
)
def test_1d_math(self, func, unit, error, dtype):
base_unit = unit_registry.m
array = np.arange(5).astype(dtype) * base_unit
variable = xr.Variable("x", array)
values = np.ones(5)
y = values * unit
if error is not None and func.name in ("sum", "commutative_sum"):
with pytest.raises(error):
func(variable, y)
return
units = extract_units(func(array, y))
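        # convert both operands to the result's units where possible so the
        # magnitudes of expected and actual line up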
if all(compatible_mappings(units, extract_units(y)).values()):
converted_y = convert_units(y, units)
else:
converted_y = y
if all(compatible_mappings(units, extract_units(variable)).values()):
converted_variable = convert_units(variable, units)
else:
converted_variable = variable
expected = attach_units(
func(strip_units(converted_variable), strip_units(converted_y)), units
)
actual = func(variable, y)
assert_units_equal(expected, actual)
xr.testing.assert_allclose(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"func", (method("where"), method("_getitem_with_mask")), ids=repr
)
def test_masking(self, func, unit, error, dtype):
base_unit = unit_registry.m
array = np.linspace(0, 5, 10).astype(dtype) * base_unit
variable = xr.Variable("x", array)
cond = np.array([True, False] * 5)
other = -1 * unit
if error is not None:
with pytest.raises(error):
func(variable, cond, other)
return
expected = attach_units(
func(
strip_units(variable),
cond,
strip_units(
convert_units(
other,
{None: base_unit}
if is_compatible(base_unit, unit)
else {None: None},
)
),
),
extract_units(variable),
)
actual = func(variable, cond, other)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
def test_squeeze(self, dtype):
shape = (2, 1, 3, 1, 1, 2)
names = list("abcdef")
array = np.ones(shape=shape) * unit_registry.m
variable = xr.Variable(names, array)
expected = attach_units(
strip_units(variable).squeeze(), extract_units(variable)
)
actual = variable.squeeze()
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
        names = tuple(name for name, size in zip(names, shape) if size == 1)
for name in names:
expected = attach_units(
strip_units(variable).squeeze(dim=name), extract_units(variable)
)
actual = variable.squeeze(dim=name)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("coarsen", windows={"y": 2}, func=np.mean),
pytest.param(
method("quantile", q=[0.25, 0.75]),
marks=pytest.mark.xfail(
LooseVersion(pint.__version__) < "0.12",
reason="quantile / nanquantile not implemented yet",
),
),
pytest.param(
method("rank", dim="x"),
marks=pytest.mark.xfail(reason="rank not implemented for non-ndarray"),
),
method("roll", {"x": 2}),
pytest.param(
method("rolling_window", "x", 3, "window"),
marks=pytest.mark.xfail(reason="converts to ndarray"),
),
method("reduce", np.std, "x"),
method("round", 2),
method("shift", {"x": -2}),
method("transpose", "y", "x"),
),
ids=repr,
)
def test_computation(self, func, dtype):
base_unit = unit_registry.m
array = np.linspace(0, 5, 5 * 10).reshape(5, 10).astype(dtype) * base_unit
variable = xr.Variable(("x", "y"), array)
expected = attach_units(func(strip_units(variable)), extract_units(variable))
actual = func(variable)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_searchsorted(self, unit, error, dtype):
base_unit = unit_registry.m
array = np.linspace(0, 5, 10).astype(dtype) * base_unit
variable = xr.Variable("x", array)
value = 0 * unit
if error is not None:
with pytest.raises(error):
variable.searchsorted(value)
return
expected = strip_units(variable).searchsorted(
strip_units(convert_units(value, {None: base_unit}))
)
actual = variable.searchsorted(value)
assert_units_equal(expected, actual)
np.testing.assert_allclose(expected, actual)
def test_stack(self, dtype):
array = np.linspace(0, 5, 3 * 10).reshape(3, 10).astype(dtype) * unit_registry.m
variable = xr.Variable(("x", "y"), array)
expected = attach_units(
strip_units(variable).stack(z=("x", "y")), extract_units(variable)
)
actual = variable.stack(z=("x", "y"))
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
def test_unstack(self, dtype):
array = np.linspace(0, 5, 3 * 10).astype(dtype) * unit_registry.m
variable = xr.Variable("z", array)
expected = attach_units(
strip_units(variable).unstack(z={"x": 3, "y": 10}), extract_units(variable)
)
actual = variable.unstack(z={"x": 3, "y": 10})
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_concat(self, unit, error, dtype):
array1 = (
np.linspace(0, 5, 9 * 10).reshape(3, 6, 5).astype(dtype) * unit_registry.m
)
array2 = np.linspace(5, 10, 10 * 3).reshape(3, 2, 5).astype(dtype) * unit
variable = xr.Variable(("x", "y", "z"), array1)
other = xr.Variable(("x", "y", "z"), array2)
if error is not None:
with pytest.raises(error):
xr.Variable.concat([variable, other], dim="y")
return
units = extract_units(variable)
expected = attach_units(
xr.Variable.concat(
[strip_units(variable), strip_units(convert_units(other, units))],
dim="y",
),
units,
)
actual = xr.Variable.concat([variable, other], dim="y")
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
def test_set_dims(self, dtype):
array = np.linspace(0, 5, 3 * 10).reshape(3, 10).astype(dtype) * unit_registry.m
variable = xr.Variable(("x", "y"), array)
dims = {"z": 6, "x": 3, "a": 1, "b": 4, "y": 10}
expected = attach_units(
strip_units(variable).set_dims(dims), extract_units(variable)
)
actual = variable.set_dims(dims)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
def test_copy(self, dtype):
array = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m
other = np.arange(10).astype(dtype) * unit_registry.s
variable = xr.Variable("x", array)
expected = attach_units(
strip_units(variable).copy(data=strip_units(other)), extract_units(other)
)
actual = variable.copy(data=other)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
def test_no_conflicts(self, unit, dtype):
base_unit = unit_registry.m
array1 = (
np.array(
[
[6.3, 0.3, 0.45],
[np.nan, 0.3, 0.3],
[3.7, np.nan, 0.2],
[9.43, 0.3, 0.7],
]
)
* base_unit
)
array2 = np.array([np.nan, 0.3, np.nan]) * unit
variable = xr.Variable(("x", "y"), array1)
other = xr.Variable("y", array2)
expected = strip_units(variable).no_conflicts(
strip_units(
convert_units(
other, {None: base_unit if is_compatible(base_unit, unit) else None}
)
)
) & is_compatible(base_unit, unit)
actual = variable.no_conflicts(other)
assert expected == actual
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad_constant_values(self, dtype, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2).astype(dtype) * unit_registry.m
v = xr.Variable(["x", "y", "z"], data)
actual = v.pad(**xr_arg, mode="constant")
expected = xr.Variable(
v.dims,
np.pad(
v.data.astype(float), np_arg, mode="constant", constant_values=np.nan,
),
)
xr.testing.assert_identical(expected, actual)
assert_units_equal(expected, actual)
assert isinstance(actual._data, type(v._data))
# for the boolean array, we pad False
data = np.full_like(data, False, dtype=bool).reshape(4, 3, 2)
v = xr.Variable(["x", "y", "z"], data)
actual = v.pad(**xr_arg, mode="constant", constant_values=data.flat[0])
expected = xr.Variable(
v.dims,
np.pad(v.data, np_arg, mode="constant", constant_values=v.data.flat[0]),
)
xr.testing.assert_identical(actual, expected)
assert_units_equal(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(
1,
DimensionalityError,
id="no_unit",
marks=pytest.mark.xfail(
LooseVersion(pint.__version__) < LooseVersion("0.10.2"),
reason="bug in pint's implementation of np.pad",
),
),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_pad_unit_constant_value(self, unit, error, dtype):
array = np.linspace(0, 5, 3 * 10).reshape(3, 10).astype(dtype) * unit_registry.m
variable = xr.Variable(("x", "y"), array)
fill_value = -100 * unit
func = method("pad", mode="constant", x=(2, 3), y=(1, 4))
if error is not None:
with pytest.raises(error):
func(variable, constant_values=fill_value)
return
units = extract_units(variable)
expected = attach_units(
func(
strip_units(variable),
constant_values=strip_units(convert_units(fill_value, units)),
),
units,
)
actual = func(variable, constant_values=fill_value)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
class TestDataArray:
@pytest.mark.filterwarnings("error:::pint[.*]")
@pytest.mark.parametrize(
"variant",
(
pytest.param(
"with_dims",
marks=pytest.mark.xfail(reason="units in indexes are not supported"),
),
"with_coords",
"without_coords",
),
)
def test_init(self, variant, dtype):
array = np.linspace(1, 2, 10, dtype=dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.s
y = x.to(unit_registry.ms)
variants = {
"with_dims": {"x": x},
"with_coords": {"y": ("x", y)},
"without_coords": {},
}
kwargs = {"data": array, "dims": "x", "coords": variants.get(variant)}
data_array = xr.DataArray(**kwargs)
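        # creating the DataArray should not strip the units from the data or the coordinates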
assert isinstance(data_array.data, Quantity)
assert all(
{
name: isinstance(coord.data, Quantity)
for name, coord in data_array.coords.items()
}.values()
)
@pytest.mark.filterwarnings("error:::pint[.*]")
@pytest.mark.parametrize(
"func", (pytest.param(str, id="str"), pytest.param(repr, id="repr"))
)
@pytest.mark.parametrize(
"variant",
(
pytest.param(
"with_dims",
marks=pytest.mark.xfail(reason="units in indexes are not supported"),
),
pytest.param("with_coords"),
pytest.param("without_coords"),
),
)
def test_repr(self, func, variant, dtype):
array = np.linspace(1, 2, 10, dtype=dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.s
y = x.to(unit_registry.ms)
variants = {
"with_dims": {"x": x},
"with_coords": {"y": ("x", y)},
"without_coords": {},
}
kwargs = {"data": array, "dims": "x", "coords": variants.get(variant)}
data_array = xr.DataArray(**kwargs)
# FIXME: this just checks that the repr does not raise
# warnings or errors, but does not check the result
func(data_array)
@pytest.mark.parametrize(
"func",
(
function("all"),
function("any"),
function("argmax"),
function("argmin"),
function("max"),
function("mean"),
pytest.param(
function("median"),
marks=pytest.mark.xfail(
reason="median does not work with dataarrays yet"
),
),
function("min"),
pytest.param(
function("prod"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
function("sum"),
function("std"),
function("var"),
function("cumsum"),
function("cumprod"),
method("all"),
method("any"),
method("argmax"),
method("argmin"),
method("max"),
method("mean"),
method("median"),
method("min"),
pytest.param(
method("prod"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
method("sum"),
method("std"),
method("var"),
method("cumsum"),
method("cumprod"),
),
ids=repr,
)
def test_aggregation(self, func, dtype):
array = np.arange(10).astype(dtype) * (
unit_registry.m if func.name != "cumprod" else unit_registry.dimensionless
)
data_array = xr.DataArray(data=array, dims="x")
# units differ based on the applied function, so we need to
# first compute the units
units = extract_units(func(array))
expected = attach_units(func(strip_units(data_array)), units)
actual = func(data_array)
assert_units_equal(expected, actual)
xr.testing.assert_allclose(expected, actual)
@pytest.mark.parametrize(
"func",
(
pytest.param(operator.neg, id="negate"),
pytest.param(abs, id="absolute"),
pytest.param(np.round, id="round"),
),
)
def test_unary_operations(self, func, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
units = extract_units(func(array))
expected = attach_units(func(strip_units(data_array)), units)
actual = func(data_array)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func",
(
pytest.param(lambda x: 2 * x, id="multiply"),
pytest.param(lambda x: x + x, id="add"),
pytest.param(lambda x: x[0] + x, id="add scalar"),
pytest.param(lambda x: x.T @ x, id="matrix multiply"),
),
)
def test_binary_operations(self, func, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
units = extract_units(func(array))
expected = attach_units(func(strip_units(data_array)), units)
actual = func(data_array)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"comparison",
(
pytest.param(operator.lt, id="less_than"),
pytest.param(operator.ge, id="greater_equal"),
pytest.param(operator.eq, id="equal"),
),
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, ValueError, id="without_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_comparison_operations(self, comparison, unit, error, dtype):
array = (
np.array([10.1, 5.2, 6.5, 8.0, 21.3, 7.1, 1.3]).astype(dtype)
* unit_registry.m
)
data_array = xr.DataArray(data=array)
value = 8
to_compare_with = value * unit
        # quantities with incompatible units never raise on `==`; they simply compare unequal
if error is not None and comparison is not operator.eq:
with pytest.raises(error):
comparison(array, to_compare_with)
with pytest.raises(error):
comparison(data_array, to_compare_with)
return
actual = comparison(data_array, to_compare_with)
expected_units = {None: unit_registry.m if array.check(unit) else None}
expected = array.check(unit) & comparison(
strip_units(data_array),
strip_units(convert_units(to_compare_with, expected_units)),
)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"units,error",
(
pytest.param(unit_registry.dimensionless, None, id="dimensionless"),
pytest.param(unit_registry.m, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.degree, None, id="compatible_unit"),
),
)
def test_univariate_ufunc(self, units, error, dtype):
array = np.arange(10).astype(dtype) * units
data_array = xr.DataArray(data=array)
func = function("sin")
if error is not None:
with pytest.raises(error):
np.sin(data_array)
return
expected = attach_units(
func(strip_units(convert_units(data_array, {None: unit_registry.radians}))),
{None: unit_registry.dimensionless},
)
actual = func(data_array)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.xfail(reason="needs the type register system for __array_ufunc__")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="without_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(
unit_registry.mm,
None,
id="compatible_unit",
marks=pytest.mark.xfail(reason="pint converts to the wrong units"),
),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_bivariate_ufunc(self, unit, error, dtype):
original_unit = unit_registry.m
array = np.arange(10).astype(dtype) * original_unit
data_array = xr.DataArray(data=array)
if error is not None:
with pytest.raises(error):
np.maximum(data_array, 1 * unit)
return
expected_units = {None: original_unit}
expected = attach_units(
np.maximum(
strip_units(data_array),
strip_units(convert_units(1 * unit, expected_units)),
),
expected_units,
)
actual = np.maximum(data_array, 1 * unit)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
actual = np.maximum(1 * unit, data_array)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize("property", ("T", "imag", "real"))
def test_numpy_properties(self, property, dtype):
array = (
np.arange(5 * 10).astype(dtype)
+ 1j * np.linspace(-1, 0, 5 * 10).astype(dtype)
).reshape(5, 10) * unit_registry.s
data_array = xr.DataArray(data=array, dims=("x", "y"))
expected = attach_units(
getattr(strip_units(data_array), property), extract_units(data_array)
)
actual = getattr(data_array, property)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func",
(method("conj"), method("argsort"), method("conjugate"), method("round")),
ids=repr,
)
def test_numpy_methods(self, func, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array, dims="x")
units = extract_units(func(array))
expected = attach_units(strip_units(data_array), units)
actual = func(data_array)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
def test_item(self, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
func = method("item", 2)
expected = func(strip_units(data_array)) * unit_registry.m
actual = func(data_array)
np.testing.assert_allclose(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"func",
(
method("searchsorted", 5),
pytest.param(
function("searchsorted", 5),
marks=pytest.mark.xfail(
reason="xarray does not implement __array_function__"
),
),
),
ids=repr,
)
def test_searchsorted(self, func, unit, error, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
scalar_types = (int, float)
args = list(value * unit for value in func.args)
kwargs = {
key: (value * unit if isinstance(value, scalar_types) else value)
for key, value in func.kwargs.items()
}
if error is not None:
with pytest.raises(error):
func(data_array, *args, **kwargs)
return
units = extract_units(data_array)
expected_units = extract_units(func(array, *args, **kwargs))
stripped_args = [strip_units(convert_units(value, units)) for value in args]
stripped_kwargs = {
key: strip_units(convert_units(value, units))
for key, value in kwargs.items()
}
expected = attach_units(
func(strip_units(data_array), *stripped_args, **stripped_kwargs),
expected_units,
)
actual = func(data_array, *args, **kwargs)
assert_units_equal(expected, actual)
np.testing.assert_allclose(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("clip", min=3, max=8),
pytest.param(
function("clip", a_min=3, a_max=8),
marks=pytest.mark.xfail(
reason="xarray does not implement __array_function__"
),
),
),
ids=repr,
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_numpy_methods_with_args(self, func, unit, error, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
scalar_types = (int, float)
args = list(value * unit for value in func.args)
kwargs = {
key: (value * unit if isinstance(value, scalar_types) else value)
for key, value in func.kwargs.items()
}
if error is not None:
with pytest.raises(error):
func(data_array, *args, **kwargs)
return
units = extract_units(data_array)
expected_units = extract_units(func(array, *args, **kwargs))
stripped_args = [strip_units(convert_units(value, units)) for value in args]
stripped_kwargs = {
key: strip_units(convert_units(value, units))
for key, value in kwargs.items()
}
expected = attach_units(
func(strip_units(data_array), *stripped_args, **stripped_kwargs),
expected_units,
)
actual = func(data_array, *args, **kwargs)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func", (method("isnull"), method("notnull"), method("count")), ids=repr
)
def test_missing_value_detection(self, func, dtype):
array = (
np.array(
[
[1.4, 2.3, np.nan, 7.2],
[np.nan, 9.7, np.nan, np.nan],
[2.1, np.nan, np.nan, 4.6],
[9.9, np.nan, 7.2, 9.1],
]
)
* unit_registry.degK
)
data_array = xr.DataArray(data=array)
expected = func(strip_units(data_array))
actual = func(data_array)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.xfail(reason="ffill and bfill lose units in data")
@pytest.mark.parametrize("func", (method("ffill"), method("bfill")), ids=repr)
def test_missing_value_filling(self, func, dtype):
array = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.degK
)
x = np.arange(len(array))
data_array = xr.DataArray(data=array, coords={"x": x}, dims="x")
expected = attach_units(
func(strip_units(data_array), dim="x"), extract_units(data_array)
)
actual = func(data_array, dim="x")
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"fill_value",
(
pytest.param(-1, id="python_scalar"),
pytest.param(np.array(-1), id="numpy_scalar"),
pytest.param(np.array([-1]), id="numpy_array"),
),
)
def test_fillna(self, fill_value, unit, error, dtype):
original_unit = unit_registry.m
array = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* original_unit
)
data_array = xr.DataArray(data=array)
func = method("fillna")
value = fill_value * unit
if error is not None:
with pytest.raises(error):
func(data_array, value=value)
return
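        # the fill value is converted to the array's unit before building the unitless reference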
units = extract_units(data_array)
expected = attach_units(
func(
strip_units(data_array), value=strip_units(convert_units(value, units))
),
units,
)
actual = func(data_array, value=value)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
def test_dropna(self, dtype):
array = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.m
)
x = np.arange(len(array))
data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"])
units = extract_units(data_array)
expected = attach_units(strip_units(data_array).dropna(dim="x"), units)
actual = data_array.dropna(dim="x")
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
def test_isin(self, unit, dtype):
array = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.m
)
data_array = xr.DataArray(data=array, dims="x")
raw_values = np.array([1.4, np.nan, 2.3]).astype(dtype)
values = raw_values * unit
units = {None: unit_registry.m if array.check(unit) else None}
expected = strip_units(data_array).isin(
strip_units(convert_units(values, units))
) & array.check(unit)
actual = data_array.isin(values)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"variant", ("masking", "replacing_scalar", "replacing_array", "dropping")
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_where(self, variant, unit, error, dtype):
original_unit = unit_registry.m
array = np.linspace(0, 1, 10).astype(dtype) * original_unit
data_array = xr.DataArray(data=array)
condition = data_array < 0.5 * original_unit
other = np.linspace(-2, -1, 10).astype(dtype) * unit
variant_kwargs = {
"masking": {"cond": condition},
"replacing_scalar": {"cond": condition, "other": -1 * unit},
"replacing_array": {"cond": condition, "other": other},
"dropping": {"cond": condition, "drop": True},
}
kwargs = variant_kwargs.get(variant)
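        # strip the units from the keyword arguments, converting compatible values
        # to the array's unit first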
kwargs_without_units = {
key: strip_units(
convert_units(
value, {None: original_unit if array.check(unit) else None}
)
)
for key, value in kwargs.items()
}
if variant not in ("masking", "dropping") and error is not None:
with pytest.raises(error):
data_array.where(**kwargs)
return
expected = attach_units(
strip_units(data_array).where(**kwargs_without_units),
extract_units(data_array),
)
actual = data_array.where(**kwargs)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.xfail(reason="uses numpy.vectorize")
def test_interpolate_na(self):
array = (
np.array([-1.03, 0.1, 1.4, np.nan, 2.3, np.nan, np.nan, 9.1])
* unit_registry.m
)
x = np.arange(len(array))
data_array = xr.DataArray(data=array, coords={"x": x}, dims="x")
units = extract_units(data_array)
expected = attach_units(strip_units(data_array).interpolate_na(dim="x"), units)
actual = data_array.interpolate_na(dim="x")
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
            pytest.param(unit_registry.cm, None, id="compatible_unit"),
            pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_combine_first(self, unit, error, dtype):
array = np.zeros(shape=(2, 2), dtype=dtype) * unit_registry.m
other_array = np.ones_like(array) * unit
data_array = xr.DataArray(
data=array, coords={"x": ["a", "b"], "y": [-1, 0]}, dims=["x", "y"]
)
other = xr.DataArray(
data=other_array, coords={"x": ["b", "c"], "y": [0, 1]}, dims=["x", "y"]
)
if error is not None:
with pytest.raises(error):
data_array.combine_first(other)
return
units = extract_units(data_array)
expected = attach_units(
strip_units(data_array).combine_first(
strip_units(convert_units(other, units))
),
units,
)
actual = data_array.combine_first(other)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"variation",
(
"data",
pytest.param(
"dims", marks=pytest.mark.xfail(reason="units in indexes not supported")
),
"coords",
),
)
@pytest.mark.parametrize(
"func",
(
method("equals"),
pytest.param(
method("identical"),
marks=pytest.mark.skip(reason="the behavior of identical is undecided"),
),
),
ids=repr,
)
def test_comparisons(self, func, variation, unit, dtype):
def is_compatible(a, b):
a = a if a is not None else 1
b = b if b is not None else 1
quantity = np.arange(5) * a
return a == b or quantity.check(b)
data = np.linspace(0, 5, 10).astype(dtype)
coord = np.arange(len(data)).astype(dtype)
base_unit = unit_registry.m
array = data * (base_unit if variation == "data" else 1)
x = coord * (base_unit if variation == "dims" else 1)
y = coord * (base_unit if variation == "coords" else 1)
variations = {
"data": (unit, 1, 1),
"dims": (1, unit, 1),
"coords": (1, 1, unit),
}
data_unit, dim_unit, coord_unit = variations.get(variation)
data_array = xr.DataArray(data=array, coords={"x": x, "y": ("x", y)}, dims="x")
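        # build a second DataArray that differs from the original only in the parametrized units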
other = attach_units(
strip_units(data_array), {None: data_unit, "x": dim_unit, "y": coord_unit}
)
units = extract_units(data_array)
other_units = extract_units(other)
equal_arrays = all(
is_compatible(units[name], other_units[name]) for name in units.keys()
) and (
strip_units(data_array).equals(
strip_units(convert_units(other, extract_units(data_array)))
)
)
equal_units = units == other_units
expected = equal_arrays and (func.name != "identical" or equal_units)
actual = func(data_array, other)
assert expected == actual
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
def test_broadcast_like(self, unit, dtype):
array1 = np.linspace(1, 2, 2 * 1).reshape(2, 1).astype(dtype) * unit_registry.Pa
array2 = np.linspace(0, 1, 2 * 3).reshape(2, 3).astype(dtype) * unit_registry.Pa
x1 = np.arange(2) * unit_registry.m
x2 = np.arange(2) * unit
y1 = np.array([0]) * unit_registry.m
y2 = np.arange(3) * unit
arr1 = xr.DataArray(data=array1, coords={"x": x1, "y": y1}, dims=("x", "y"))
arr2 = xr.DataArray(data=array2, coords={"x": x2, "y": y2}, dims=("x", "y"))
expected = attach_units(
strip_units(arr1).broadcast_like(strip_units(arr2)), extract_units(arr1)
)
actual = arr1.broadcast_like(arr2)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
def test_broadcast_equals(self, unit, dtype):
left_array = np.ones(shape=(2, 2), dtype=dtype) * unit_registry.m
right_array = np.ones(shape=(2,), dtype=dtype) * unit
left = xr.DataArray(data=left_array, dims=("x", "y"))
right = xr.DataArray(data=right_array, dims="x")
units = {
**extract_units(left),
**({} if left_array.check(unit) else {None: None}),
}
expected = strip_units(left).broadcast_equals(
strip_units(convert_units(right, units))
) & left_array.check(unit)
actual = left.broadcast_equals(right)
assert expected == actual
@pytest.mark.parametrize(
"func",
(
method("pipe", lambda da: da * 10),
method("assign_coords", y2=("y", np.arange(10) * unit_registry.mm)),
method("assign_attrs", attr1="value"),
method("rename", x2="x_mm"),
method("swap_dims", {"x": "x2"}),
method(
"expand_dims",
dim={"z": np.linspace(10, 20, 12) * unit_registry.s},
axis=1,
),
method("drop_vars", "x"),
method("reset_coords", names="x2"),
method("copy"),
method("astype", np.float32),
),
ids=repr,
)
def test_content_manipulation(self, func, dtype):
quantity = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype)
* unit_registry.pascal
)
x = np.arange(quantity.shape[0]) * unit_registry.m
y = np.arange(quantity.shape[1]) * unit_registry.m
x2 = x.to(unit_registry.mm)
data_array = xr.DataArray(
name="data",
data=quantity,
coords={"x": x, "x2": ("x", x2), "y": y},
dims=("x", "y"),
)
stripped_kwargs = {
key: array_strip_units(value) for key, value in func.kwargs.items()
}
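        # also provide units for names that only exist after the operation
        # ("x_mm" from rename, "x2" as a data variable after reset_coords)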
units = {**{"x_mm": x2.units, "x2": x2.units}, **extract_units(data_array)}
expected = attach_units(func(strip_units(data_array), **stripped_kwargs), units)
actual = func(data_array)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func", (pytest.param(method("copy", data=np.arange(20))),), ids=repr
)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.degK, id="with_unit"),
),
)
def test_content_manipulation_with_units(self, func, unit, dtype):
quantity = np.linspace(0, 10, 20, dtype=dtype) * unit_registry.pascal
x = np.arange(len(quantity)) * unit_registry.m
data_array = xr.DataArray(data=quantity, coords={"x": x}, dims="x")
kwargs = {key: value * unit for key, value in func.kwargs.items()}
expected = attach_units(
func(strip_units(data_array)), {None: unit, "x": x.units}
)
actual = func(data_array, **kwargs)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"indices",
(
pytest.param(4, id="single index"),
pytest.param([5, 2, 9, 1], id="multiple indices"),
),
)
def test_isel(self, indices, dtype):
array = np.arange(10).astype(dtype) * unit_registry.s
x = np.arange(len(array)) * unit_registry.m
data_array = xr.DataArray(data=array, coords={"x": x}, dims="x")
expected = attach_units(
strip_units(data_array).isel(x=indices), extract_units(data_array)
)
actual = data_array.isel(x=indices)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.xfail(reason="indexes don't support units")
@pytest.mark.parametrize(
"raw_values",
(
pytest.param(10, id="single_value"),
pytest.param([10, 5, 13], id="list_of_values"),
pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"),
),
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, KeyError, id="no_units"),
pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"),
pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"),
pytest.param(unit_registry.dm, KeyError, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_sel(self, raw_values, unit, error, dtype):
array = np.linspace(5, 10, 20).astype(dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.m
data_array = xr.DataArray(data=array, coords={"x": x}, dims="x")
values = raw_values * unit
if error is not None and not (
isinstance(raw_values, (int, float)) and x.check(unit)
):
with pytest.raises(error):
data_array.sel(x=values)
return
expected = attach_units(
strip_units(data_array).sel(
x=strip_units(convert_units(values, {None: array.units}))
),
extract_units(data_array),
)
actual = data_array.sel(x=values)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.xfail(reason="indexes don't support units")
@pytest.mark.parametrize(
"raw_values",
(
pytest.param(10, id="single_value"),
pytest.param([10, 5, 13], id="list_of_values"),
pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"),
),
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, KeyError, id="no_units"),
pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"),
pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"),
pytest.param(unit_registry.dm, KeyError, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_loc(self, raw_values, unit, error, dtype):
array = np.linspace(5, 10, 20).astype(dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.m
data_array = xr.DataArray(data=array, coords={"x": x}, dims="x")
values = raw_values * unit
if error is not None and not (
isinstance(raw_values, (int, float)) and x.check(unit)
):
with pytest.raises(error):
data_array.loc[{"x": values}]
return
expected = attach_units(
strip_units(data_array).loc[
{"x": strip_units(convert_units(values, {None: array.units}))}
],
extract_units(data_array),
)
actual = data_array.loc[{"x": values}]
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.xfail(reason="indexes don't support units")
@pytest.mark.parametrize(
"raw_values",
(
pytest.param(10, id="single_value"),
pytest.param([10, 5, 13], id="list_of_values"),
pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"),
),
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, KeyError, id="no_units"),
pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"),
pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"),
pytest.param(unit_registry.dm, KeyError, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_drop_sel(self, raw_values, unit, error, dtype):
array = np.linspace(5, 10, 20).astype(dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.m
data_array = xr.DataArray(data=array, coords={"x": x}, dims="x")
values = raw_values * unit
if error is not None and not (
isinstance(raw_values, (int, float)) and x.check(unit)
):
with pytest.raises(error):
data_array.drop_sel(x=values)
return
expected = attach_units(
strip_units(data_array).drop_sel(
x=strip_units(convert_units(values, {None: x.units}))
),
extract_units(data_array),
)
actual = data_array.drop_sel(x=values)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"shape",
(
pytest.param((10, 20), id="nothing_squeezable"),
pytest.param((10, 20, 1), id="last_dimension_squeezable"),
pytest.param((10, 1, 20), id="middle_dimension_squeezable"),
pytest.param((1, 10, 20), id="first_dimension_squeezable"),
pytest.param((1, 10, 1, 20), id="first_and_last_dimension_squeezable"),
),
)
def test_squeeze(self, shape, dtype):
names = "xyzt"
coords = {
name: np.arange(length).astype(dtype)
* (unit_registry.m if name != "t" else unit_registry.s)
for name, length in zip(names, shape)
}
array = np.arange(10 * 20).astype(dtype).reshape(shape) * unit_registry.J
data_array = xr.DataArray(
data=array, coords=coords, dims=tuple(names[: len(shape)])
)
expected = attach_units(
strip_units(data_array).squeeze(), extract_units(data_array)
)
actual = data_array.squeeze()
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
# try squeezing the dimensions separately
names = tuple(dim for dim, coord in coords.items() if len(coord) == 1)
for index, name in enumerate(names):
expected = attach_units(
strip_units(data_array).squeeze(dim=name), extract_units(data_array)
)
actual = data_array.squeeze(dim=name)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func",
(method("head", x=7, y=3), method("tail", x=7, y=3), method("thin", x=7, y=3)),
ids=repr,
)
def test_head_tail_thin(self, func, dtype):
array = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK
coords = {
"x": np.arange(10) * unit_registry.m,
"y": np.arange(5) * unit_registry.m,
}
data_array = xr.DataArray(data=array, coords=coords, dims=("x", "y"))
expected = attach_units(
func(strip_units(data_array)), extract_units(data_array)
)
actual = func(data_array)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize("variant", ("data", "coords"))
@pytest.mark.parametrize(
"func",
(
pytest.param(
method("interp"), marks=pytest.mark.xfail(reason="uses scipy")
),
method("reindex"),
),
ids=repr,
)
def test_interp_reindex(self, variant, func, dtype):
variants = {
"data": (unit_registry.m, 1),
"coords": (1, unit_registry.m),
}
data_unit, coord_unit = variants.get(variant)
array = np.linspace(1, 2, 10).astype(dtype) * data_unit
y = np.arange(10) * coord_unit
x = np.arange(10)
new_x = np.arange(10) + 0.5
data_array = xr.DataArray(array, coords={"x": x, "y": ("x", y)}, dims="x")
units = extract_units(data_array)
expected = attach_units(func(strip_units(data_array), x=new_x), units)
actual = func(data_array, x=new_x)
assert_units_equal(expected, actual)
xr.testing.assert_allclose(expected, actual)
@pytest.mark.xfail(reason="indexes don't support units")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"func", (method("interp"), method("reindex")), ids=repr,
)
def test_interp_reindex_indexing(self, func, unit, error, dtype):
array = np.linspace(1, 2, 10).astype(dtype)
x = np.arange(10) * unit_registry.m
new_x = (np.arange(10) + 0.5) * unit
data_array = xr.DataArray(array, coords={"x": x}, dims="x")
if error is not None:
with pytest.raises(error):
func(data_array, x=new_x)
return
units = extract_units(data_array)
expected = attach_units(
func(
strip_units(data_array),
x=strip_units(convert_units(new_x, {None: unit_registry.m})),
),
units,
)
actual = func(data_array, x=new_x)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize("variant", ("data", "coords"))
@pytest.mark.parametrize(
"func",
(
pytest.param(
method("interp_like"), marks=pytest.mark.xfail(reason="uses scipy")
),
method("reindex_like"),
),
ids=repr,
)
def test_interp_reindex_like(self, variant, func, dtype):
variants = {
"data": (unit_registry.m, 1),
"coords": (1, unit_registry.m),
}
data_unit, coord_unit = variants.get(variant)
array = np.linspace(1, 2, 10).astype(dtype) * data_unit
coord = np.arange(10) * coord_unit
x = np.arange(10)
new_x = np.arange(-2, 2) + 0.5
data_array = xr.DataArray(array, coords={"x": x, "y": ("x", coord)}, dims="x")
other = xr.DataArray(np.empty_like(new_x), coords={"x": new_x}, dims="x")
units = extract_units(data_array)
expected = attach_units(func(strip_units(data_array), other), units)
actual = func(data_array, other)
assert_units_equal(expected, actual)
xr.testing.assert_allclose(expected, actual)
@pytest.mark.xfail(reason="indexes don't support units")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"func", (method("interp_like"), method("reindex_like")), ids=repr,
)
def test_interp_reindex_like_indexing(self, func, unit, error, dtype):
array = np.linspace(1, 2, 10).astype(dtype)
x = np.arange(10) * unit_registry.m
new_x = (np.arange(-2, 2) + 0.5) * unit
data_array = xr.DataArray(array, coords={"x": x}, dims="x")
other = xr.DataArray(np.empty_like(new_x), {"x": new_x}, dims="x")
if error is not None:
with pytest.raises(error):
func(data_array, other)
return
units = extract_units(data_array)
expected = attach_units(
func(
strip_units(data_array),
strip_units(convert_units(other, {None: unit_registry.m})),
),
units,
)
actual = func(data_array, other)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func",
(method("unstack"), method("reset_index", "z"), method("reorder_levels")),
ids=repr,
)
def test_stacking_stacked(self, func, dtype):
array = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * unit_registry.m
)
x = np.arange(array.shape[0])
y = np.arange(array.shape[1])
data_array = xr.DataArray(
name="data", data=array, coords={"x": x, "y": y}, dims=("x", "y")
)
stacked = data_array.stack(z=("x", "y"))
expected = attach_units(func(strip_units(stacked)), {"data": unit_registry.m})
actual = func(stacked)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.xfail(reason="indexes don't support units")
def test_to_unstacked_dataset(self, dtype):
array = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype)
* unit_registry.pascal
)
x = np.arange(array.shape[0]) * unit_registry.m
y = np.arange(array.shape[1]) * unit_registry.s
data_array = xr.DataArray(
data=array, coords={"x": x, "y": y}, dims=("x", "y")
).stack(z=("x", "y"))
func = method("to_unstacked_dataset", dim="z")
expected = attach_units(
func(strip_units(data_array)),
{"y": y.units, **dict(zip(x.magnitude, [array.units] * len(y)))},
).rename({elem.magnitude: elem for elem in x})
actual = func(data_array)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("transpose", "y", "x", "z"),
method("stack", a=("x", "y")),
method("set_index", x="x2"),
method("shift", x=2),
method("roll", x=2, roll_coords=False),
method("sortby", "x2"),
),
ids=repr,
)
def test_stacking_reordering(self, func, dtype):
array = (
np.linspace(0, 10, 2 * 5 * 10).reshape(2, 5, 10).astype(dtype)
* unit_registry.m
)
x = np.arange(array.shape[0])
y = np.arange(array.shape[1])
z = np.arange(array.shape[2])
x2 = np.linspace(0, 1, array.shape[0])[::-1]
data_array = xr.DataArray(
name="data",
data=array,
coords={"x": x, "y": y, "z": z, "x2": ("x", x2)},
dims=("x", "y", "z"),
)
expected = attach_units(func(strip_units(data_array)), {None: unit_registry.m})
actual = func(data_array)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("diff", dim="x"),
method("differentiate", coord="x"),
method("integrate", dim="x"),
pytest.param(
method("quantile", q=[0.25, 0.75]),
marks=pytest.mark.xfail(
LooseVersion(pint.__version__) < "0.12",
reason="quantile / nanquantile not implemented yet",
),
),
method("reduce", func=np.sum, dim="x"),
pytest.param(lambda x: x.dot(x), id="method_dot"),
),
ids=repr,
)
def test_computation(self, func, dtype):
array = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * unit_registry.m
)
x = np.arange(array.shape[0]) * unit_registry.m
y = np.arange(array.shape[1]) * unit_registry.s
data_array = xr.DataArray(data=array, coords={"x": x, "y": y}, dims=("x", "y"))
# we want to make sure the output unit is correct
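        # wrapped function/method helpers keep the DataArray's units; for the bare
        # dot lambda, the expected unit (m ** 2) is derived by applying it to a
        # flattened copy of the data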
units = {
**extract_units(data_array),
**(
{}
if isinstance(func, (function, method))
else extract_units(func(array.reshape(-1)))
),
}
expected = attach_units(func(strip_units(data_array)), units)
actual = func(data_array)
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("groupby", "x"),
method("groupby_bins", "y", bins=4),
method("coarsen", y=2),
pytest.param(
method("rolling", y=3),
marks=pytest.mark.xfail(
reason="numpy.lib.stride_tricks.as_strided converts to ndarray"
),
),
pytest.param(
method("rolling_exp", y=3),
marks=pytest.mark.xfail(reason="units not supported by numbagg"),
),
),
ids=repr,
)
def test_computation_objects(self, func, dtype):
array = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * unit_registry.m
)
x = np.array([0, 0, 1, 2, 2]) * unit_registry.m
y = np.arange(array.shape[1]) * 3 * unit_registry.s
data_array = xr.DataArray(data=array, coords={"x": x, "y": y}, dims=("x", "y"))
units = extract_units(data_array)
expected = attach_units(func(strip_units(data_array)).mean(), units)
actual = func(data_array).mean()
assert_units_equal(expected, actual)
xr.testing.assert_allclose(expected, actual)
def test_resample(self, dtype):
array = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m
time = pd.date_range("10-09-2010", periods=len(array), freq="1y")
data_array = xr.DataArray(data=array, coords={"time": time}, dims="time")
units = extract_units(data_array)
func = method("resample", time="6m")
expected = attach_units(func(strip_units(data_array)).mean(), units)
actual = func(data_array).mean()
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("assign_coords", z=(["x"], np.arange(5) * unit_registry.s)),
method("first"),
method("last"),
pytest.param(
method("quantile", q=[0.25, 0.5, 0.75], dim="x"),
marks=pytest.mark.xfail(
LooseVersion(pint.__version__) < "0.12",
reason="quantile / nanquantile not implemented yet",
),
),
),
ids=repr,
)
def test_grouped_operations(self, func, dtype):
array = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * unit_registry.m
)
x = np.arange(array.shape[0]) * unit_registry.m
y = np.arange(array.shape[1]) * 3 * unit_registry.s
data_array = xr.DataArray(data=array, coords={"x": x, "y": y}, dims=("x", "y"))
units = {**extract_units(data_array), **{"z": unit_registry.s, "q": None}}
stripped_kwargs = {
key: (
strip_units(value)
if not isinstance(value, tuple)
else tuple(strip_units(elem) for elem in value)
)
for key, value in func.kwargs.items()
}
expected = attach_units(
func(strip_units(data_array).groupby("y"), **stripped_kwargs), units
)
actual = func(data_array.groupby("y"))
assert_units_equal(expected, actual)
xr.testing.assert_identical(expected, actual)
class TestDataset:
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.mm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="same_unit"),
),
)
@pytest.mark.parametrize(
"shared",
(
"nothing",
pytest.param("dims", marks=pytest.mark.xfail(reason="indexes strip units")),
pytest.param(
"coords",
marks=pytest.mark.xfail(reason="reindex does not work with pint yet"),
),
),
)
def test_init(self, shared, unit, error, dtype):
original_unit = unit_registry.m
scaled_unit = unit_registry.mm
a = np.linspace(0, 1, 10).astype(dtype) * unit_registry.Pa
b = np.linspace(-1, 0, 12).astype(dtype) * unit_registry.Pa
raw_x = np.arange(a.shape[0])
x = raw_x * original_unit
x2 = x.to(scaled_unit)
raw_y = np.arange(b.shape[0])
y = raw_y * unit
y_units = unit if isinstance(y, unit_registry.Quantity) else None
if isinstance(y, unit_registry.Quantity):
if y.check(scaled_unit):
y2 = y.to(scaled_unit)
else:
y2 = y * 1000
y2_units = y2.units
else:
y2 = y * 1000
y2_units = None
variants = {
"nothing": ({"x": x, "x2": ("x", x2)}, {"y": y, "y2": ("y", y2)}),
"dims": (
{"x": x, "x2": ("x", strip_units(x2))},
{"x": y, "y2": ("x", strip_units(y2))},
),
"coords": ({"x": raw_x, "y": ("x", x2)}, {"x": raw_y, "y": ("x", y2)}),
}
coords_a, coords_b = variants.get(shared)
dims_a, dims_b = ("x", "y") if shared == "nothing" else ("x", "x")
arr1 = xr.DataArray(data=a, coords=coords_a, dims=dims_a)
arr2 = xr.DataArray(data=b, coords=coords_b, dims=dims_b)
if error is not None and shared != "nothing":
with pytest.raises(error):
xr.Dataset(data_vars={"a": arr1, "b": arr2})
return
actual = xr.Dataset(data_vars={"a": arr1, "b": arr2})
expected_units = {
"a": a.units,
"b": b.units,
"x": x.units,
"x2": x2.units,
"y": y_units,
"y2": y2_units,
}
expected = attach_units(
xr.Dataset(data_vars={"a": strip_units(arr1), "b": strip_units(arr2)}),
expected_units,
)
assert_equal_with_units(actual, expected)
@pytest.mark.parametrize(
"func", (pytest.param(str, id="str"), pytest.param(repr, id="repr"))
)
@pytest.mark.parametrize(
"variant",
(
pytest.param(
"with_dims",
marks=pytest.mark.xfail(reason="units in indexes are not supported"),
),
pytest.param("with_coords"),
pytest.param("without_coords"),
),
)
@pytest.mark.filterwarnings("error:::pint[.*]")
def test_repr(self, func, variant, dtype):
array1 = np.linspace(1, 2, 10, dtype=dtype) * unit_registry.Pa
array2 = np.linspace(0, 1, 10, dtype=dtype) * unit_registry.degK
x = np.arange(len(array1)) * unit_registry.s
y = x.to(unit_registry.ms)
variants = {
"with_dims": {"x": x},
"with_coords": {"y": ("x", y)},
"without_coords": {},
}
data_array = xr.Dataset(
data_vars={"a": ("x", array1), "b": ("x", array2)},
coords=variants.get(variant),
)
# FIXME: this just checks that the repr does not raise
# warnings or errors, but does not check the result
func(data_array)
@pytest.mark.parametrize(
"func",
(
pytest.param(
function("all"),
marks=pytest.mark.xfail(reason="not implemented by pint"),
),
pytest.param(
function("any"),
marks=pytest.mark.xfail(reason="not implemented by pint"),
),
function("argmax"),
function("argmin"),
function("max"),
function("min"),
function("mean"),
pytest.param(
function("median"),
marks=pytest.mark.xfail(
reason="np.median does not work with dataset yet"
),
),
function("sum"),
pytest.param(
function("prod"),
marks=pytest.mark.xfail(reason="not implemented by pint"),
),
function("std"),
function("var"),
function("cumsum"),
pytest.param(
function("cumprod"),
marks=pytest.mark.xfail(reason="fails within xarray"),
),
pytest.param(
method("all"), marks=pytest.mark.xfail(reason="not implemented by pint")
),
pytest.param(
method("any"), marks=pytest.mark.xfail(reason="not implemented by pint")
),
method("argmax"),
method("argmin"),
method("max"),
method("min"),
method("mean"),
method("median"),
method("sum"),
pytest.param(
method("prod"),
marks=pytest.mark.xfail(reason="not implemented by pint"),
),
method("std"),
method("var"),
method("cumsum"),
pytest.param(
method("cumprod"), marks=pytest.mark.xfail(reason="fails within xarray")
),
),
ids=repr,
)
def test_aggregation(self, func, dtype):
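        # cumprod would accumulate powers of the unit, so those cases fall back to
        # dimensionless data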
unit_a = (
unit_registry.Pa if func.name != "cumprod" else unit_registry.dimensionless
)
unit_b = (
unit_registry.kg / unit_registry.m ** 3
if func.name != "cumprod"
else unit_registry.dimensionless
)
a = xr.DataArray(data=np.linspace(0, 1, 10).astype(dtype) * unit_a, dims="x")
b = xr.DataArray(data=np.linspace(-1, 0, 10).astype(dtype) * unit_b, dims="x")
x = xr.DataArray(data=np.arange(10).astype(dtype) * unit_registry.m, dims="x")
y = xr.DataArray(
data=np.arange(10, 20).astype(dtype) * unit_registry.s, dims="x"
)
ds = xr.Dataset(data_vars={"a": a, "b": b}, coords={"x": x, "y": y})
actual = func(ds)
expected = attach_units(
func(strip_units(ds)),
{
"a": extract_units(func(a)).get(None),
"b": extract_units(func(b)).get(None),
},
)
assert_equal_with_units(actual, expected)
@pytest.mark.parametrize("property", ("imag", "real"))
def test_numpy_properties(self, property, dtype):
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(
data=np.linspace(0, 1, 10) * unit_registry.Pa, dims="x"
),
"b": xr.DataArray(
data=np.linspace(-1, 0, 15) * unit_registry.Pa, dims="y"
),
},
coords={
"x": np.arange(10) * unit_registry.m,
"y": np.arange(15) * unit_registry.s,
},
)
units = extract_units(ds)
actual = getattr(ds, property)
expected = attach_units(getattr(strip_units(ds), property), units)
assert_equal_with_units(actual, expected)
@pytest.mark.parametrize(
"func",
(
method("astype", float),
method("conj"),
method("argsort"),
method("conjugate"),
method("round"),
),
ids=repr,
)
def test_numpy_methods(self, func, dtype):
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(
data=np.linspace(1, -1, 10) * unit_registry.Pa, dims="x"
),
"b": xr.DataArray(
data=np.linspace(-1, 1, 15) * unit_registry.Pa, dims="y"
),
},
coords={
"x": np.arange(10) * unit_registry.m,
"y": np.arange(15) * unit_registry.s,
},
)
units = {
"a": array_extract_units(func(ds.a)),
"b": array_extract_units(func(ds.b)),
"x": unit_registry.m,
"y": unit_registry.s,
}
actual = func(ds)
expected = attach_units(func(strip_units(ds)), units)
assert_equal_with_units(actual, expected)
@pytest.mark.parametrize("func", (method("clip", min=3, max=8),), ids=repr)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_numpy_methods_with_args(self, func, unit, error, dtype):
data_unit = unit_registry.m
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=np.arange(10) * data_unit, dims="x"),
"b": xr.DataArray(data=np.arange(15) * data_unit, dims="y"),
},
coords={
"x": np.arange(10) * unit_registry.m,
"y": np.arange(15) * unit_registry.s,
},
)
units = extract_units(ds)
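        # attach the parametrized unit to the scalar arguments (here: clip's min / max)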
kwargs = {
key: (value * unit if isinstance(value, (int, float)) else value)
for key, value in func.kwargs.items()
}
if error is not None:
with pytest.raises(error):
func(ds, **kwargs)
return
stripped_kwargs = {
key: strip_units(convert_units(value, {None: data_unit}))
for key, value in kwargs.items()
}
actual = func(ds, **kwargs)
expected = attach_units(func(strip_units(ds), **stripped_kwargs), units)
assert_equal_with_units(actual, expected)
@pytest.mark.parametrize(
"func", (method("isnull"), method("notnull"), method("count")), ids=repr
)
def test_missing_value_detection(self, func, dtype):
array1 = (
np.array(
[
[1.4, 2.3, np.nan, 7.2],
[np.nan, 9.7, np.nan, np.nan],
[2.1, np.nan, np.nan, 4.6],
[9.9, np.nan, 7.2, 9.1],
]
)
* unit_registry.degK
)
array2 = (
np.array(
[
[np.nan, 5.7, 12.0, 7.2],
[np.nan, 12.4, np.nan, 4.2],
[9.8, np.nan, 4.6, 1.4],
[7.2, np.nan, 6.3, np.nan],
[8.4, 3.9, np.nan, np.nan],
]
)
* unit_registry.Pa
)
x = np.arange(array1.shape[0]) * unit_registry.m
y = np.arange(array1.shape[1]) * unit_registry.m
z = np.arange(array2.shape[0]) * unit_registry.m
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y")),
"b": xr.DataArray(data=array2, dims=("z", "x")),
},
coords={"x": x, "y": y, "z": z},
)
expected = func(strip_units(ds))
actual = func(ds)
assert_equal_with_units(expected, actual)
@pytest.mark.xfail(reason="ffill and bfill lose the unit")
@pytest.mark.parametrize("func", (method("ffill"), method("bfill")), ids=repr)
def test_missing_value_filling(self, func, dtype):
array1 = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.degK
)
array2 = (
np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype)
* unit_registry.Pa
)
x = np.arange(len(array1))
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims="x"),
"b": xr.DataArray(data=array2, dims="x"),
},
coords={"x": x},
)
expected = attach_units(
func(strip_units(ds), dim="x"),
{"a": unit_registry.degK, "b": unit_registry.Pa},
)
actual = func(ds, dim="x")
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(
unit_registry.cm,
None,
id="compatible_unit",
marks=pytest.mark.xfail(
reason="where converts the array, not the fill value"
),
),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"fill_value",
(
pytest.param(-1, id="python_scalar"),
pytest.param(np.array(-1), id="numpy_scalar"),
pytest.param(np.array([-1]), id="numpy_array"),
),
)
def test_fillna(self, fill_value, unit, error, dtype):
array1 = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.m
)
array2 = (
np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype)
* unit_registry.m
)
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims="x"),
"b": xr.DataArray(data=array2, dims="x"),
}
)
if error is not None:
with pytest.raises(error):
ds.fillna(value=fill_value * unit)
return
actual = ds.fillna(value=fill_value * unit)
expected = attach_units(
strip_units(ds).fillna(
value=strip_units(
convert_units(fill_value * unit, {None: unit_registry.m})
)
),
{"a": unit_registry.m, "b": unit_registry.m},
)
assert_equal_with_units(expected, actual)
def test_dropna(self, dtype):
array1 = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.degK
)
array2 = (
np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype)
* unit_registry.Pa
)
x = np.arange(len(array1))
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims="x"),
"b": xr.DataArray(data=array2, dims="x"),
},
coords={"x": x},
)
expected = attach_units(
strip_units(ds).dropna(dim="x"),
{"a": unit_registry.degK, "b": unit_registry.Pa},
)
actual = ds.dropna(dim="x")
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="same_unit"),
),
)
def test_isin(self, unit, dtype):
array1 = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.m
)
array2 = (
np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype)
* unit_registry.m
)
x = np.arange(len(array1))
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims="x"),
"b": xr.DataArray(data=array2, dims="x"),
},
coords={"x": x},
)
raw_values = np.array([1.4, np.nan, 2.3]).astype(dtype)
values = raw_values * unit
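        # length-compatible values are compared via their magnitudes in metres;
        # unitless or incompatible values can never match, so the expectation
        # below is forced to all-False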
if (
isinstance(values, unit_registry.Quantity)
and values.check(unit_registry.m)
and unit != unit_registry.m
):
raw_values = values.to(unit_registry.m).magnitude
expected = strip_units(ds).isin(raw_values)
if not isinstance(values, unit_registry.Quantity) or not values.check(
unit_registry.m
):
expected.a[:] = False
expected.b[:] = False
actual = ds.isin(values)
assert_equal_with_units(actual, expected)
@pytest.mark.parametrize(
"variant", ("masking", "replacing_scalar", "replacing_array", "dropping")
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="same_unit"),
),
)
def test_where(self, variant, unit, error, dtype):
original_unit = unit_registry.m
array1 = np.linspace(0, 1, 10).astype(dtype) * original_unit
array2 = np.linspace(-1, 0, 10).astype(dtype) * original_unit
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims="x"),
"b": xr.DataArray(data=array2, dims="x"),
},
coords={"x": np.arange(len(array1))},
)
condition = ds < 0.5 * original_unit
other = np.linspace(-2, -1, 10).astype(dtype) * unit
variant_kwargs = {
"masking": {"cond": condition},
"replacing_scalar": {"cond": condition, "other": -1 * unit},
"replacing_array": {"cond": condition, "other": other},
"dropping": {"cond": condition, "drop": True},
}
kwargs = variant_kwargs.get(variant)
if variant not in ("masking", "dropping") and error is not None:
with pytest.raises(error):
ds.where(**kwargs)
return
kwargs_without_units = {
key: strip_units(convert_units(value, {None: original_unit}))
for key, value in kwargs.items()
}
expected = attach_units(
strip_units(ds).where(**kwargs_without_units),
{"a": original_unit, "b": original_unit},
)
actual = ds.where(**kwargs)
assert_equal_with_units(expected, actual)
@pytest.mark.xfail(reason="interpolate strips units")
def test_interpolate_na(self, dtype):
array1 = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.degK
)
array2 = (
np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype)
* unit_registry.Pa
)
x = np.arange(len(array1))
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims="x"),
"b": xr.DataArray(data=array2, dims="x"),
},
coords={"x": x},
)
expected = attach_units(
strip_units(ds).interpolate_na(dim="x"),
{"a": unit_registry.degK, "b": unit_registry.Pa},
)
actual = ds.interpolate_na(dim="x")
assert_equal_with_units(expected, actual)
@pytest.mark.xfail(reason="wrong argument order for `where`")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="same_unit"),
),
)
def test_combine_first(self, unit, error, dtype):
array1 = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.m
)
array2 = (
np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype)
* unit_registry.m
)
x = np.arange(len(array1))
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims="x"),
"b": xr.DataArray(data=array2, dims="x"),
},
coords={"x": x},
)
other_array1 = np.ones_like(array1) * unit
other_array2 = -1 * np.ones_like(array2) * unit
other = xr.Dataset(
data_vars={
"a": xr.DataArray(data=other_array1, dims="x"),
"b": xr.DataArray(data=other_array2, dims="x"),
},
coords={"x": np.arange(array1.shape[0])},
)
if error is not None:
with pytest.raises(error):
ds.combine_first(other)
return
expected = attach_units(
strip_units(ds).combine_first(
strip_units(
convert_units(other, {"a": unit_registry.m, "b": unit_registry.m})
)
),
{"a": unit_registry.m, "b": unit_registry.m},
)
actual = ds.combine_first(other)
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"variation",
(
"data",
pytest.param(
"dims", marks=pytest.mark.xfail(reason="units in indexes not supported")
),
"coords",
),
)
@pytest.mark.parametrize("func", (method("equals"), method("identical")), ids=repr)
def test_comparisons(self, func, variation, unit, dtype):
def is_compatible(a, b):
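            # treat a missing unit as dimensionless and accept any unit that pint
            # can convert the quantity to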
a = a if a is not None else 1
b = b if b is not None else 1
quantity = np.arange(5) * a
return a == b or quantity.check(b)
array1 = np.linspace(0, 5, 10).astype(dtype)
array2 = np.linspace(-5, 0, 10).astype(dtype)
coord = np.arange(len(array1)).astype(dtype)
original_unit = unit_registry.m
quantity1 = array1 * original_unit
quantity2 = array2 * original_unit
x = coord * original_unit
y = coord * original_unit
units = {"data": (unit, 1, 1), "dims": (1, unit, 1), "coords": (1, 1, unit)}
data_unit, dim_unit, coord_unit = units.get(variation)
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=quantity1, dims="x"),
"b": xr.DataArray(data=quantity2, dims="x"),
},
coords={"x": x, "y": ("x", y)},
)
other_units = {
"a": data_unit if quantity1.check(data_unit) else None,
"b": data_unit if quantity2.check(data_unit) else None,
"x": dim_unit if x.check(dim_unit) else None,
"y": coord_unit if y.check(coord_unit) else None,
}
other = attach_units(strip_units(convert_units(ds, other_units)), other_units)
units = extract_units(ds)
other_units = extract_units(other)
equal_ds = all(
is_compatible(units[name], other_units[name]) for name in units.keys()
) and (strip_units(ds).equals(strip_units(convert_units(other, units))))
equal_units = units == other_units
expected = equal_ds and (func.name != "identical" or equal_units)
actual = func(ds, other)
assert expected == actual
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
def test_broadcast_like(self, unit, dtype):
        array1 = (
            np.linspace(1, 2, 2 * 1).reshape(2, 1).astype(dtype) * unit_registry.Pa
        )
        array2 = (
            np.linspace(0, 1, 2 * 3).reshape(2, 3).astype(dtype) * unit_registry.Pa
        )
x1 = np.arange(2) * unit_registry.m
x2 = np.arange(2) * unit
y1 = np.array([0]) * unit_registry.m
y2 = np.arange(3) * unit
ds1 = xr.Dataset(
data_vars={"a": (("x", "y"), array1)}, coords={"x": x1, "y": y1}
)
ds2 = xr.Dataset(
data_vars={"a": (("x", "y"), array2)}, coords={"x": x2, "y": y2}
)
expected = attach_units(
strip_units(ds1).broadcast_like(strip_units(ds2)), extract_units(ds1)
)
actual = ds1.broadcast_like(ds2)
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
def test_broadcast_equals(self, unit, dtype):
left_array1 = np.ones(shape=(2, 3), dtype=dtype) * unit_registry.m
left_array2 = np.zeros(shape=(3, 6), dtype=dtype) * unit_registry.m
right_array1 = np.ones(shape=(2,)) * unit
right_array2 = np.ones(shape=(3,)) * unit
left = xr.Dataset(
data_vars={
"a": xr.DataArray(data=left_array1, dims=("x", "y")),
"b": xr.DataArray(data=left_array2, dims=("y", "z")),
}
)
right = xr.Dataset(
data_vars={
"a": xr.DataArray(data=right_array1, dims="x"),
"b": xr.DataArray(data=right_array2, dims="y"),
}
)
units = {
**extract_units(left),
**({} if left_array1.check(unit) else {"a": None, "b": None}),
}
expected = strip_units(left).broadcast_equals(
strip_units(convert_units(right, units))
) & left_array1.check(unit)
actual = left.broadcast_equals(right)
assert expected == actual
@pytest.mark.parametrize(
"func",
(method("unstack"), method("reset_index", "v"), method("reorder_levels")),
ids=repr,
)
def test_stacking_stacked(self, func, dtype):
array1 = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * unit_registry.m
)
array2 = (
np.linspace(-10, 0, 5 * 10 * 15).reshape(5, 10, 15).astype(dtype)
* unit_registry.m
)
x = np.arange(array1.shape[0])
y = np.arange(array1.shape[1])
z = np.arange(array2.shape[2])
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y")),
"b": xr.DataArray(data=array2, dims=("x", "y", "z")),
},
coords={"x": x, "y": y, "z": z},
)
stacked = ds.stack(v=("x", "y"))
expected = attach_units(
func(strip_units(stacked)), {"a": unit_registry.m, "b": unit_registry.m}
)
actual = func(stacked)
assert_equal_with_units(expected, actual)
@pytest.mark.xfail(reason="does not work with quantities yet")
def test_to_stacked_array(self, dtype):
labels = np.arange(5).astype(dtype) * unit_registry.s
arrays = {name: np.linspace(0, 1, 10) * unit_registry.m for name in labels}
ds = xr.Dataset(
data_vars={
name: xr.DataArray(data=array, dims="x")
for name, array in arrays.items()
}
)
func = method("to_stacked_array", "z", variable_dim="y", sample_dims=["x"])
actual = func(ds).rename(None)
expected = attach_units(
func(strip_units(ds)).rename(None),
{None: unit_registry.m, "y": unit_registry.s},
)
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("transpose", "y", "x", "z1", "z2"),
method("stack", a=("x", "y")),
method("set_index", x="x2"),
pytest.param(
method("shift", x=2),
marks=pytest.mark.xfail(reason="tries to concatenate nan arrays"),
),
method("roll", x=2, roll_coords=False),
method("sortby", "x2"),
),
ids=repr,
)
def test_stacking_reordering(self, func, dtype):
array1 = (
np.linspace(0, 10, 2 * 5 * 10).reshape(2, 5, 10).astype(dtype)
* unit_registry.Pa
)
array2 = (
np.linspace(0, 10, 2 * 5 * 15).reshape(2, 5, 15).astype(dtype)
* unit_registry.degK
)
x = np.arange(array1.shape[0])
y = np.arange(array1.shape[1])
z1 = np.arange(array1.shape[2])
z2 = np.arange(array2.shape[2])
x2 = np.linspace(0, 1, array1.shape[0])[::-1]
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y", "z1")),
"b": xr.DataArray(data=array2, dims=("x", "y", "z2")),
},
coords={"x": x, "y": y, "z1": z1, "z2": z2, "x2": ("x", x2)},
)
expected = attach_units(
func(strip_units(ds)), {"a": unit_registry.Pa, "b": unit_registry.degK}
)
actual = func(ds)
assert_equal_with_units(expected, actual)
@pytest.mark.xfail(reason="indexes strip units")
@pytest.mark.parametrize(
"indices",
(
pytest.param(4, id="single index"),
pytest.param([5, 2, 9, 1], id="multiple indices"),
),
)
def test_isel(self, indices, dtype):
array1 = np.arange(10).astype(dtype) * unit_registry.s
array2 = np.linspace(0, 1, 10).astype(dtype) * unit_registry.Pa
x = np.arange(len(array1)) * unit_registry.m
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims="x"),
"b": xr.DataArray(data=array2, dims="x"),
},
coords={"x": x},
)
expected = attach_units(
strip_units(ds).isel(x=indices),
{"a": unit_registry.s, "b": unit_registry.Pa, "x": unit_registry.m},
)
actual = ds.isel(x=indices)
assert_equal_with_units(expected, actual)
@pytest.mark.xfail(reason="indexes don't support units")
@pytest.mark.parametrize(
"raw_values",
(
pytest.param(10, id="single_value"),
pytest.param([10, 5, 13], id="list_of_values"),
pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"),
),
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, KeyError, id="no_units"),
pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"),
pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"),
pytest.param(unit_registry.dm, KeyError, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_sel(self, raw_values, unit, error, dtype):
array1 = np.linspace(5, 10, 20).astype(dtype) * unit_registry.degK
array2 = np.linspace(0, 5, 20).astype(dtype) * unit_registry.Pa
x = np.arange(len(array1)) * unit_registry.m
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims="x"),
"b": xr.DataArray(data=array2, dims="x"),
},
coords={"x": x},
)
values = raw_values * unit
if error is not None and not (
isinstance(raw_values, (int, float)) and x.check(unit)
):
with pytest.raises(error):
ds.sel(x=values)
return
expected = attach_units(
strip_units(ds).sel(x=strip_units(convert_units(values, {None: x.units}))),
{"a": array1.units, "b": array2.units, "x": x.units},
)
actual = ds.sel(x=values)
assert_equal_with_units(expected, actual)
@pytest.mark.xfail(reason="indexes don't support units")
@pytest.mark.parametrize(
"raw_values",
(
pytest.param(10, id="single_value"),
pytest.param([10, 5, 13], id="list_of_values"),
pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"),
),
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, KeyError, id="no_units"),
pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"),
pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"),
pytest.param(unit_registry.dm, KeyError, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_drop_sel(self, raw_values, unit, error, dtype):
array1 = np.linspace(5, 10, 20).astype(dtype) * unit_registry.degK
array2 = np.linspace(0, 5, 20).astype(dtype) * unit_registry.Pa
x = np.arange(len(array1)) * unit_registry.m
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims="x"),
"b": xr.DataArray(data=array2, dims="x"),
},
coords={"x": x},
)
values = raw_values * unit
if error is not None and not (
isinstance(raw_values, (int, float)) and x.check(unit)
):
with pytest.raises(error):
ds.drop_sel(x=values)
return
expected = attach_units(
strip_units(ds).drop_sel(
x=strip_units(convert_units(values, {None: x.units}))
),
extract_units(ds),
)
actual = ds.drop_sel(x=values)
assert_equal_with_units(expected, actual)
@pytest.mark.xfail(reason="indexes don't support units")
@pytest.mark.parametrize(
"raw_values",
(
pytest.param(10, id="single_value"),
pytest.param([10, 5, 13], id="list_of_values"),
pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"),
),
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, KeyError, id="no_units"),
pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"),
pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"),
pytest.param(unit_registry.dm, KeyError, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_loc(self, raw_values, unit, error, dtype):
array1 = np.linspace(5, 10, 20).astype(dtype) * unit_registry.degK
array2 = np.linspace(0, 5, 20).astype(dtype) * unit_registry.Pa
x = np.arange(len(array1)) * unit_registry.m
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims="x"),
"b": xr.DataArray(data=array2, dims="x"),
},
coords={"x": x},
)
values = raw_values * unit
if error is not None and not (
isinstance(raw_values, (int, float)) and x.check(unit)
):
with pytest.raises(error):
ds.loc[{"x": values}]
return
expected = attach_units(
strip_units(ds).loc[
{"x": strip_units(convert_units(values, {None: x.units}))}
],
{"a": array1.units, "b": array2.units, "x": x.units},
)
actual = ds.loc[{"x": values}]
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("head", x=7, y=3, z=6),
method("tail", x=7, y=3, z=6),
method("thin", x=7, y=3, z=6),
),
ids=repr,
)
def test_head_tail_thin(self, func, dtype):
array1 = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK
array2 = np.linspace(1, 2, 10 * 8).reshape(10, 8) * unit_registry.Pa
coords = {
"x": np.arange(10) * unit_registry.m,
"y": np.arange(5) * unit_registry.m,
"z": np.arange(8) * unit_registry.m,
}
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y")),
"b": xr.DataArray(data=array2, dims=("x", "z")),
},
coords=coords,
)
expected = attach_units(func(strip_units(ds)), extract_units(ds))
actual = func(ds)
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"shape",
(
pytest.param((10, 20), id="nothing squeezable"),
pytest.param((10, 20, 1), id="last dimension squeezable"),
pytest.param((10, 1, 20), id="middle dimension squeezable"),
pytest.param((1, 10, 20), id="first dimension squeezable"),
pytest.param((1, 10, 1, 20), id="first and last dimension squeezable"),
),
)
def test_squeeze(self, shape, dtype):
names = "xyzt"
coords = {
name: np.arange(length).astype(dtype)
* (unit_registry.m if name != "t" else unit_registry.s)
for name, length in zip(names, shape)
}
array1 = (
np.linspace(0, 1, 10 * 20).astype(dtype).reshape(shape) * unit_registry.degK
)
array2 = (
np.linspace(1, 2, 10 * 20).astype(dtype).reshape(shape) * unit_registry.Pa
)
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=tuple(names[: len(shape)])),
"b": xr.DataArray(data=array2, dims=tuple(names[: len(shape)])),
},
coords=coords,
)
units = extract_units(ds)
expected = attach_units(strip_units(ds).squeeze(), units)
actual = ds.squeeze()
assert_equal_with_units(actual, expected)
# try squeezing the dimensions separately
names = tuple(dim for dim, coord in coords.items() if len(coord) == 1)
for name in names:
expected = attach_units(strip_units(ds).squeeze(dim=name), units)
actual = ds.squeeze(dim=name)
assert_equal_with_units(actual, expected)
@pytest.mark.xfail(reason="ignores units")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_interp(self, unit, error):
array1 = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK
array2 = np.linspace(1, 2, 10 * 8).reshape(10, 8) * unit_registry.Pa
coords = {
"x": np.arange(10) * unit_registry.m,
"y": np.arange(5) * unit_registry.m,
"z": np.arange(8) * unit_registry.s,
}
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y")),
"b": xr.DataArray(data=array2, dims=("x", "z")),
},
coords=coords,
)
new_coords = (np.arange(10) + 0.5) * unit
if error is not None:
with pytest.raises(error):
ds.interp(x=new_coords)
return
units = extract_units(ds)
expected = attach_units(
strip_units(ds).interp(x=strip_units(convert_units(new_coords, units))),
units,
)
actual = ds.interp(x=new_coords)
assert_equal_with_units(actual, expected)
@pytest.mark.xfail(reason="ignores units")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_interp_like(self, unit, error, dtype):
array1 = (
np.linspace(0, 10, 10 * 5).reshape(10, 5).astype(dtype) * unit_registry.degK
)
array2 = (
np.linspace(10, 20, 10 * 8).reshape(10, 8).astype(dtype) * unit_registry.Pa
)
coords = {
"x": np.arange(10) * unit_registry.m,
"y": np.arange(5) * unit_registry.m,
"z": np.arange(8) * unit_registry.m,
}
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y")),
"b": xr.DataArray(data=array2, dims=("x", "z")),
},
coords=coords,
)
other = xr.Dataset(
data_vars={
"c": xr.DataArray(data=np.empty((20, 10)), dims=("x", "y")),
"d": xr.DataArray(data=np.empty((20, 15)), dims=("x", "z")),
},
coords={
"x": (np.arange(20) + 0.3) * unit,
"y": (np.arange(10) - 0.2) * unit,
"z": (np.arange(15) + 0.4) * unit,
},
)
if error is not None:
with pytest.raises(error):
ds.interp_like(other)
return
units = extract_units(ds)
expected = attach_units(
strip_units(ds).interp_like(strip_units(convert_units(other, units))), units
)
actual = ds.interp_like(other)
assert_equal_with_units(actual, expected)
@pytest.mark.xfail(reason="indexes don't support units")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_reindex(self, unit, error, dtype):
array1 = (
np.linspace(1, 2, 10 * 5).reshape(10, 5).astype(dtype) * unit_registry.degK
)
array2 = (
np.linspace(1, 2, 10 * 8).reshape(10, 8).astype(dtype) * unit_registry.Pa
)
coords = {
"x": np.arange(10) * unit_registry.m,
"y": np.arange(5) * unit_registry.m,
"z": np.arange(8) * unit_registry.s,
}
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y")),
"b": xr.DataArray(data=array2, dims=("x", "z")),
},
coords=coords,
)
new_coords = (np.arange(10) + 0.5) * unit
if error is not None:
with pytest.raises(error):
ds.reindex(x=new_coords)
return
expected = attach_units(
strip_units(ds).reindex(
x=strip_units(convert_units(new_coords, {None: coords["x"].units}))
),
extract_units(ds),
)
actual = ds.reindex(x=new_coords)
assert_equal_with_units(actual, expected)
@pytest.mark.xfail(reason="indexes don't support units")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_reindex_like(self, unit, error, dtype):
array1 = (
np.linspace(0, 10, 10 * 5).reshape(10, 5).astype(dtype) * unit_registry.degK
)
array2 = (
np.linspace(10, 20, 10 * 8).reshape(10, 8).astype(dtype) * unit_registry.Pa
)
coords = {
"x": np.arange(10) * unit_registry.m,
"y": np.arange(5) * unit_registry.m,
"z": np.arange(8) * unit_registry.m,
}
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y")),
"b": xr.DataArray(data=array2, dims=("x", "z")),
},
coords=coords,
)
other = xr.Dataset(
data_vars={
"c": xr.DataArray(data=np.empty((20, 10)), dims=("x", "y")),
"d": xr.DataArray(data=np.empty((20, 15)), dims=("x", "z")),
},
coords={
"x": (np.arange(20) + 0.3) * unit,
"y": (np.arange(10) - 0.2) * unit,
"z": (np.arange(15) + 0.4) * unit,
},
)
if error is not None:
with pytest.raises(error):
ds.reindex_like(other)
return
units = extract_units(ds)
expected = attach_units(
strip_units(ds).reindex_like(strip_units(convert_units(other, units))),
units,
)
actual = ds.reindex_like(other)
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("diff", dim="x"),
method("differentiate", coord="x"),
method("integrate", coord="x"),
pytest.param(
method("quantile", q=[0.25, 0.75]),
marks=pytest.mark.xfail(reason="nanquantile not implemented"),
),
method("reduce", func=np.sum, dim="x"),
method("map", np.fabs),
),
ids=repr,
)
def test_computation(self, func, dtype):
array1 = (
np.linspace(-5, 5, 10 * 5).reshape(10, 5).astype(dtype) * unit_registry.degK
)
array2 = (
np.linspace(10, 20, 10 * 8).reshape(10, 8).astype(dtype) * unit_registry.Pa
)
x = np.arange(10) * unit_registry.m
y = np.arange(5) * unit_registry.m
z = np.arange(8) * unit_registry.m
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y")),
"b": xr.DataArray(data=array2, dims=("x", "z")),
},
coords={"x": x, "y": y, "z": z},
)
units = extract_units(ds)
expected = attach_units(func(strip_units(ds)), units)
actual = func(ds)
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("groupby", "x"),
method("groupby_bins", "x", bins=4),
method("coarsen", x=2),
pytest.param(
method("rolling", x=3), marks=pytest.mark.xfail(reason="strips units")
),
pytest.param(
method("rolling_exp", x=3),
marks=pytest.mark.xfail(reason="uses numbagg which strips units"),
),
),
ids=repr,
)
def test_computation_objects(self, func, dtype):
array1 = (
np.linspace(-5, 5, 10 * 5).reshape(10, 5).astype(dtype) * unit_registry.degK
)
array2 = (
np.linspace(10, 20, 10 * 5 * 8).reshape(10, 5, 8).astype(dtype)
* unit_registry.Pa
)
x = np.arange(10) * unit_registry.m
y = np.arange(5) * unit_registry.m
z = np.arange(8) * unit_registry.m
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y")),
"b": xr.DataArray(data=array2, dims=("x", "y", "z")),
},
coords={"x": x, "y": y, "z": z},
)
units = extract_units(ds)
args = [] if func.name != "groupby" else ["y"]
reduce_func = method("mean", *args)
expected = attach_units(reduce_func(func(strip_units(ds))), units)
actual = reduce_func(func(ds))
assert_equal_with_units(expected, actual)
def test_resample(self, dtype):
array1 = (
np.linspace(-5, 5, 10 * 5).reshape(10, 5).astype(dtype) * unit_registry.degK
)
array2 = (
np.linspace(10, 20, 10 * 8).reshape(10, 8).astype(dtype) * unit_registry.Pa
)
t = pd.date_range("10-09-2010", periods=array1.shape[0], freq="1y")
y = np.arange(5) * unit_registry.m
z = np.arange(8) * unit_registry.m
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("time", "y")),
"b": xr.DataArray(data=array2, dims=("time", "z")),
},
coords={"time": t, "y": y, "z": z},
)
units = extract_units(ds)
func = method("resample", time="6m")
expected = attach_units(func(strip_units(ds)).mean(), units)
actual = func(ds).mean()
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("assign", c=lambda ds: 10 * ds.b),
method("assign_coords", v=("x", np.arange(10) * unit_registry.s)),
method("first"),
method("last"),
pytest.param(
method("quantile", q=[0.25, 0.5, 0.75], dim="x"),
marks=pytest.mark.xfail(reason="nanquantile not implemented"),
),
),
ids=repr,
)
def test_grouped_operations(self, func, dtype):
array1 = (
np.linspace(-5, 5, 10 * 5).reshape(10, 5).astype(dtype) * unit_registry.degK
)
array2 = (
np.linspace(10, 20, 10 * 5 * 8).reshape(10, 5, 8).astype(dtype)
* unit_registry.Pa
)
x = np.arange(10) * unit_registry.m
y = np.arange(5) * unit_registry.m
z = np.arange(8) * unit_registry.m
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y")),
"b": xr.DataArray(data=array2, dims=("x", "y", "z")),
},
coords={"x": x, "y": y, "z": z},
)
units = extract_units(ds)
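        # "c" comes from assign (10 * ds.b, i.e. pascal) and "v" from assign_coords
        # (seconds)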
units.update({"c": unit_registry.Pa, "v": unit_registry.s})
stripped_kwargs = {
name: strip_units(value) for name, value in func.kwargs.items()
}
expected = attach_units(
func(strip_units(ds).groupby("y"), **stripped_kwargs), units
)
actual = func(ds.groupby("y"))
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"func",
(
method("pipe", lambda ds: ds * 10),
method("assign", d=lambda ds: ds.b * 10),
method("assign_coords", y2=("y", np.arange(5) * unit_registry.mm)),
method("assign_attrs", attr1="value"),
method("rename", x2="x_mm"),
method("rename_vars", c="temperature"),
method("rename_dims", x="offset_x"),
method("swap_dims", {"x": "x2"}),
method("expand_dims", v=np.linspace(10, 20, 12) * unit_registry.s, axis=1),
method("drop_vars", "x"),
method("drop_dims", "z"),
method("set_coords", names="c"),
method("reset_coords", names="x2"),
method("copy"),
),
ids=repr,
)
def test_content_manipulation(self, func, dtype):
array1 = (
np.linspace(-5, 5, 10 * 5).reshape(10, 5).astype(dtype)
* unit_registry.m ** 3
)
array2 = (
np.linspace(10, 20, 10 * 5 * 8).reshape(10, 5, 8).astype(dtype)
* unit_registry.Pa
)
array3 = np.linspace(0, 10, 10).astype(dtype) * unit_registry.degK
x = np.arange(10) * unit_registry.m
x2 = x.to(unit_registry.mm)
y = np.arange(5) * unit_registry.m
z = np.arange(8) * unit_registry.m
ds = xr.Dataset(
data_vars={
"a": xr.DataArray(data=array1, dims=("x", "y")),
"b": xr.DataArray(data=array2, dims=("x", "y", "z")),
"c": xr.DataArray(data=array3, dims="x"),
},
coords={"x": x, "y": y, "z": z, "x2": ("x", x2)},
)
units = {
**extract_units(ds),
**{
"y2": unit_registry.mm,
"x_mm": unit_registry.mm,
"offset_x": unit_registry.m,
"d": unit_registry.Pa,
"temperature": unit_registry.degK,
},
}
stripped_kwargs = {
key: strip_units(value) for key, value in func.kwargs.items()
}
expected = attach_units(func(strip_units(ds), **stripped_kwargs), units)
actual = func(ds)
assert_equal_with_units(expected, actual)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, xr.MergeError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, xr.MergeError, id="dimensionless"
),
pytest.param(unit_registry.s, xr.MergeError, id="incompatible_unit"),
pytest.param(unit_registry.cm, xr.MergeError, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"variant",
(
"data",
pytest.param(
"dims", marks=pytest.mark.xfail(reason="indexes don't support units")
),
"coords",
),
)
def test_merge(self, variant, unit, error, dtype):
original_data_unit = unit_registry.m
original_dim_unit = unit_registry.m
original_coord_unit = unit_registry.m
variants = {
"data": (unit, original_dim_unit, original_coord_unit),
"dims": (original_data_unit, unit, original_coord_unit),
"coords": (original_data_unit, original_dim_unit, unit),
}
data_unit, dim_unit, coord_unit = variants.get(variant)
left_array = np.arange(10).astype(dtype) * original_data_unit
right_array = np.arange(-5, 5).astype(dtype) * data_unit
left_dim = np.arange(10, 20) * original_dim_unit
right_dim = np.arange(5, 15) * dim_unit
left_coord = np.arange(-10, 0) * original_coord_unit
right_coord = np.arange(-15, -5) * coord_unit
left = xr.Dataset(
data_vars={"a": ("x", left_array)},
coords={"x": left_dim, "y": ("x", left_coord)},
)
right = xr.Dataset(
data_vars={"a": ("x", right_array)},
coords={"x": right_dim, "y": ("x", right_coord)},
)
units = extract_units(left)
if error is not None:
with pytest.raises(error):
left.merge(right)
return
converted = convert_units(right, units)
expected = attach_units(strip_units(left).merge(strip_units(converted)), units)
actual = left.merge(right)
assert_equal_with_units(expected, actual)
|
apache-2.0
|
manjunaths/tensorflow
|
tensorflow/contrib/learn/python/learn/tests/dataframe/tensorflow_dataframe_test.py
|
18
|
13089
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.dataframe.tensorflow_dataframe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import math
import sys
import tempfile
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import densify
from tensorflow.core.example import example_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def _assert_df_equals_dict(expected_df, actual_dict):
for col in expected_df:
if expected_df[col].dtype in [np.float32, np.float64]:
assertion = np.testing.assert_allclose
else:
assertion = np.testing.assert_array_equal
if expected_df[col].dtype.kind in ["O", "S", "U"]:
# Python 2/3 compatibility
# TensorFlow always returns bytes, so we just convert the unicode
# expectations to bytes also before comparing.
expected_values = [x.encode("utf-8") for x in expected_df[col].values]
else:
expected_values = expected_df[col].values
assertion(
expected_values,
actual_dict[col],
err_msg="Expected {} in column '{}'; got {}.".format(expected_values,
col,
actual_dict[col]))
def _make_test_csv():
f = tempfile.NamedTemporaryFile(
dir=test.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
intvalue = np.random.randint(-10, 10)
floatvalue = np.random.rand()
boolvalue = int(np.random.rand() > 0.3)
stringvalue = "S: %.4f" % np.random.rand()
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_csv_sparse():
f = tempfile.NamedTemporaryFile(
dir=test.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
# leave columns empty; these will be read as default value (e.g. 0 or NaN)
intvalue = np.random.randint(-10, 10) if np.random.rand() > 0.5 else ""
floatvalue = np.random.rand() if np.random.rand() > 0.5 else ""
boolvalue = int(np.random.rand() > 0.3) if np.random.rand() > 0.5 else ""
stringvalue = (("S: %.4f" % np.random.rand()) if np.random.rand() > 0.5 else
"")
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_tfrecord():
f = tempfile.NamedTemporaryFile(dir=test.get_temp_dir(), delete=False)
w = tf_record.TFRecordWriter(f.name)
for i in range(100):
ex = example_pb2.Example()
ex.features.feature["var_len_int"].int64_list.value.extend(range((i % 3)))
ex.features.feature["fixed_len_float"].float_list.value.extend(
[float(i), 2 * float(i)])
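    # record i stores range(i % 3) as var_len_int and [i, 2 * i] as fixed_len_float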
w.write(ex.SerializeToString())
return f.name
class TensorFlowDataFrameTestCase(test.TestCase):
"""Tests for `TensorFlowDataFrame`."""
def _assert_pandas_equals_tensorflow(self, pandas_df, tensorflow_df,
num_batches, batch_size):
self.assertItemsEqual(
list(pandas_df.columns) + ["index"], tensorflow_df.columns())
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
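      # batches continue where the previous one left off and wrap around to row 0
      # once they pass the last row of the pandas DataFrame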
row_numbers = [
total_row_num % pandas_df.shape[0]
for total_row_num in range(batch_size * batch_num, batch_size * (
batch_num + 1))
]
expected_df = pandas_df.iloc[row_numbers]
_assert_df_equals_dict(expected_df, batch)
def testInitFromPandas(self):
"""Test construction from Pandas DataFrame."""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({"sparrow": range(10), "ostrich": 1})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(
pandas_df, batch_size=10, shuffle=False)
batch = tensorflow_df.run_one_batch()
np.testing.assert_array_equal(pandas_df.index.values, batch["index"],
"Expected index {}; got {}".format(
pandas_df.index.values, batch["index"]))
_assert_df_equals_dict(pandas_df, batch)
def testBatch(self):
"""Tests `batch` method.
`DataFrame.batch()` should iterate through the rows of the
`pandas.DataFrame`, and should "wrap around" when it reaches the last row.
"""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({
"albatross": range(10),
"bluejay": 1,
"cockatoo": range(0, 20, 2),
"penguin": list("abcdefghij")
})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df, shuffle=False)
# Rebatch `df` into the following sizes successively.
batch_sizes = [4, 7]
num_batches = 3
final_batch_size = batch_sizes[-1]
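    # rebatch twice; the comparison below uses the last batch size (7)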
for batch_size in batch_sizes:
tensorflow_df = tensorflow_df.batch(batch_size, shuffle=False)
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=final_batch_size)
def testFromNumpy(self):
x = np.eye(20)
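    # each row of the 20x20 identity matrix should come back with a single 1 at
    # its own index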
tensorflow_df = df.TensorFlowDataFrame.from_numpy(x, batch_size=10)
for batch in tensorflow_df.run(30):
for ind, val in zip(batch["index"], batch["value"]):
expected_val = np.zeros_like(val)
expected_val[ind] = 1
np.testing.assert_array_equal(expected_val, val)
def testFromCSV(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
enqueue_size = 7
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
pandas_df = pd.read_csv(data_path)
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
enqueue_size=enqueue_size,
batch_size=batch_size,
shuffle=False,
default_values=default_values)
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromCSVLimitEpoch(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
actual_num_batches = len(result_batches)
self.assertEqual(expected_num_batches, actual_num_batches)
# TODO(soergel): figure out how to dequeue the final small batch
    expected_rows = 1696  # 212 full batches of 8; would be num_epochs * 100 = 1700 with the final partial batch
actual_rows = sum([len(x["int"]) for x in result_batches])
self.assertEqual(expected_rows, actual_rows)
def testFromCSVWithFeatureSpec(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
data_path = _make_test_csv_sparse()
feature_spec = {
"int": parsing_ops.FixedLenFeature(None, dtypes.int16, np.nan),
"float": parsing_ops.VarLenFeature(dtypes.float16),
"bool": parsing_ops.VarLenFeature(dtypes.bool),
"string": parsing_ops.FixedLenFeature(None, dtypes.string, "")
}
pandas_df = pd.read_csv(data_path, dtype={"string": object})
# Pandas insanely uses NaN for empty cells in a string column.
# And, we can't use Pandas replace() to fix them because nan != nan
s = pandas_df["string"]
for i in range(0, len(s)):
if isinstance(s[i], float) and math.isnan(s[i]):
pandas_df.set_value(i, "string", "")
tensorflow_df = df.TensorFlowDataFrame.from_csv_with_feature_spec(
[data_path],
batch_size=batch_size,
shuffle=False,
feature_spec=feature_spec)
# These columns were sparse; re-densify them for comparison
tensorflow_df["float"] = densify.Densify(np.nan)(tensorflow_df["float"])
tensorflow_df["bool"] = densify.Densify(np.nan)(tensorflow_df["bool"])
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromExamples(self):
num_batches = 77
enqueue_size = 11
batch_size = 13
data_path = _make_test_tfrecord()
features = {
"fixed_len_float":
parsing_ops.FixedLenFeature(
shape=[2], dtype=dtypes.float32, default_value=[0.0, 0.0]),
"var_len_int":
parsing_ops.VarLenFeature(dtype=dtypes.int64)
}
tensorflow_df = df.TensorFlowDataFrame.from_examples(
data_path,
enqueue_size=enqueue_size,
batch_size=batch_size,
features=features,
shuffle=False)
# `test.tfrecord` contains 100 records with two features: var_len_int and
# fixed_len_float. Entry n contains `range(n % 3)` and
# `float(n)` for var_len_int and fixed_len_float,
# respectively.
num_records = 100
def _expected_fixed_len_float(n):
return np.array([float(n), 2 * float(n)])
def _expected_var_len_int(n):
return np.arange(n % 3)
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
record_numbers = [
n % num_records
for n in range(batch_num * batch_size, (batch_num + 1) * batch_size)
]
for i, j in enumerate(record_numbers):
np.testing.assert_allclose(
_expected_fixed_len_float(j), batch["fixed_len_float"][i])
var_len_int = batch["var_len_int"]
for i, ind in enumerate(var_len_int.indices):
val = var_len_int.values[i]
expected_row = _expected_var_len_int(record_numbers[ind[0]])
expected_value = expected_row[ind[1]]
np.testing.assert_array_equal(expected_value, val)
def testSplitString(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
a, b = tensorflow_df.split("string", 0.7) # no rebatching
total_result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
a_result_batches = list(a.run(num_epochs=num_epochs))
b_result_batches = list(b.run(num_epochs=num_epochs))
self.assertEqual(expected_num_batches, len(total_result_batches))
self.assertEqual(expected_num_batches, len(a_result_batches))
self.assertEqual(expected_num_batches, len(b_result_batches))
total_rows = sum([len(x["int"]) for x in total_result_batches])
a_total_rows = sum([len(x["int"]) for x in a_result_batches])
b_total_rows = sum([len(x["int"]) for x in b_result_batches])
print("Split rows: %s => %s, %s" % (total_rows, a_total_rows, b_total_rows))
# TODO(soergel): figure out how to dequeue the final small batch
    expected_total_rows = 1696  # 212 full batches of 8; would be num_epochs * 100 = 1700 with the final partial batch
self.assertEqual(expected_total_rows, total_rows)
self.assertEqual(1087, a_total_rows) # stochastic but deterministic
# self.assertEqual(int(total_rows * 0.7), a_total_rows)
self.assertEqual(609, b_total_rows) # stochastic but deterministic
# self.assertEqual(int(total_rows * 0.3), b_total_rows)
# The strings used for hashing were all unique in the original data, but
# we ran 17 epochs, so each one should appear 17 times. Each copy should
# be hashed into the same partition, so there should be no overlap of the
# keys.
a_strings = set([s for x in a_result_batches for s in x["string"]])
b_strings = set([s for x in b_result_batches for s in x["string"]])
self.assertEqual(frozenset(), a_strings & b_strings)
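# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original test module: the assertions
# above expect `TensorFlowDataFrame` to cycle through the underlying rows and
# wrap around modulo the number of rows (see the `testBatch` docstring).  The
# helper below only documents that expected row numbering; the name
# `_expected_wraparound_rows` is ours and is not used by the tests.
def _expected_wraparound_rows(num_rows, batch_size, batch_num):
  """Return the row indices batch `batch_num` should contain.

  For example, with num_rows=10 and batch_size=4, batch 2 covers rows
  [8, 9, 0, 1].
  """
  start = batch_size * batch_num
  return [i % num_rows for i in range(start, start + batch_size)]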
if __name__ == "__main__":
test.main()
|
apache-2.0
|
dhennes/pykep
|
PyKEP/trajopt/_pl2pl_N_impulses.py
|
5
|
8898
|
from PyGMO.problem import base as base_problem
from PyKEP.core import epoch, DAY2SEC, lambert_problem, propagate_lagrangian, SEC2DAY, AU, ic2par
from PyKEP.planet import jpl_lp
from math import pi, cos, sin, log, acos
from scipy.linalg import norm
class pl2pl_N_impulses(base_problem):
"""
This class is a PyGMO (http://esa.github.io/pygmo/) problem representing a single leg transfer
    between two planets allowing up to a maximum number of impulsive Deep Space Manoeuvres.
The decision vector is::
[t0,T] + [alpha,u,v,V_inf]*(N-2) +[alpha] + ([tf])
... in the units: [mjd2000, days] + [nd, nd, m/sec, nd] + [nd] + [mjd2000]
Each leg time-of-flight can be decoded as follows, T_n = T log(alpha_n) / \sum_i(log(alpha_i))
.. note::
The resulting problem is box-bounded (unconstrained). The resulting trajectory is time-bounded.
"""
def __init__(self,
start=jpl_lp('earth'),
target=jpl_lp('venus'),
N_max=3,
tof=[20., 400.],
vinf=[0., 4.],
phase_free=True,
multi_objective=False,
t0=None
):
"""
prob = PyKEP.trajopt.pl2pl_N_impulses(start=jpl_lp('earth'), target=jpl_lp('venus'), N_max=3, tof=[20., 400.], vinf=[0., 4.], phase_free=True, multi_objective=False, t0=None)
- start: a PyKEP planet defining the starting orbit
- target: a PyKEP planet defining the target orbit
- N_max: maximum number of impulses
- tof: a list containing the box bounds [lower,upper] for the time of flight (days)
- vinf: a list containing the box bounds [lower,upper] for each DV magnitude (km/sec)
        - phase_free: when True, no rendezvous condition is enforced and the final orbit will be reached at an optimal true anomaly
- multi_objective: when True, a multi-objective problem is constructed with DV and time of flight as objectives
- t0: launch window defined as a list of two epochs [epoch,epoch]
"""
# Sanity checks
# 1) all planets need to have the same mu_central_body
if (start.mu_central_body != target.mu_central_body):
raise ValueError('Starting and ending PyKEP.planet must have the same mu_central_body')
# 2) Number of impulses must be at least 2
if N_max < 2:
raise ValueError('Number of impulses N is less than 2')
# 3) If phase_free is True, t0 does not make sense
if (t0 is None and not phase_free):
t0 = [epoch(0), epoch(1000)]
if (t0 is not None and phase_free):
raise ValueError('When phase_free is True no t0 can be specified')
# We compute the PyGMO problem dimensions
dim = 2 + 4 * (N_max - 2) + 1 + phase_free
obj_dim = multi_objective + 1
# First we call the constructor for the base PyGMO problem
# As our problem is n dimensional, box-bounded (may be multi-objective), we write
# (dim, integer dim, number of obj, number of con, number of inequality con, tolerance on con violation)
super(pl2pl_N_impulses, self).__init__(dim, 0, obj_dim, 0, 0, 0)
# We then define all class data members
self.start = start
self.target = target
self.N_max = N_max
self.phase_free = phase_free
self.multi_objective = multi_objective
self.__common_mu = start.mu_central_body
# And we compute the bounds
if phase_free:
lb = [start.ref_epoch.mjd2000, tof[0]] + [0.0, 0.0, 0.0, vinf[0] * 1000] * (N_max - 2) + [0.0] + [target.ref_epoch.mjd2000]
ub = [start.ref_epoch.mjd2000 + 2 * start.period * SEC2DAY, tof[1]] + [1.0, 1.0, 1.0, vinf[1] * 1000] * (N_max - 2) + [1.0] + [target.ref_epoch.mjd2000 + 2 * target.period * SEC2DAY]
else:
lb = [t0[0].mjd2000, tof[0]] + [0.0, 0.0, 0.0, vinf[0] * 1000] * (N_max - 2) + [0.0]
ub = [t0[1].mjd2000, tof[1]] + [1.0, 1.0, 1.0, vinf[1] * 1000] * (N_max - 2) + [1.0]
# And we set them
self.set_bounds(lb, ub)
# Objective function
def _objfun_impl(self, x):
        # 1 - we 'decode' the chromosome recording the various deep space
        # manoeuvre timings (days) in the list T
T = list([0] * (self.N_max - 1))
for i in range(len(T)):
T[i] = log(x[2 + 4 * i])
total = sum(T)
T = [x[1] * time / total for time in T]
# 2 - We compute the starting and ending position
r_start, v_start = self.start.eph(epoch(x[0]))
if self.phase_free:
r_target, v_target = self.target.eph(epoch(x[-1]))
else:
r_target, v_target = self.target.eph(epoch(x[0] + x[1]))
# 3 - We loop across inner impulses
rsc = r_start
vsc = v_start
for i, time in enumerate(T[:-1]):
theta = 2 * pi * x[3 + 4 * i]
phi = acos(2 * x[4 + 4 * i] - 1) - pi / 2
Vinfx = x[5 + 4 * i] * cos(phi) * cos(theta)
Vinfy = x[5 + 4 * i] * cos(phi) * sin(theta)
Vinfz = x[5 + 4 * i] * sin(phi)
# We apply the (i+1)-th impulse
vsc = [a + b for a, b in zip(vsc, [Vinfx, Vinfy, Vinfz])]
rsc, vsc = propagate_lagrangian(
rsc, vsc, T[i] * DAY2SEC, self.__common_mu)
cw = (ic2par(rsc, vsc, self.start.mu_central_body)[2] > pi / 2)
# We now compute the remaining two final impulses
# Lambert arc to reach seq[1]
dt = T[-1] * DAY2SEC
l = lambert_problem(rsc, r_target, dt, self.__common_mu, cw, False)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
DV1 = norm([a - b for a, b in zip(v_beg_l, vsc)])
DV2 = norm([a - b for a, b in zip(v_end_l, v_target)])
DV_others = sum(x[5::4])
if self.f_dimension == 1:
return (DV1 + DV2 + DV_others,)
else:
return (DV1 + DV2 + DV_others, x[1])
def plot(self, x, ax=None):
"""
ax = prob.plot(x, ax=None)
- x: encoded trajectory
- ax: matplotlib axis where to plot. If None figure and axis will be created
- [out] ax: matplotlib axis where to plot
Plots the trajectory represented by a decision vector x on the 3d axis ax
Example::
ax = prob.plot(x)
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
if ax is None:
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
axis = fig.gca(projection='3d')
else:
axis = ax
axis.scatter(0, 0, 0, color='y')
        # 1 - we 'decode' the chromosome recording the various deep space
        # manoeuvre timings (days) in the list T
T = list([0] * (self.N_max - 1))
for i in range(len(T)):
T[i] = log(x[2 + 4 * i])
total = sum(T)
T = [x[1] * time / total for time in T]
# 2 - We compute the starting and ending position
r_start, v_start = self.start.eph(epoch(x[0]))
if self.phase_free:
r_target, v_target = self.target.eph(epoch(x[-1]))
else:
r_target, v_target = self.target.eph(epoch(x[0] + x[1]))
plot_planet(self.start, t0=epoch(x[0]), color=(0.8, 0.6, 0.8), legend=True, units = AU, ax=axis)
plot_planet(self.target, t0=epoch(x[0] + x[1]), color=(0.8, 0.6, 0.8), legend=True, units = AU, ax=axis)
# 3 - We loop across inner impulses
rsc = r_start
vsc = v_start
for i, time in enumerate(T[:-1]):
theta = 2 * pi * x[3 + 4 * i]
phi = acos(2 * x[4 + 4 * i] - 1) - pi / 2
Vinfx = x[5 + 4 * i] * cos(phi) * cos(theta)
Vinfy = x[5 + 4 * i] * cos(phi) * sin(theta)
Vinfz = x[5 + 4 * i] * sin(phi)
# We apply the (i+1)-th impulse
vsc = [a + b for a, b in zip(vsc, [Vinfx, Vinfy, Vinfz])]
            plot_kepler(rsc, vsc, T[i] * DAY2SEC, self.__common_mu,
                        N=200, color='b', legend=False, units=AU, ax=axis)
rsc, vsc = propagate_lagrangian(
rsc, vsc, T[i] * DAY2SEC, self.__common_mu)
cw = (ic2par(rsc, vsc, self.start.mu_central_body)[2] > pi / 2)
# We now compute the remaining two final impulses
# Lambert arc to reach seq[1]
dt = T[-1] * DAY2SEC
l = lambert_problem(rsc, r_target, dt, self.__common_mu, cw, False)
plot_lambert(
l, sol=0, color='r', legend=False, units=AU, ax=axis, N=200)
plt.show()
return axis
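# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: the class docstring
# above decodes the alpha variables into leg times of flight as
# T_n = T * log(alpha_n) / sum_i(log(alpha_i)).  The guarded block below walks
# through that decoding on a toy chromosome fragment (the alpha values and the
# total time of flight are made up); it does not construct the PyGMO problem.
if __name__ == "__main__":
    alphas = [0.3, 0.5, 0.9]   # hypothetical alpha values in (0, 1)
    total_tof = 300.0          # hypothetical total time of flight T, in days
    logs = [log(a) for a in alphas]
    legs = [total_tof * l / sum(logs) for l in logs]
    # The log-ratio decoding guarantees the leg times sum back to total_tof.
    print("Leg times (days): {0} (sum = {1})".format(legs, sum(legs)))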
|
gpl-3.0
|
jackru/pybrain
|
examples/rl/environments/linear_fa/bicycle.py
|
26
|
14462
|
from __future__ import print_function
"""An attempt to implement Randlov and Alstrom (1998). They successfully
use reinforcement learning to balance a bicycle, and to control it to drive
to a specified goal location. Their work has been used since then by a few
researchers as a benchmark problem.
We only implement the balance task. This implementation differs at least
slightly, since Randlov and Alstrom did not mention anything about how they
annealed/decayed their learning rate, etc. As a result of these differences, the
results do not match those obtained by Randlov and Alstrom.
"""
__author__ = 'Chris Dembia, Bruce Cam, Johnny Israeli'
from scipy import asarray
import numpy as np  # used throughout this module via the np. prefix
from numpy import sin, cos, tan, sqrt, arcsin, arctan, sign, clip, argwhere
from matplotlib import pyplot as plt
import pybrain.rl.environments
from pybrain.rl.environments.environment import Environment
from pybrain.rl.learners.valuebased.linearfa import SARSALambda_LinFA
from pybrain.rl.agents.linearfa import LinearFA_Agent
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.utilities import one_to_n
class BicycleEnvironment(Environment):
"""Randlov and Alstrom's bicycle model. This code matches nearly exactly
some c code we found online for simulating Randlov and Alstrom's
bicycle. The bicycle travels at a fixed speed.
"""
# For superclass.
indim = 2
outdim = 10
# Environment parameters.
time_step = 0.01
# Goal position and radius
# Lagouakis (2002) uses angle to goal, not heading, as a state
max_distance = 1000.
# Acceleration on Earth's surface due to gravity (m/s^2):
g = 9.82
# See the paper for a description of these quantities:
# Distances (in meters):
c = 0.66
dCM = 0.30
h = 0.94
L = 1.11
r = 0.34
# Masses (in kilograms):
Mc = 15.0
Md = 1.7
Mp = 60.0
# Velocity of a bicycle (in meters per second), equal to 10 km/h:
v = 10.0 * 1000.0 / 3600.0
# Derived constants.
M = Mc + Mp # See Randlov's code.
Idc = Md * r**2
Idv = 1.5 * Md * r**2
Idl = 0.5 * Md * r**2
Itot = 13.0 / 3.0 * Mc * h**2 + Mp * (h + dCM)**2
sigmad = v / r
def __init__(self):
Environment.__init__(self)
self.reset()
self.actions = [0.0, 0.0]
self._save_wheel_contact_trajectories = False
def performAction(self, actions):
self.actions = actions
self.step()
def saveWheelContactTrajectories(self, opt):
self._save_wheel_contact_trajectories = opt
def step(self):
# Unpack the state and actions.
# -----------------------------
# Want to ignore the previous value of omegadd; it could only cause a
# bug if we assign to it.
(theta, thetad, omega, omegad, _,
xf, yf, xb, yb, psi) = self.sensors
(T, d) = self.actions
# For recordkeeping.
# ------------------
if self._save_wheel_contact_trajectories:
self.xfhist.append(xf)
self.yfhist.append(yf)
self.xbhist.append(xb)
self.ybhist.append(yb)
# Intermediate time-dependent quantities.
# ---------------------------------------
# Avoid divide-by-zero, just as Randlov did.
if theta == 0:
rf = 1e8
rb = 1e8
rCM = 1e8
else:
rf = self.L / np.abs(sin(theta))
rb = self.L / np.abs(tan(theta))
rCM = sqrt((self.L - self.c)**2 + self.L**2 / tan(theta)**2)
phi = omega + np.arctan(d / self.h)
# Equations of motion.
# --------------------
# Second derivative of angular acceleration:
omegadd = 1 / self.Itot * (self.M * self.h * self.g * sin(phi)
- cos(phi) * (self.Idc * self.sigmad * thetad
+ sign(theta) * self.v**2 * (
self.Md * self.r * (1.0 / rf + 1.0 / rb)
+ self.M * self.h / rCM)))
thetadd = (T - self.Idv * self.sigmad * omegad) / self.Idl
# Integrate equations of motion using Euler's method.
# ---------------------------------------------------
# yt+1 = yt + yd * dt.
# Must update omega based on PREVIOUS value of omegad.
omegad += omegadd * self.time_step
omega += omegad * self.time_step
thetad += thetadd * self.time_step
theta += thetad * self.time_step
# Handlebars can't be turned more than 80 degrees.
theta = np.clip(theta, -1.3963, 1.3963)
# Wheel ('tyre') contact positions.
# ---------------------------------
# Front wheel contact position.
front_temp = self.v * self.time_step / (2 * rf)
# See Randlov's code.
if front_temp > 1:
front_temp = sign(psi + theta) * 0.5 * np.pi
else:
front_temp = sign(psi + theta) * arcsin(front_temp)
xf += self.v * self.time_step * -sin(psi + theta + front_temp)
yf += self.v * self.time_step * cos(psi + theta + front_temp)
# Rear wheel.
back_temp = self.v * self.time_step / (2 * rb)
# See Randlov's code.
if back_temp > 1:
back_temp = np.sign(psi) * 0.5 * np.pi
else:
back_temp = np.sign(psi) * np.arcsin(back_temp)
xb += self.v * self.time_step * -sin(psi + back_temp)
yb += self.v * self.time_step * cos(psi + back_temp)
# Preventing numerical drift.
# ---------------------------
# Copying what Randlov did.
current_wheelbase = sqrt((xf - xb)**2 + (yf - yb)**2)
if np.abs(current_wheelbase - self.L) > 0.01:
relative_error = self.L / current_wheelbase - 1.0
xb += (xb - xf) * relative_error
yb += (yb - yf) * relative_error
# Update heading, psi.
# --------------------
delta_y = yf - yb
if (xf == xb) and delta_y < 0.0:
psi = np.pi
else:
if delta_y > 0.0:
psi = arctan((xb - xf) / delta_y)
else:
psi = sign(xb - xf) * 0.5 * np.pi - arctan(delta_y / (xb - xf))
self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi])
def reset(self):
theta = 0
thetad = 0
omega = 0
omegad = 0
omegadd = 0
xf = 0
yf = self.L
xb = 0
yb = 0
psi = np.arctan((xb - xf) / (yf - yb))
self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi])
self.xfhist = []
self.yfhist = []
self.xbhist = []
self.ybhist = []
def getSteer(self):
return self.sensors[0]
def getTilt(self):
return self.sensors[2]
def get_xfhist(self):
return self.xfhist
def get_yfhist(self):
return self.yfhist
def get_xbhist(self):
return self.xbhist
def get_ybhist(self):
return self.ybhist
def getSensors(self):
return self.sensors
class BalanceTask(pybrain.rl.environments.EpisodicTask):
"""The rider is to simply balance the bicycle while moving with the
    speed prescribed in the environment. This class uses a continuous
    5-dimensional state space, and a discrete action space.
This class is heavily guided by
pybrain.rl.environments.cartpole.balancetask.BalanceTask.
"""
max_tilt = np.pi / 6.
nactions = 9
def __init__(self, max_time=1000.0):
super(BalanceTask, self).__init__(BicycleEnvironment())
self.max_time = max_time
# Keep track of time in case we want to end episodes based on number of
# time steps.
self.t = 0
@property
def indim(self):
return 1
@property
def outdim(self):
return 5
def reset(self):
super(BalanceTask, self).reset()
self.t = 0
def performAction(self, action):
"""Incoming action is an int between 0 and 8. The action we provide to
the environment consists of a torque T in {-2 N, 0, 2 N}, and a
displacement d in {-.02 m, 0, 0.02 m}.
"""
self.t += 1
assert round(action[0]) == action[0]
# -1 for action in {0, 1, 2}, 0 for action in {3, 4, 5}, 1 for
# action in {6, 7, 8}
torque_selector = np.floor(action[0] / 3.0) - 1.0
T = 2 * torque_selector
# Random number in [-1, 1]:
p = 2.0 * np.random.rand() - 1.0
# -1 for action in {0, 3, 6}, 0 for action in {1, 4, 7}, 1 for
# action in {2, 5, 8}
disp_selector = action[0] % 3 - 1.0
d = 0.02 * disp_selector + 0.02 * p
super(BalanceTask, self).performAction([T, d])
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi) = self.env.getSensors()
return self.env.getSensors()[0:5]
def isFinished(self):
# Criterion for ending an episode. From Randlov's paper:
# "When the agent can balance for 1000 seconds, the task is considered
# learned."
if np.abs(self.env.getTilt()) > self.max_tilt:
return True
elapsed_time = self.env.time_step * self.t
if elapsed_time > self.max_time:
return True
return False
def getReward(self):
# -1 reward for falling over; no reward otherwise.
if np.abs(self.env.getTilt()) > self.max_tilt:
return -1.0
return 0.0
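# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script: performAction above
# maps a discrete action index in {0, ..., 8} onto a torque/displacement
# pair (T, d).  The helper below reproduces that mapping without the random
# displacement noise, purely as documentation; the name `_decode_action` is
# ours and is not used anywhere else in this file.
def _decode_action(action):
    """Return (T, d) for an integer action in {0, ..., 8}."""
    torque_selector = action // 3 - 1   # -1 for {0,1,2}, 0 for {3,4,5}, +1 for {6,7,8}
    disp_selector = action % 3 - 1      # -1 for {0,3,6}, 0 for {1,4,7}, +1 for {2,5,8}
    return 2.0 * torque_selector, 0.02 * disp_selector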
class LinearFATileCoding3456BalanceTask(BalanceTask):
"""An attempt to exactly implement Randlov's function approximation. He
discretized (tiled) the state space into 3456 bins. We use the same action
space as in the superclass.
"""
# From Randlov, 1998:
theta_bounds = np.array(
[-0.5 * np.pi, -1.0, -0.2, 0, 0.2, 1.0, 0.5 * np.pi])
thetad_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
omega_bounds = np.array(
[-BalanceTask.max_tilt, -0.15, -0.06, 0, 0.06, 0.15,
BalanceTask.max_tilt])
omegad_bounds = np.array(
[-np.inf, -0.5, -0.25, 0, 0.25, 0.5, np.inf])
omegadd_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
# http://stackoverflow.com/questions/3257619/numpy-interconversion-between-multidimensional-and-linear-indexing
nbins_across_dims = [
len(theta_bounds) - 1,
len(thetad_bounds) - 1,
len(omega_bounds) - 1,
len(omegad_bounds) - 1,
len(omegadd_bounds) - 1]
# This array, when dotted with the 5-dim state vector, gives a 'linear'
# index between 0 and 3455.
magic_array = np.cumprod([1] + nbins_across_dims)[:-1]
@property
def outdim(self):
# Used when constructing LinearFALearner's.
return 3456
def getBin(self, theta, thetad, omega, omegad, omegadd):
bin_indices = [
np.digitize([theta], self.theta_bounds)[0] - 1,
np.digitize([thetad], self.thetad_bounds)[0] - 1,
np.digitize([omega], self.omega_bounds)[0] - 1,
np.digitize([omegad], self.omegad_bounds)[0] - 1,
np.digitize([omegadd], self.omegadd_bounds)[0] - 1,
]
return np.dot(self.magic_array, bin_indices)
def getBinIndices(self, linear_index):
"""Given a linear index (integer between 0 and outdim), returns the bin
indices for each of the state dimensions.
"""
        return linear_index // self.magic_array % self.nbins_across_dims
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi) = self.env.getSensors()
state = one_to_n(self.getBin(theta, thetad, omega, omegad, omegadd),
self.outdim)
return state
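# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script: magic_array above is
# the cumulative-product stride vector that flattens the five per-dimension
# bin indices into one linear index in [0, 3456), and getBinIndices inverts
# it with floor division and modulo.  The round trip below only documents
# that scheme; the function name is ours.
def _linear_index_roundtrip(bin_indices, nbins_across_dims):
    """Flatten bin indices with cumprod strides, then recover them again."""
    strides = np.cumprod([1] + list(nbins_across_dims))[:-1]
    linear = int(np.dot(strides, bin_indices))
    recovered = [int(linear // s % n)
                 for s, n in zip(strides, nbins_across_dims)]
    return linear, recovered
# e.g. _linear_index_roundtrip([2, 1, 3, 0, 1],
#                              LinearFATileCoding3456BalanceTask.nbins_across_dims)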
class SARSALambda_LinFA_ReplacingTraces(SARSALambda_LinFA):
"""Randlov used replacing traces, but this doesn't exist in PyBrain's
SARSALambda.
"""
def _updateEtraces(self, state, action, responsibility=1.):
self._etraces *= self.rewardDiscount * self._lambda * responsibility
# This assumes that state is an identity vector (like, from one_to_n).
self._etraces[action] = clip(self._etraces[action] + state, -np.inf, 1.)
# Set the trace for all other actions in this state to 0:
action_bit = one_to_n(action, self.num_actions)
        for argstate in argwhere(state == 1):
self._etraces[argwhere(action_bit != 1), argstate] = 0.
task = LinearFATileCoding3456BalanceTask()
env = task.env
# The learning is very sensitive to the learning rate decay.
learner = SARSALambda_LinFA_ReplacingTraces(task.nactions, task.outdim,
learningRateDecay=2000)
learner._lambda = 0.95
task.discount = learner.rewardDiscount
agent = LinearFA_Agent(learner)
agent.logging = False
exp = EpisodicExperiment(task, agent)
performance_agent = LinearFA_Agent(learner)
performance_agent.logging = False
performance_agent.greedy = True
performance_agent.learning = False
env.saveWheelContactTrajectories(True)
plt.ion()
plt.figure(figsize=(8, 4))
ax1 = plt.subplot(1, 2, 1)
ax2 = plt.subplot(1, 2, 2)
def update_wheel_trajectories():
front_lines = ax2.plot(env.get_xfhist(), env.get_yfhist(), 'r')
back_lines = ax2.plot(env.get_xbhist(), env.get_ybhist(), 'b')
plt.axis('equal')
perform_cumrewards = []
for irehearsal in range(7000):
# Learn.
# ------
r = exp.doEpisodes(1)
# Discounted reward.
cumreward = exp.task.getTotalReward()
#print 'cumreward: %.4f; nsteps: %i; learningRate: %.4f' % (
# cumreward, len(r[0]), exp.agent.learner.learningRate)
if irehearsal % 50 == 0:
# Perform (no learning).
# ----------------------
# Swap out the agent.
exp.agent = performance_agent
# Perform.
r = exp.doEpisodes(1)
perform_cumreward = task.getTotalReward()
perform_cumrewards.append(perform_cumreward)
print('PERFORMANCE: cumreward:', perform_cumreward, 'nsteps:', len(r[0]))
# Swap back the learning agent.
performance_agent.reset()
exp.agent = agent
ax1.cla()
ax1.plot(perform_cumrewards, '.--')
# Wheel trajectories.
update_wheel_trajectories()
plt.pause(0.001)
|
bsd-3-clause
|