repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
sho-87/python-machine-learning | CNN/mw/11_eeg_mw_realtime_subject.py | 1 | 14737 | from __future__ import print_function
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
import matplotlib.pyplot as plt
from tqdm import tqdm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from lasagne.layers import InputLayer, Conv1DLayer, Pool1DLayer
from lasagne.regularization import regularize_network_params, l2
VERBOSE = False
GRID_SEARCH = False
def bootstrap(data, labels, boot_type="downsample"):
print("Bootstrapping data...")
ot_class = 0
mw_class = 1
ot_idx = np.where(labels == ot_class)
mw_idx = np.where(labels == mw_class)
# Get OT examples
ot_data = data[ot_idx]
ot_labels = labels[ot_idx]
print(" - OT (class: {}) | Data: {} | Labels: {}".format(ot_class, ot_data.shape, ot_labels.shape))
# Get MW examples
mw_data = data[mw_idx]
mw_labels = labels[mw_idx]
print(" - MW (class: {}) | Data: {} | Labels: {}".format(mw_class, mw_data.shape, mw_labels.shape))
# Set majority and minority classes
if ot_data.shape[0] > mw_data.shape[0]:
maj_class, maj_data, maj_labels = ot_class, ot_data, ot_labels
min_class, min_data, min_labels = mw_class, mw_data, mw_labels
else:
maj_class, maj_data, maj_labels = mw_class, mw_data, mw_labels
min_class, min_data, min_labels = ot_class, ot_data, ot_labels
print(" - Majority class: {} (N = {}) | Minority class: {} (N = {})".format(maj_class, maj_data.shape[0],
min_class, min_data.shape[0]))
# Upsample minority class
if boot_type == "upsample":
print("Upsampling minority class...")
num_to_boot = maj_data.shape[0] - min_data.shape[0]
print(" - Number to upsample: {}".format(num_to_boot))
bootstrap_idx = np.random.randint(min_data.shape[0], size=num_to_boot)
min_data_boot = min_data[bootstrap_idx]
min_labels_boot = min_labels[bootstrap_idx]
final_data = np.concatenate((data, min_data_boot), axis=0)
final_labels = np.concatenate((labels, min_labels_boot), axis=0)
elif boot_type == "downsample":
print("Downsampling majority class...")
# Resample N = number of minority examples
num_to_boot = min_data.shape[0]
bootstrap_idx = np.random.randint(maj_data.shape[0], size=num_to_boot)
maj_data_boot = maj_data[bootstrap_idx]
maj_labels_boot = maj_labels[bootstrap_idx]
final_data = np.concatenate((maj_data_boot, min_data), axis=0)
final_labels = np.concatenate((maj_labels_boot, min_labels), axis=0)
print("Final class balance: {} ({}) - {} ({})".format(
maj_class, len(np.where(final_labels==maj_class)[0]),
min_class, len(np.where(final_labels==min_class)[0])))
return final_data, final_labels
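# Added sanity-check sketch (not part of the original pipeline): bootstrap()
# can be exercised on small synthetic arrays; the toy shapes and labels below
# are assumptions made only for illustration.
if VERBOSE:
    _toy_data = np.random.randn(100, 1, 30)
    _toy_labels = np.concatenate((np.zeros(70), np.ones(30)))
    _bal_data, _bal_labels = bootstrap(_toy_data, _toy_labels, "downsample")
    # After downsampling the majority class, both classes hold ~30 examples each.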
# Load EEG data
base_dir = os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), os.pardir), os.pardir))
data_dir = os.path.join(base_dir, "data")
data = np.load(os.path.join(data_dir, 'all_data_6_2d_full_30ch.npy'))
data = data.transpose(0,2,1) # Equivalent to dimshuffle
data_labels = np.load(os.path.join(data_dir, 'all_data_6_2d_full_30ch_labels.npy'))
subject_list = data_labels[:,0]
subject_list = np.repeat(subject_list, data.shape[1])
data_labels = data_labels[:,1]
data_labels = data_labels.repeat(data.shape[1]) # Repeat each label
temp_arrays = []
for i in range(data.shape[0]):
temp_arrays.append(data[i,:])
data = np.concatenate(temp_arrays) # Stack the 2D arrays vertically
del temp_arrays
data = data.reshape(-1, 1, 30) # Add extra channel for depth
# Electrode Order (30 channels)
electrode_order = ('Fp1','Fp2','Fz',
'F4','F8','FC6',
'C4','T8','CP6',
'P4','P8','P10',
'O2','Oz','O1',
'P9','P3','P7',
'CP5','C3','T7',
'FC5','F7','F3',
'FC1','FC2','Cz',
'CP1','CP2','Pz')
# Significantly improves gradient descent
data = data*1e5 # Increase size of values
# Remove 1 subject (#19) as test set data
test_data = data[np.where(subject_list == 19)]
test_labels = data_labels[np.where(subject_list == 19)]
data = data[np.where(subject_list != 19)]
data_labels = data_labels[np.where(subject_list != 19)]
# Up/downsample the data to balance classes
data, data_labels = bootstrap(data, data_labels, "downsample")
# Create train, validation, test sets
rng = np.random.RandomState(5347) # Set random seed
indices = rng.permutation(data.shape[0])
split_train = 0.8
split_train = int(round(data.shape[0]*split_train))
train_idx = indices[:split_train]
val_idx = indices[split_train:]
train_data = data[train_idx,:]
train_labels = data_labels[train_idx]
val_data = data[val_idx,:]
val_labels = data_labels[val_idx]
def build_cnn(k_width=5, input_var=None):
# Input layer, as usual:
l_in = InputLayer(shape=(None, 1, 30), input_var=input_var)
l_conv1 = Conv1DLayer(incoming = l_in, num_filters = 16,
filter_size = k_width,
stride = 1, pad = 'same',
W = lasagne.init.Normal(std = 0.02),
nonlinearity = lasagne.nonlinearities.very_leaky_rectify)
l_pool1 = Pool1DLayer(incoming = l_conv1, pool_size = 2, stride = 2)
l_drop1 = lasagne.layers.dropout(l_pool1, p=.2)
l_fc = lasagne.layers.DenseLayer(
l_drop1,
num_units=512,
nonlinearity=lasagne.nonlinearities.rectify)
l_drop2 = lasagne.layers.dropout(l_fc, p=.2)
l_out = lasagne.layers.DenseLayer(
l_drop2,
num_units=2,
nonlinearity=lasagne.nonlinearities.softmax)
return l_out
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
# tqdm() can be removed if no visual progress bar is needed
for start_idx in tqdm(range(0, len(inputs) - batchsize + 1, batchsize)):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
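# Added illustration (not in the original script): iterate_minibatches() drops
# the trailing partial batch, as noted in the comment above. For data too large
# for memory, the same loop also works with a memory-mapped array, e.g.
# np.load('file.npy', mmap_mode='r') (file name is a placeholder).
if VERBOSE:
    _xs = np.arange(20).reshape(10, 2)
    _ys = np.arange(10)
    _n_batches = sum(1 for _ in iterate_minibatches(_xs, _ys, 4, shuffle=False))
    # 10 examples with batchsize 4 -> 2 full mini-batches; the last 2 examples are skipped.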
def main(model='cnn', batch_size=500, num_epochs=500, k_width=5):
# Prepare Theano variables for inputs and targets
input_var = T.tensor3('inputs')
target_var = T.ivector('targets')
network = build_cnn(k_width, input_var)
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# We could add some weight decay as well here, see lasagne.regularization.
l2_reg = regularize_network_params(network, l2)
loss += l2_reg * 0.00001
train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Create update expressions for training
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.1)
#updates = lasagne.updates.adam(loss, params, learning_rate=0.1)
# Create a loss expression for validation/testing. The crucial difference
# here is that we do a deterministic forward pass through the network,
# disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
# As a bonus, also create an expression for the classification accuracy:
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], [loss, train_acc], updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
training_hist = []
val_hist = []
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
print("Training epoch {}...".format(epoch+1))
train_err = 0
train_acc = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(train_data, train_labels, batch_size, shuffle=True):
inputs, targets = batch
err, acc = train_fn(inputs, targets)
train_err += err
train_acc += acc
train_batches += 1
if VERBOSE:
print("Epoch: {} | Mini-batch: {}/{} | Elapsed time: {:.2f}s".format(
epoch+1,
train_batches,
train_data.shape[0]/batch_size,
time.time()-start_time))
training_hist.append(train_err / train_batches)
# And a full pass over the validation data:
print("Validating epoch...")
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(val_data, val_labels, batch_size, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
val_hist.append(val_err / val_batches)
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" training accuracy:\t\t{:.2f} %".format(
train_acc / train_batches * 100))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# After training, we compute and print the test predictions/error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(test_data, test_labels, batch_size, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
test_perc = (test_acc / test_batches) * 100
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(test_perc))
# Plot learning
plt.plot(range(1, num_epochs+1), training_hist, label="Training")
plt.plot(range(1, num_epochs+1), val_hist, label="Validation")
plt.grid(True)
plt.title("Training Curve\nKernel size: (1,{}) - Test acc: {:.2f}%".format(k_width, test_perc))
plt.xlim(1, num_epochs+1)
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.legend(loc='best')
plt.show()
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
return test_perc
if GRID_SEARCH:
# Set filter sizes to search across (odd size only)
search_widths = range(3, 30, 4) # Across spatial domain (electrodes)
# Preallocate accuracy grid
grid_accuracy = np.empty((1, len(search_widths)))
num_kernels = grid_accuracy.size
cur_kernel = 0
for i, w in enumerate(search_widths):
# Train with current kernel size
cur_kernel += 1
print("***** Kernel {}/{} | Size: (1,{}) *****".format(cur_kernel, num_kernels, w))
cur_test_acc = main(batch_size=2000, num_epochs=20, k_width=w)
grid_accuracy[0, i] = cur_test_acc
# Show accuracy heatmap
fig, ax = plt.subplots(figsize=(10, 10))
heatmap = ax.imshow(grid_accuracy, cmap = plt.cm.bone, interpolation = 'mitchell')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
cb = plt.colorbar(heatmap, orientation='vertical', cax=cax)
cb.ax.set_title('Test Acc (%)', {'fontsize': 10, 'horizontalalignment': 'left'})
ax.grid(True)
ax.set_xlabel('Kernel Width', weight='bold')
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
ax.set_xticks(range(grid_accuracy.shape[1])) # X element position
ax.set_xticklabels(search_widths) # Labels for X axis
ax.get_yaxis().set_visible(False)
plt.show()
# Get highest accuracy and associated kernel size:
best_idx = np.unravel_index(grid_accuracy.argmax(), grid_accuracy.shape)
print("Highest accuracy: {:.2f}%".format(np.max(grid_accuracy)))
print("Best kernel size: (1,{})".format(search_widths[best_idx[1]]))
# Highest search accuracy: NA
# Best kernel size: NA
# Train model using ideal kernel size over more epochs
cur_test_acc = main(batch_size=2000, num_epochs=100,
k_width=search_widths[best_idx[1]])
# Final test accuracy: NA
else:
cur_test_acc = main(batch_size=2000, num_epochs=100, k_width=11) # 51.31%
| mit |
bsipocz/bokeh | bokeh/mpl_helpers.py | 11 | 5408 | "Helper functions for the mpl module."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
from itertools import cycle, islice
from scipy import interpolate, signal
from .models import GlyphRenderer
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def convert_color(mplcolor):
"Converts mpl color formats to Bokeh color formats."
charmap = dict(b="blue", g="green", r="red", c="cyan", m="magenta",
y="yellow", k="black", w="white")
if mplcolor in charmap:
return charmap[mplcolor]
try:
colorfloat = float(mplcolor)
if 0 <= colorfloat <= 1.0:
# This is a grayscale value
return tuple([int(255 * colorfloat)] * 3)
except:
pass
if isinstance(mplcolor, tuple):
# These will be floats in the range 0..1
return int(255 * mplcolor[0]), int(255 * mplcolor[1]), int(255 * mplcolor[2])
return mplcolor
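# Example conversions (added for illustration; values follow from the code above):
# convert_color('r') -> 'red'
# convert_color('0.5') -> (127, 127, 127) # grayscale string
# convert_color((0.2, 0.4, 0.8)) -> (51, 102, 204) # float RGB tuple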
def convert_dashes(dash):
""" Converts a Matplotlib dash specification
bokeh.properties.DashPattern supports the matplotlib named dash styles,
but not the little shorthand characters. This function takes care of
mapping those.
"""
mpl_dash_map = {
"-": "solid",
"--": "dashed",
":": "dotted",
"-.": "dashdot",
}
# If the value doesn't exist in the map, then just return the value back.
return mpl_dash_map.get(dash, dash)
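# For example (added note; mapping taken from mpl_dash_map above):
# convert_dashes('--') -> 'dashed'
# convert_dashes('-.') -> 'dashdot'
# convert_dashes('dotted') -> 'dotted' # unknown values pass through unchanged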
def delete_last_col(x):
"Just delete the last column of the array."
x = np.delete(x, (-1), axis=1)
return x
def get_props_cycled(col, prop, fx=lambda x: x):
""" We need to cycle the `get.property` list (where property can be colors,
line_width, etc) as matplotlib does. We use itertools for this
cycling and slice manipulation.
Parameters:
col: matplotlib collection object
prop: property we want to get from matplotlib collection
fx: function (optional) to transform the elements of the list obtained
after the property call. Defaults to the identity function.
"""
n = len(col.get_paths())
t_prop = [fx(x) for x in prop]
sliced = islice(cycle(t_prop), None, n)
return list(sliced)
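# Illustrative sketch (added; _FakeCollection is a stand-in, not a Bokeh or
# matplotlib class): a collection with 5 paths and a 2-element property list
# yields the cycled result ['r', 'g', 'r', 'g', 'r'].
# class _FakeCollection(object):
#     def get_paths(self):
#         return [None] * 5
# get_props_cycled(_FakeCollection(), ['r', 'g']) # -> ['r', 'g', 'r', 'g', 'r']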
def is_ax_end(r):
"Check if the 'name' (if it exists) in the Glyph's datasource is 'ax_end'"
if isinstance(r, GlyphRenderer):
try:
if r.data_source.data["name"] == "ax_end":
return True
except KeyError:
return False
else:
return False
def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.001, f3=5):
"""
Mimic a hand-drawn line from (x, y) data
Source: http://jakevdp.github.io/blog/2012/10/07/xkcd-style-plots-in-matplotlib/
Parameters
----------
x, y : array_like
arrays to be modified
xlim, ylim : data range
the assumed plot range for the modification. If not specified,
they will be guessed from the data
mag : float
magnitude of distortions
f1, f2, f3 : int, float, int
filtering parameters. f1 gives the size of the window, f2 gives
the high-frequency cutoff, f3 gives the size of the filter
Returns
-------
x, y : ndarrays
The modified lines
"""
x = np.asarray(x)
y = np.asarray(y)
# get limits for rescaling
if xlim is None:
xlim = (x.min(), x.max())
if ylim is None:
ylim = (y.min(), y.max())
if xlim[1] == xlim[0]:
xlim = ylim
if ylim[1] == ylim[0]:
ylim = xlim
# scale the data
x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
# compute the total distance along the path
dx = x_scaled[1:] - x_scaled[:-1]
dy = y_scaled[1:] - y_scaled[:-1]
dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))
# number of interpolated points is proportional to the distance
Nu = int(200 * dist_tot)
u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
# interpolate curve at sampled points
k = min(3, len(x) - 1)
res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
x_int, y_int = interpolate.splev(u, res[0])
# we'll perturb perpendicular to the drawn line
dx = x_int[2:] - x_int[:-2]
dy = y_int[2:] - y_int[:-2]
dist = np.sqrt(dx * dx + dy * dy)
# create a filtered perturbation
coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))
response = signal.lfilter(b, 1, coeffs)
x_int[1:-1] += response * dy / dist
y_int[1:-1] += response * dx / dist
# un-scale data
x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
return x_int, y_int
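# Example use (added sketch; relies only on the numpy/scipy imports above):
# t = np.linspace(0, 10, 100)
# x_hand, y_hand = xkcd_line(t, np.sin(t), mag=1.0)
# x_hand/y_hand are the jittered, hand-drawn-looking versions of the input line.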
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/tests/test_pickle.py | 1 | 8253 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import cPickle as pickle
from matplotlib.externals.six.moves import xrange
from io import BytesIO
from nose.tools import assert_equal, assert_not_equal
import numpy as np
from matplotlib.testing.decorators import cleanup, image_comparison
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
def depth_getter(obj,
current_depth=0,
depth_stack=None,
nest_info='top level object'):
"""
Returns a dictionary mapping:
id(obj): (shallowest_depth, obj, nest_info)
for the given object (and its subordinates).
This, in conjunction with recursive_pickle, can be used to debug
pickling issues, although finding others is sometimes a case of
trial and error.
"""
if depth_stack is None:
depth_stack = {}
if id(obj) in depth_stack:
stack = depth_stack[id(obj)]
if stack[0] > current_depth:
del depth_stack[id(obj)]
else:
return depth_stack
depth_stack[id(obj)] = (current_depth, obj, nest_info)
if isinstance(obj, (list, tuple)):
for i, item in enumerate(obj):
depth_getter(item, current_depth=current_depth + 1,
depth_stack=depth_stack,
nest_info=('list/tuple item #%s in '
'(%s)' % (i, nest_info)))
else:
if isinstance(obj, dict):
state = obj
elif hasattr(obj, '__getstate__'):
state = obj.__getstate__()
if not isinstance(state, dict):
state = {}
elif hasattr(obj, '__dict__'):
state = obj.__dict__
else:
state = {}
for key, value in six.iteritems(state):
depth_getter(value, current_depth=current_depth + 1,
depth_stack=depth_stack,
nest_info=('attribute "%s" in '
'(%s)' % (key, nest_info)))
return depth_stack
def recursive_pickle(top_obj):
"""
Recursively pickle all of the given objects subordinates, starting with
the deepest first. **Very** handy for debugging pickling issues, but
also very slow (as it literally pickles each object in turn).
Handles circular object references gracefully.
"""
objs = depth_getter(top_obj)
# sort by depth then by nest_info
objs = sorted(six.itervalues(objs), key=lambda val: (-val[0], val[2]))
for _, obj, location in objs:
try:
pickle.dump(obj, BytesIO(), pickle.HIGHEST_PROTOCOL)
except Exception as err:
print(obj)
print('Failed to pickle %s. \n Type: %s. Traceback '
'follows:' % (location, type(obj)))
raise
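# Typical debugging use (added sketch, mirroring the commented-out calls in the
# tests below): build a figure, then walk its object graph to locate anything
# that fails to pickle.
# fig = plt.figure()
# plt.plot(range(3))
# recursive_pickle(fig) # prints the offending object and its location, then re-raises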
@cleanup
def test_simple():
fig = plt.figure()
# un-comment to debug
# recursive_pickle(fig)
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
ax = plt.subplot(121)
pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
ax = plt.axes(projection='polar')
plt.plot(list(xrange(10)), label='foobar')
plt.legend()
# Uncomment to debug any unpicklable objects. This is slow so is not
# uncommented by default.
# recursive_pickle(fig)
pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
# ax = plt.subplot(121, projection='hammer')
# recursive_pickle(ax, 'figure')
# pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
plt.figure()
plt.bar(left=list(xrange(10)), height=list(xrange(10)))
pickle.dump(plt.gca(), BytesIO(), pickle.HIGHEST_PROTOCOL)
fig = plt.figure()
ax = plt.axes()
plt.plot(list(xrange(10)))
ax.set_yscale('log')
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
@cleanup
@image_comparison(baseline_images=['multi_pickle'],
extensions=['png'], remove_text=True)
def test_complete():
fig = plt.figure('Figure with a label?', figsize=(10, 6))
plt.suptitle('Can you fit any more in a figure?')
# make some arbitrary data
x, y = np.arange(8), np.arange(10)
data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
v = np.sin(v * -0.6)
plt.subplot(3, 3, 1)
plt.plot(list(xrange(10)))
plt.subplot(3, 3, 2)
plt.contourf(data, hatches=['//', 'ooo'])
plt.colorbar()
plt.subplot(3, 3, 3)
plt.pcolormesh(data)
plt.subplot(3, 3, 4)
plt.imshow(data)
plt.subplot(3, 3, 5)
plt.pcolor(data)
ax = plt.subplot(3, 3, 6)
ax.set_xlim(0, 7)
ax.set_ylim(0, 9)
plt.streamplot(x, y, u, v)
ax = plt.subplot(3, 3, 7)
ax.set_xlim(0, 7)
ax.set_ylim(0, 9)
plt.quiver(x, y, u, v)
plt.subplot(3, 3, 8)
plt.scatter(x, x**2, label='$x^2$')
plt.legend(loc='upper left')
plt.subplot(3, 3, 9)
plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4)
###### plotting is done, now test its pickle-ability #########
# Uncomment to debug any unpicklable objects. This is slow (~200 seconds).
# recursive_pickle(fig)
result_fh = BytesIO()
pickle.dump(fig, result_fh, pickle.HIGHEST_PROTOCOL)
plt.close('all')
# make doubly sure that there are no figures left
assert_equal(plt._pylab_helpers.Gcf.figs, {})
# wind back the fh and load in the figure
result_fh.seek(0)
fig = pickle.load(result_fh)
# make sure there is now a figure manager
assert_not_equal(plt._pylab_helpers.Gcf.figs, {})
assert_equal(fig.get_label(), 'Figure with a label?')
@cleanup
def test_no_pyplot():
# tests pickle-ability of a figure not created with pyplot
from matplotlib.backends.backend_pdf import FigureCanvasPdf as fc
from matplotlib.figure import Figure
fig = Figure()
_ = fc(fig)
ax = fig.add_subplot(1, 1, 1)
ax.plot([1, 2, 3], [1, 2, 3])
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
@cleanup
def test_renderer():
from matplotlib.backends.backend_agg import RendererAgg
renderer = RendererAgg(10, 20, 30)
pickle.dump(renderer, BytesIO())
@cleanup
def test_image():
# Prior to v1.4.0 the Image would cache data which was not picklable
# once it had been drawn.
from matplotlib.backends.backend_agg import new_figure_manager
manager = new_figure_manager(1000)
fig = manager.canvas.figure
ax = fig.add_subplot(1, 1, 1)
ax.imshow(np.arange(12).reshape(3, 4))
manager.canvas.draw()
pickle.dump(fig, BytesIO())
@cleanup
def test_grid():
from matplotlib.backends.backend_agg import new_figure_manager
manager = new_figure_manager(1000)
fig = manager.canvas.figure
ax = fig.add_subplot(1, 1, 1)
ax.grid()
# Drawing the grid triggers instance methods to be attached
# to the Line2D object (_lineFunc).
manager.canvas.draw()
pickle.dump(ax, BytesIO())
@cleanup
def test_polar():
ax = plt.subplot(111, polar=True)
fig = plt.gcf()
result = BytesIO()
pf = pickle.dumps(fig)
pickle.loads(pf)
plt.draw()
class TransformBlob(object):
def __init__(self):
self.identity = mtransforms.IdentityTransform()
self.identity2 = mtransforms.IdentityTransform()
# Force use of the more complex composition.
self.composite = mtransforms.CompositeGenericTransform(
self.identity,
self.identity2)
# Check parent -> child links of TransformWrapper.
self.wrapper = mtransforms.TransformWrapper(self.composite)
# Check child -> parent links of TransformWrapper.
self.composite2 = mtransforms.CompositeGenericTransform(
self.wrapper,
self.identity)
def test_transform():
obj = TransformBlob()
pf = pickle.dumps(obj)
del obj
obj = pickle.loads(pf)
# Check parent -> child links of TransformWrapper.
assert_equal(obj.wrapper._child, obj.composite)
# Check child -> parent links of TransformWrapper.
assert_equal(list(obj.wrapper._parents.values()), [obj.composite2])
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s'])
| mit |
gpfreitas/bokeh | bokeh/sampledata/periodic_table.py | 45 | 1542 | '''
This module provides the periodic table as a data set. It exposes an attribute 'elements'
which is a pandas dataframe with the following fields
elements['atomic Number']
elements['symbol']
elements['name']
elements['atomic mass'] (units: amu)
elements['CPK'] (convention for molecular modeling color)
elements['electronic configuration']
elements['electronegativity'] (units: Pauling)
elements['atomic radius'] (units: pm)
elements['ionic radius'] (units: pm)
elements['van der waals radius'] (units: pm)
elements['ionization energy'] (units: kJ/mol)
elements['electron affinity'] (units: kJ/mol)
elements['phase'] (standard state: solid, liquid, gas)
elements['bonding type']
elements['melting point'] (units: K)
elements['boiling point'] (units: K)
elements['density'] (units: g/cm^3)
elements['type'] (see below)
elements['year discovered']
elements['group']
elements['period']
element types: actinoid, alkali metal, alkaline earth metal, halogen, lanthanoid, metal, metalloid, noble gas, nonmetal, transition metal
'''
from __future__ import absolute_import
from os.path import dirname, join
try:
import pandas as pd
except ImportError as e:
raise RuntimeError("elements data requires pandas (http://pandas.pydata.org) to be installed")
elements = pd.read_csv(join(dirname(__file__), 'elements.csv'))
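# Example queries (added sketch; column names are taken from the docstring
# above, and exact dtypes depend on elements.csv):
# noble_gases = elements[elements['type'] == 'noble gas']
# solids = elements[elements['phase'] == 'solid']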
| bsd-3-clause |
hamid-omid/search_relevance | xgbFunctions.py | 1 | 6347 | '''
Required functions for xgb.py
__Author__:
Ali Narimani
__Version__:
2.1
'''
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import mean_squared_error, make_scorer
import re
from nltk.stem.porter import *
### Keys:
Snow = True # stemmer choice
####
if Snow:
from nltk.stem.snowball import SnowballStemmer #0.003 improvement but takes twice as long as PorterStemmer
stemmer = SnowballStemmer('english')
else:
stemmer = PorterStemmer()
def DeepClean(word):
word = word.replace('kholerhighland', 'kohler highline')
word = word.replace('smart', ' smart ')
word = word.replace('residential', ' residential ')
word = word.replace('whirlpool', ' whirlpool ')
word = word.replace('alexandrea',' alexandria ')
word = word.replace('bicycle',' bicycle ')
word = word.replace('non',' non ')
word = word.replace('replacement',' replacement')
word = word.replace('mowerectrical', 'mow electrical')
word = word.replace('dishwaaher', 'dishwasher')
word = word.replace('fairfield',' fairfield ')
word = word.replace('hooverwindtunnel','hoover windtunnel')
word = word.replace('airconditionerwith','airconditioner with ')
word = word.replace('pfistersaxton', 'pfister saxton')
word = word.replace('eglimgton','ellington')
word = word.replace('chrome', ' chrome ')
word = word.replace('foot', ' foot ')
word = word.replace('samsung', ' samsung ')
word = word.replace('galvanised', ' galvanised ')
word = word.replace('exhaust', ' exhaust ')
word = word.replace('reprobramable', 'reprogramable')
word = word.replace('rackcloset', 'rack closet ')
word = word.replace('hamptonbay', ' hampton bay ')
word = word.replace('cadet', ' cadet ')
word = word.replace('weatherstripping', 'weather stripping')
word = word.replace('poyurethane', 'polyurethane')
word = word.replace('refrigeratorators','refrigerator')
word = word.replace('baxksplash','backsplash')
word = word.replace('inches',' inch ')
word = word.replace('conditioner',' conditioner ')
word = word.replace('landscasping',' landscaping ')
word = word.replace('discontinuedbrown',' discontinued brown ')
word = word.replace('drywall',' drywall ')
word = word.replace('carpet', ' carpet ')
word = word.replace('less', ' less ')
word = word.replace('tub', ' tub')
word = word.replace('tubs', ' tub ')
word = word.replace('marble',' marble ')
word = word.replace('replaclacemt',' replacement ')
word = word.replace('non',' non ')
word = word.replace('soundfroofing', 'sound proofing')
return word
def str_stem(s):
if isinstance(s, str):
s = s.lower()
s = DeepClean(s)
s = re.sub(r"(\w)\.([A-Z])", r"\1 \2", s)
s = re.sub(r"([0-9]+)( *)(inches|inch|in|')\.?", r"\1in. ", s)
s = re.sub(r"([0-9]+)( *)(foot|feet|ft|'')\.?", r"\1ft. ", s)
s = re.sub(r"([0-9]+)( *)(pounds|pound|lbs|lb)\.?", r"\1lb. ", s)
s = s.replace(" x "," xby ")
s = s.replace("*"," xby ")
s = s.replace(" by "," xby")
s = s.replace("x0"," xby 0")
s = s.replace("x1"," xby 1")
s = s.replace("x2"," xby 2")
s = s.replace("x3"," xby 3")
s = s.replace("x4"," xby 4")
s = s.replace("x5"," xby 5")
s = s.replace("x6"," xby 6")
s = s.replace("x7"," xby 7")
s = s.replace("x8"," xby 8")
s = s.replace("x9"," xby 9")
s = s.replace("0x","0 xby ")
s = s.replace("1x","1 xby ")
s = s.replace("2x","2 xby ")
s = s.replace("3x","3 xby ")
s = s.replace("4x","4 xby ")
s = s.replace("5x","5 xby ")
s = s.replace("6x","6 xby ")
s = s.replace("7x","7 xby ")
s = s.replace("8x","8 xby ")
s = s.replace("9x","9 xby ")
s = re.sub(r"([0-9]+)( *)(square|sq) ?\.?(feet|foot|ft)\.?", r"\1sq.ft. ", s)
s = re.sub(r"([0-9]+)( *)(gallons|gallon|gal)\.?", r"\1gal. ", s)
s = re.sub(r"([0-9]+)( *)(ounces|ounce|oz)\.?", r"\1oz. ", s)
s = re.sub(r"([0-9]+)( *)(centimeters|cm)\.?", r"\1cm. ", s)
s = re.sub(r"([0-9]+)( *)(milimeters|mm)\.?", r"\1mm. ", s)
s = re.sub(r"([0-9]+)( *)(degrees|degree)\.?", r"\1deg. ", s)
s = re.sub(r"([0-9]+)( *)(volts|volt)\.?", r"\1volt. ", s)
s = re.sub(r"([0-9]+)( *)(watts|watt)\.?", r"\1watt. ", s)
s = re.sub(r"([0-9]+)( *)(amperes|ampere|amps|amp)\.?", r"\1amp. ", s)
s = s.replace("whirpool","whirlpool")
s = s.replace("whirlpoolga", "whirlpool")
s = s.replace("whirlpoolstainless","whirlpool stainless")
s = s.replace(" "," ")
s = (" ").join([stemmer.stem(z) for z in s.split(" ")])
if s == '':
s = 'null'
return s.lower()
else:
return "null"
def str_common_word(str1, str2):
words, cnt = str1.split(), 0
for word in words:
if str2.find(word)>=0:
cnt+=1
return cnt
def str_whole_word(str1, str2, i_):
cnt = 0
while i_ < len(str2):
i_ = str2.find(str1, i_)
if i_ == -1:
return cnt
else:
cnt += 1
i_ += len(str1)
return cnt
def jaccard(a, b):
a = set(a.split())
b = set(b.split())
c = a.intersection(b)
return float(len(c)) / (len(a) + len(b) - len(c))
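# Worked example (added comment): jaccard('deck screws', 'wood deck screws')
# -> intersection {'deck', 'screws'} has 2 members, the union has 3, so 2/3 ~ 0.67.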
class cust_regression_vals(BaseEstimator, TransformerMixin):
def fit(self, x, y=None):
return self
def transform(self, hd_searches):
d_col_drops=['id','relevance','search_term','product_title','product_description','attr','brand','Synonym',\
'material','color']
hd_searches = hd_searches.drop(d_col_drops,axis=1).values
return hd_searches
class cust_txt_col(BaseEstimator, TransformerMixin):
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key].apply(str)
def fmean_squared_error(ground_truth, predictions):
fmean_squared_error_ = mean_squared_error(ground_truth, predictions)**0.5
return fmean_squared_error_
RMSE = make_scorer(fmean_squared_error, greater_is_better=False)
| mit |
bzero/networkx | examples/multigraph/chess_masters.py | 54 | 5146 | #!/usr/bin/env python
"""
An example of the MultiDiGraph class.
The function chess_pgn_graph reads a collection of chess
matches stored in the specified PGN file
(PGN = "Portable Game Notation").
Here the (compressed) default file ---
chess_masters_WCC.pgn.bz2 ---
contains all 685 World Chess Championship matches
from 1886 - 1985.
(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php)
The chess_pgn_graph() function returns a MultiDiGraph
with multiple edges. Each node is
the last name of a chess master. Each edge is directed
from white to black and contains selected game info.
The key statement in chess_pgn_graph below is
G.add_edge(white, black, game_info)
where game_info is a dict describing each game.
"""
# Copyright (C) 2006-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
# tag names specifying what game info should be
# stored in the dict on each digraph edge
game_details=["Event",
"Date",
"Result",
"ECO",
"Site"]
def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"):
"""Read chess games in pgn format in pgn_file.
Filenames ending in .gz or .bz2 will be uncompressed.
Return the MultiDiGraph of players connected by a chess game.
Edges contain game data in a dict.
"""
import bz2
G=nx.MultiDiGraph()
game={}
datafile = bz2.BZ2File(pgn_file)
lines = (line.decode().rstrip('\r\n') for line in datafile)
for line in lines:
if line.startswith('['):
tag,value=line[1:-1].split(' ',1)
game[str(tag)]=value.strip('"')
else:
# empty line after tag set indicates
# we finished reading game info
if game:
white=game.pop('White')
black=game.pop('Black')
G.add_edge(white, black, **game)
game={}
return G
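# Minimal usage sketch (added; assumes the default chess_masters_WCC.pgn.bz2 is
# present, as in the __main__ block below):
# G = chess_pgn_graph()
# for white, black, info in G.edges(data=True):
#     pass # info carries the Event/Date/Result/ECO/Site tags listed above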
if __name__ == '__main__':
import networkx as nx
G=chess_pgn_graph()
ngames=G.number_of_edges()
nplayers=G.number_of_nodes()
print("Loaded %d chess games between %d players\n"\
% (ngames,nplayers))
# identify connected components
# of the undirected version
Gcc=list(nx.connected_component_subgraphs(G.to_undirected()))
if len(Gcc)>1:
print("Note the disconnected component consisting of:")
print(Gcc[1].nodes())
# find all games with B97 opening (as described in ECO)
openings=set([game_info['ECO']
for (white,black,game_info) in G.edges(data=True)])
print("\nFrom a total of %d different openings,"%len(openings))
print('the following games used the Sicilian opening')
print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n')
for (white,black,game_info) in G.edges(data=True):
if game_info['ECO']=='B97':
print(white,"vs",black)
for k,v in game_info.items():
print(" ",k,": ",v)
print("\n")
try:
import matplotlib.pyplot as plt
except ImportError:
import sys
print("Matplotlib needed for drawing. Skipping")
sys.exit(0)
# make new undirected graph H without multi-edges
H=nx.Graph(G)
# edge width is proportional number of games played
edgewidth=[]
for (u,v,d) in H.edges(data=True):
edgewidth.append(len(G.get_edge_data(u,v)))
# node size is proportional to number of games won
wins=dict.fromkeys(G.nodes(),0.0)
for (u,v,d) in G.edges(data=True):
r=d['Result'].split('-')
if r[0]=='1':
wins[u]+=1.0
elif r[0]=='1/2':
wins[u]+=0.5
wins[v]+=0.5
else:
wins[v]+=1.0
try:
pos=nx.graphviz_layout(H)
except:
pos=nx.spring_layout(H,iterations=20)
plt.rcParams['text.usetex'] = False
plt.figure(figsize=(8,8))
nx.draw_networkx_edges(H,pos,alpha=0.3,width=edgewidth, edge_color='m')
nodesize=[wins[v]*50 for v in H]
nx.draw_networkx_nodes(H,pos,node_size=nodesize,node_color='w',alpha=0.4)
nx.draw_networkx_edges(H,pos,alpha=0.4,node_size=0,width=1,edge_color='k')
nx.draw_networkx_labels(H,pos,fontsize=14)
font = {'fontname' : 'Helvetica',
'color' : 'k',
'fontweight' : 'bold',
'fontsize' : 14}
plt.title("World Chess Championship Games: 1886 - 1985", font)
# change font and write text (using data coordinates)
font = {'fontname' : 'Helvetica',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 14}
plt.text(0.5, 0.97, "edge width = # games played",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.text(0.5, 0.94, "node size = # games won",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.axis('off')
plt.savefig("chess_masters.png",dpi=75)
print("Wrote chess_masters.png")
plt.show() # display
| bsd-3-clause |
joernhees/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 63 | 3231 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
shikhardb/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
youngmp/park_and_ermentrout_2016 | trb2simple_dat/fourier_approx.py | 2 | 4130 | """
Plot data and compute Fourier coefficients
Youngmin Park
[email protected]
(Thank you Mario for the help and inspiration)
TODO:
1. Let user choose coefficients up to x% of the L2 norm
2. Add code to write coefficients to file
"""
try:
import matplotlib.pylab as mp
matplotlib_module = True
except ImportError:
print "You must have matplotlib installed to generate plots"
matplotlib_module = False
try:
import numpy as np
#np.random.seed(0)
numpy_module = True
except ImportError:
print "You must have numpy installed to run this script"
numpy_module = False
try:
from scipy.optimize import brentq
scipy_module = True
except ImportError:
print "You must have numpy installed to run this script"
scipy_module = False
def manual_ift(bc,freqc,idxc,N):
"""
manual inverse fourier transform
bc: nonzero output from np.fft.fft
freqc: corresponding freq. component
idxc: corresponding index of freq. component
"""
# define domain
n = np.linspace(0,N-1,N)
tot = 0
c = 0 # counter
# for select k, compute value at each n
for k in idxc:
tot += np.real(bc[c])*np.cos(k*2*np.pi*n/N) - np.imag(bc[c])*np.sin(k*2*np.pi*n/N)
c += 1
return tot
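# Added explanatory note: the loop above evaluates the real part of the inverse
# DFT restricted to the kept frequencies,
#   x[n] = sum_k ( Re(b_k)*cos(2*pi*k*n/N) - Im(b_k)*sin(2*pi*k*n/N) ),
# where b_k are the selected coefficients (already divided by N in main()).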
def manual_ift_sin(bc,freqc,idxc,N):
"""
manual inverse fourier transform for sine terms only
bc: nonzero output from np.fft.fft
freqc: corresponding freq. component
idxc: corresponding index of freq. component
"""
# define domain
n = np.linspace(0,N-1,N)
tot = 0
c = 0 # counter
# for select k, compute value at each n
for k in idxc:
#print np.imag(bc[c])
tot += np.imag(bc[c])*np.sin(k*2*np.pi*n/N)
#print k,np.imag(bc[c]),'sin'
c += 1
return tot
def manual_ift_cos(bc,freqc,idxc,N):
"""
manual inverse fourier transform for cosine terms only
bc: nonzero output from np.fft.fft
freqc: corresponding freq. component
idxc: corresponding index of freq. component
"""
# define domain
n = np.linspace(0,N-1,N)
tot = 0
c = 0 # counter
# for select k, compute value at each n
for k in idxc:
tot += np.real(bc[c])*np.cos(k*2*np.pi*n/N)
#print k,np.real(bc[c]),'cos'
c += 1
return tot
def amp_cutoff(x,n,fcoeff):
# goal: find ideal x s.t. sum(coeff_array_idx) = n
"""
fcoeff: output from np.fft.fft
x: cutoff for magnitude of fourier coefficient
n: desired number of fourier coefficients
"""
coeff_array_idx = np.absolute(fcoeff) > x
return sum(coeff_array_idx) - n
def main():
# load/define data
dat = np.loadtxt("fourier/hfun.gm_0.5.dat")
dat = dat[:,1]
#dat2 = np.sin(np.linspace(0,10,N))
#dom = np.linspace(0,1000,1000)
N = len(dat)
dom = np.linspace(0,N,N)
#print np.shape(dat), np.shape(dat2)
# get Fourier transform and frequencies
fcoeff = np.fft.fft(dat)
ffreq = np.fft.fftfreq(dat.size)
# find cutoff x for desired number of coefficients
n = 5 # desired # of coefficients
x = brentq(amp_cutoff,0,np.amax(np.abs(fcoeff)),args=(n,fcoeff))
# array corresponding to desired coefficients
coeff_array_idx = np.absolute(fcoeff) > x
# build list of desired coefficients
b = fcoeff*coeff_array_idx
# extract corresponding frequencies
freq = ffreq*coeff_array_idx
# build lits of only desired coeff & freq
bc = fcoeff[coeff_array_idx]/N
freqc = ffreq[coeff_array_idx]
idxc = np.nonzero(coeff_array_idx)[0]
#print bc
# come back to time domain
c = np.fft.ifft(b)
# or
c2 = manual_ift(bc,freqc,idxc,N)
# for sine/cosine component only:
c3 = manual_ift_sin(bc,freqc,idxc,N)
c4 = manual_ift_cos(bc,freqc,idxc,N)
# add option to write coefficients to file
if True:
pass
if matplotlib_module:
mp.figure()
mp.plot(dat)
mp.plot(c4-c3)
#mp.plot(c)
#mp.plot(c2)
#mp.plot(c3)
#mp.plot(c4)
mp.show()
if __name__ == "__main__":
main()
| bsd-2-clause |
martinjrobins/paper_crowding | src/parameter_sweep.py | 1 | 5811 | #!/usr/bin/python
import os
import subprocess
from __builtin__ import range
import threading
from math import pi
import numpy
import csv
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import mayavi.mlab as mlab
k_b = 1.3806488e-23
diameter = 5e-9
T = 300.0
viscosity = 8.9e-4
D = k_b*T/(3.0*pi*viscosity*diameter);
time = 1e-6
nout = 500
run_dir = "/scratch/robinsonm/git/paper_crowding"
#run_dir = "/home/mrobins/git/paper_crowding"
data_dir = "/mi/share/scratch/robinsonm/data/crowding/self_crowding"
#data_dir = "/home/mrobins/tmp"
def run(k_s, sl_div_diam, vol_ratio):
new_dir = str(k_s)+"_"+str(sl_div_diam)+"_"+str(vol_ratio)
print "creating subdir ",new_dir
if not os.path.exists(data_dir + "/" + new_dir):
os.makedirs(data_dir + "/" + new_dir)
subprocess.call([run_dir+"/self_crowding",data_dir + "/" + new_dir,
str(time),str(nout),str(k_s),str(sl_div_diam),str(vol_ratio)])
def run_sweep():
k_s_sweep = [10**(x-5) for x in range(10)]
print "k_s = ",k_s_sweep
sl_div_diam_sweep = [5.0*(x+1)/1000 for x in range(20)]
print "sl_div_diam = ",sl_div_diam_sweep
vol_ratio_sweep = [5.0*(x+1)/100 for x in range(10)]
print "vol_ratio = ",vol_ratio_sweep
params = []
for k_s in k_s_sweep:
for sl_div_diam in sl_div_diam_sweep:
for vol_ratio in vol_ratio_sweep:
params.append((k_s,sl_div_diam,vol_ratio))
n_cpu = 4
num_its = int(len(params)/n_cpu)
for i in range(num_its):
threads = [threading.Thread(target = run, args = params[i*n_cpu+j]) for j in range(n_cpu)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def calc_average_rdf(k_s, sl_div_diam, vol_ratio):
subdir = str(k_s)+"_"+str(sl_div_diam)+"_"+str(vol_ratio)
dir = data_dir + "/" + subdir
print "reducing dir ",dir
data = numpy.zeros((100,2,400))
for i in range(100,500):
data[:,:,i-100] = numpy.loadtxt(dir+ "/rdf%05d.csv"%i, delimiter=',', usecols = (0,1))
return numpy.average(data, 2)
def reduce(k_s, sl_div_diam, vol_ratio):
subdir = str(k_s)+"_"+str(sl_div_diam)+"_"+str(vol_ratio)
dir = data_dir + "/" + subdir
print "reducing dir ",dir
data = numpy.loadtxt(dir+ "/" + 'msd.csv', delimiter=',')
valid_range = data[20:,:]
t = valid_range[:,1]
msd = valid_range[:,2]
#flux = valid_range[:,3]
A = numpy.array([t,numpy.ones(len(t))])
msd_fit = numpy.linalg.lstsq(A.T, msd)[0]
#flux_fit = numpy.linalg.lstsq(A.T, flux)[0]
rdf = numpy.zeros((100,2,400))
for i in range(100,500):
rdf[:,:,i-100] = numpy.loadtxt(dir+ "/rdf%05d.csv"%i, delimiter=',', usecols = (0,1))
numpy.savetxt(dir+ "/rdf_average.csv", numpy.average(rdf,2), delimiter=',')
return msd_fit[0]/6.0
def reduce_sweep():
k_s_sweep = [10**(x-5) for x in range(10)]
print "k_s = ",k_s_sweep
sl_div_diam_sweep = [5.0*(x+1)/1000 for x in range(20)]
print "sl_div_diam = ",sl_div_diam_sweep
vol_ratio_sweep = [5.0*(x+1)/100 for x in range(10)]
print "vol_ratio = ",vol_ratio_sweep
params = []
Dmsd = numpy.zeros((10,20,10))
with open(data_dir + "/D_row.csv", 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='#')
for k_s,i in zip(k_s_sweep,range(10)):
for sl_div_diam,j in zip(sl_div_diam_sweep,range(20)):
for vol_ratio,k in zip(vol_ratio_sweep,range(10)):
Dmsd[i,j,k] = reduce(k_s,sl_div_diam,vol_ratio)
writer.writerow((k_s,sl_div_diam,vol_ratio,Dmsd[i,j,k]))
numpy.save(data_dir + "/D_numpy.npy", Dmsd)
def showDsweep():
k_s_sweep = [10**(x-5) for x in range(10)]
sl_div_diam_sweep = [5.0*(x+1)/1000 for x in range(20)]
vol_ratio_sweep = [5.0*(x+1)/100 for x in range(10)]
Dmsd = numpy.load(data_dir + "/D_numpy.npy")
kDa = 1.660538921e-30;
mass = 40.0*kDa;
viscosity = 8.9e-4;
diameter = 5e-9;
T = 300.0;
Dbase = k_b*T/(3.0*numpy.pi*viscosity*diameter);
Dmsd = Dmsd/Dbase
mlab.figure(1, size=(800, 800), fgcolor=(1, 1, 1),
bgcolor=(0.5, 0.5, 0.5))
mlab.clf()
contours = numpy.arange(0.01,2,0.2).tolist()
obj = mlab.contour3d(Dmsd,contours=contours,transparent=True,vmin=contours[0],vmax=contours[-1])
outline = mlab.outline(color=(.7, .7, .7),extent=(0,10,0,20,0,10))
axes = mlab.axes(outline, color=(.7, .7, .7),
nb_labels = 5,
ranges=(k_s_sweep[0], k_s_sweep[-1], sl_div_diam_sweep[0], sl_div_diam_sweep[-1], vol_ratio_sweep[0], vol_ratio_sweep[-1]),
xlabel='spring stiffness',
ylabel='step length',
zlabel='volume ratio')
mlab.colorbar(obj,title='D',nb_labels=5)
mlab.show()
def plots():
k_s_sweep = [10**(x-5) for x in range(10)]
sl_div_diam_sweep = [5.0*(x+1)/1000 for x in range(20)]
sl_div_diam_sweep.reverse()
vol_ratio = 0.3
plt.figure(figsize=(6,4.5))
for k_s,i in zip(k_s_sweep,range(10)):
plt.clf()
for sl_div_diam,j in zip(sl_div_diam_sweep,range(20)):
subdir = str(k_s)+"_"+str(sl_div_diam)+"_"+str(vol_ratio)
dir = data_dir + "/" + subdir
rdf = numpy.loadtxt(dir+ "/rdf_average.csv", delimiter=',', usecols = (0,1))
plt.plot(rdf[:,0],rdf[:,1],label="step = %f"%(sl_div_diam),color=(1-j/20.0, 1-j/20.0, 1.0))
plt.xlabel('$r$')
plt.ylabel('RDF')
print "saving file /rdf_%f.pdf"%(k_s)
plt.savefig(data_dir + "/rdf_%f.pdf"%(k_s))
if __name__ == '__main__':
#reduce_sweep()
plots()
| gpl-2.0 |
goerz/mgplottools | mgplottools/mpl.py | 1 | 16768 | """
Support routines for matplotlib plotting.
The module also contains a standard palette of colors (`colors` module
dictionary) and line styles (`ls` module dictionary).
For the colors, it is recommended to set up the color cycle by hand in your
matplotlibrc file. Alternatively, you can call
>>> mgplottools.mpl.set_color_cycle()
in order to enforce the use of the module color palette at runtime.
For the linestyles (which are encoded as "dashes"), you may use a cyclic
iterator while plotting:
>>> ls_cycle = mgplottools.mpl.new_ls_cycle()
>>> ax.plot(linspace(0, 10, 100), linspace(0, 10, 100),
>>> dashes=next(ls_cycle))
You must create a new cycle object each time you want to restart the cycle
(e.g. for a new panel)
"""
from __future__ import print_function, absolute_import
from itertools import cycle
import matplotlib
import matplotlib.figure
import numpy as np
import os
from matplotlib.ticker import AutoMinorLocator, FormatStrFormatter
cm2inch = 0.39370079
# colors
colors = {
"white" : (255, 255, 255), #ffffff
"black" : (0, 0, 0), #000000
"blue" : (55, 126, 184), #377eb8
"orange" : (255, 127, 0), #ff7f00
"red" : (228, 26, 28), #e41a1c
"green" : (77, 175, 74), #4daf4a
"purple" : (152, 78, 163), #984ea3
"brown" : (166, 86, 40), #a65628
"pink" : (247, 129, 191), #f781bf
"yellow" : (210, 210, 21), #d2d215
"lightred" : (251, 154, 153), #fb9a99
"lightblue" : (166, 206, 227), #a6cee3
"lightorange" : (253, 191, 111), #fdbf6f
"lightgreen" : (178, 223, 138), #b2df8a
"lightpurple" : (202, 178, 214), #cab2d6
"grey" : (153, 153, 153), #999999
}
def get_color(name, alpha=0.0, format='web'):
"""
Return color for the given color name, depending on `format`.
If format is 'rgb', return (r,g,b) tuple ( values in [0,1) )
If format is 'rgba', return (r,g,b,a) tuple ( values in [0,1) ), where a is
the given alpha value
If format is 'web', return rgb hex string (with '#' prefix)
"""
r, g, b = colors[name.lower()]
if format == 'web':
return "#%02x%02x%02x" % (r, g, b)
if format == 'rgb':
return (r, g, b)
elif format == 'rgba':
return (r, g, b, alpha)
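# Illustrative examples (a sketch; the values follow directly from the
# `colors` table above):
#
#     >>> get_color('blue')                   # '#377eb8'
#     >>> get_color('grey', format='web')     # '#999999'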
def set_color_cycle(color_cycle=None):
"""
Set the automatic matplotlib color cycle to the given array of color names.
`color_cycle` must be an array of color names that appear in the module
`color` dictionary. It defaults to
["blue", "orange", "red", "green", "purple", "brown", "pink", "yellow",
"lightred", "lightblue", "lightorange", "lightgreen", "lightpurple"]
"""
if color_cycle is None:
color_cycle = ["blue", "orange", "red", "green", "purple",
"brown", "pink", "yellow", "lightred", "lightblue", "lightorange",
"lightgreen", "lightpurple"]
matplotlib.rc('axes',
color_cycle=[get_color(cname) for cname in color_cycle])
# line styles
ls = {
"solid" : (None, None),
"dashed" : (4,1.5),
"long-dashed" : (8,1),
"double-dashed" : (3,1,3,2.5),
"dash-dotted" : (5,1,1,1),
"dot-dot-dashed" : (1,1,1,1,7,1),
"dash-dash-dotted" : (4,1,4,1,1,1),
"dotted" : (1,1),
"double-dotted" : (1,1,1,3),
}
def new_ls_cycle(ls_cycle=None):
"""
Return a cyclic iterator of linestyles (dashes)
ls_cycle must be an array of line style names that appear in the module
`ls` dictionary. It defaults to
["solid", "dashed", "long-dashed", "dash-dotted", "dash-dash-dotted",
"dot-dot-dashed", "double-dashed", "dotted", "double-dotted"]
Use as:
>>> ls_cycle = mgplottools.mpl.new_ls_cycle()
>>> ax.plot(linspace(0, 10, 100), linspace(0, 10, 100),
>>> dashes=next(ls_cycle))
"""
if ls_cycle is None:
ls_cycle = ["solid", "dashed", "long-dashed", "dash-dotted",
"dash-dash-dotted", "dot-dot-dashed", "double-dashed",
"dotted", "double-dotted"]
return cycle([ls[l] for l in ls_cycle])
# utilities
def new_figure(fig_width, fig_height, size_in_cm=True, style=None,
no_backend=False, quiet=False, **kwargs):
"""
Return a new matplotlib figure of the specified size (in cm by default)
Information about the matplotlib backend, settings and the figure will be
printed on STDOUT, unless `quiet=True` is given.
The remaining kwargs are passed to the Figure init routine
Arguments
---------
fig_width: float
        total width of figure canvas, in cm (or inches if `size_in_cm=False`)
fig_height: float
        total height of figure canvas, in cm (or inches if `size_in_cm=False`)
size_in_cm: boolean, optional
give as False to indicate that `fig_width` and `fig_height` are in
inches instead of cm
style: string or array of strings, optional
        A style file to overwrite or amend the matplotlibrc file. For
matplotlib version >=1.4, the style sheet feature will be used, see
<http://matplotlib.org/users/style_sheets.html>
        In older versions of matplotlib, `style` must be a filename or URL string;
the contents of the file will be merged with the matplotlibrc settings
no_backend: boolean, optional
If given as True, skip the use of the pyplot entirely, creating the
figure in a purely object-oriented way.
    quiet: boolean, optional
        If True, suppress the informational messages that are otherwise
        printed to STDOUT.
Notes
-----
You may use the figure as follows, assuming the pyplot is used
(`no_backend=False`)
>>> import matplotlib
>>> matplotlib.use('PDF') # backend ('PDF' for pdf, 'Agg' for png)
>>> fig = mgplottools.mpl.new_figure(10, 4)
>>> pos = [0.05, 0.05, 0.9, 0.9] # left, bottom offset, width, height
>>> ax = fig.add_axes(pos)
>>> ax.plot(linspace(0, 10, 100), linspace(0, 10, 100))
>>> fig.savefig('out.pdf', format='pdf')
Alternatively, for a GUI backend, instead of `fig.savefig()`, you can
display all created figures using `fig.show()` -- it is set up as an alias
to matplotlib.pyplot.show().
If you want to do any interactive plotting in ipython (i.e. manipulating
the plot after its creation), make sure to load the %matplotlib or %pylab
magic functions. Also, you must use pyplot
>>> import matplotlib.pyplot as plt
Do not use `plt.ion()`, which does not work in ipython.
Simply create a figure, then call `plt.show()` and `plt.draw()`
If not using a backend (`no_backend=True`, bypassing the pyplot state
machine), you must create the canvas manually. Consider using the
`show_fig`, `write_pdf`, `write_eps`, and `write_png` routines
"""
if (no_backend):
from matplotlib.figure import Figure as figure
backend = "N/A"
using_pyplot = False
else:
using_pyplot = True
from matplotlib.pyplot import figure
backend = matplotlib.get_backend().lower()
if not quiet:
print("Using backend: %s" % backend)
        print("Using matplotlibrc: %s" % matplotlib.matplotlib_fname())
if style is not None:
try:
import matplotlib.style as mpl_style
mpl_style.use(style)
if not quiet:
print("Using style: %s" % style)
except ImportError:
if not quiet:
print("The style package was added to matplotlib in version " \
"1.4. It is not available in your release.\n")
print("Using fall-back implementation")
try:
from matplotlib import rc_params_from_file
rc = rc_params_from_file(style, use_default_template=False)
matplotlib.rcParams.update(rc)
except:
print("Style '%s' not found" % style)
except ValueError as e:
print("Error loading style %s: %s" % (style, e))
if size_in_cm:
if not quiet:
print("Figure height: %s cm" % fig_height)
print("Figure width : %s cm" % fig_width)
fig = figure(figsize=(fig_width*cm2inch, fig_height*cm2inch),
**kwargs)
else:
if not quiet:
print("Figure height: %s cm" % (fig_height / cm2inch))
print("Figure width : %s cm" % (fig_width / cm2inch))
fig = figure(figsize=(fig_width, fig_height), **kwargs)
if using_pyplot:
# replace fig.show() with matplotlib.pyplot.show()
from matplotlib.pyplot import show
fig.show = show
return fig
def set_axis(ax, which_axis, start, stop, step=None, range=None, minor=0,
format=None, label=None, labelpad=None, tickpad=None,
label_coords=None, ticklabels=None, logscale=False,
drop_ticklabels=None):
"""
Format the x or y axis of the given axes object
Parameters
----------
ax: instance of matplotlib.axes.Axes
Axes instance in which to set the x or y axis
which_axis: str
Either 'x', or 'y'
start: float
value for first tick on the axis (and start of axis, unless range is
        given)
stop: float
value for last tick on the axis (and stop of axis, unless range is
given)
step: float
step between major ticks. If not given, use automatic ticks. Must not
be given if logscale=True
range: tuple
The minimum and maximum value of the axis. If not given, [start, stop]
minor:
Number of subdivisions of the interval between major ticks; e.g.,
minor=2 will place a single minor tick midway between major ticks.
format: str
Format string to use for tick labels. Will be chosen automatically if
not given
label: str
Axis-label
labelpad: float
spacing in points between the label and the axis. Use label_coords for
more control
tickpad: float
spacing in points between the ticklabel and the axes.
label_coords: tuple (x,y)
exact position of the axis label, in the axes relative coordinate
system
ticklabels: array of strings, boolean
If given, labels for the major tick marks. Alternatively, if given as a
boolean value "False", suppress labels
logscale: set the given axis to use log scale
drop_ticklabels: list of tick label indices to make invisible. Note that
for logscale plots, there may be spurious tick labels, so some trial
and error is required.
"""
if which_axis == 'x':
axis = ax.xaxis
if logscale:
ax.set_xscale('log')
elif which_axis == 'y':
axis = ax.yaxis
if logscale:
ax.set_yscale('log')
else:
raise ValueError('which_axis must be either "x", or "y"')
if step is not None:
if not logscale:
axis.set_ticks(np.arange(float(start),
float(stop) + float(step)/2.0,
float(step)))
else:
            raise ValueError('step must not be given in conjunction with '
'logscale=True')
if format is not None:
majorFormatter = FormatStrFormatter(format)
axis.set_major_formatter(majorFormatter)
if minor > 0:
minorLocator = AutoMinorLocator(minor)
axis.set_minor_locator(minorLocator)
if range is None:
range = [start, stop]
if which_axis == 'x':
ax.set_xlim(range)
if label is not None:
ax.set_xlabel(label, labelpad=labelpad)
if ticklabels is not None:
try:
ax.set_xticklabels([str(v) for v in ticklabels])
except TypeError:
if not ticklabels:
ax.set_xticklabels([])
elif which_axis == 'y':
ax.set_ylim(range)
if label is not None:
ax.set_ylabel(label, labelpad=labelpad)
if ticklabels is not None:
try:
ax.set_yticklabels([str(v) for v in ticklabels])
except TypeError:
if not ticklabels:
ax.set_yticklabels([])
if label_coords is not None:
axis.set_label_coords(*label_coords)
if drop_ticklabels is not None:
labels = axis.get_ticklabels()
for index in drop_ticklabels:
labels[index].set_visible(False)
if tickpad is not None:
ax.tick_params(axis=which_axis, pad=tickpad)
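# Illustrative use of `set_axis` (a sketch only; the data, ranges and labels
# below are made-up example values, not taken from any particular figure):
#
#     >>> fig = new_figure(12, 8)
#     >>> ax = fig.add_axes([0.15, 0.15, 0.8, 0.8])
#     >>> ax.plot(np.linspace(0, 10, 100), np.linspace(0, 10, 100)**2)
#     >>> set_axis(ax, 'x', 0, 10, step=2, minor=2, label='time (arb. units)')
#     >>> set_axis(ax, 'y', 0, 100, step=20, minor=2, format='%d', label='signal')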
def show_fig(fig):
"""
Display the given figure in a custom Qt4 window. This is independent from
the pyplot framework and thus works for figures that were created with
`no_backend=True`.
However, after calling this routine, an existing pyplot backend will be
permanently changed and may be dysfunctional.
Since interactively showing the figure can change its size, we attempt to
restore the original size on exit from the routine.
"""
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QMainWindow, QWidget, QVBoxLayout, QApplication
# TODO: make interactive
# http://stackoverflow.com/questions/22729206/matplotlib-show-figure-again
#
# See
# https://github.com/eliben/code-for-blog/blob/master/2009/qt_mpl_bars.py
# for further examples of GUI programming
class AppForm(QMainWindow):
def __init__(self, fig):
parent = None
QMainWindow.__init__(self, parent)
self.create_main_frame(fig)
self.on_draw()
def create_main_frame(self, fig):
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
self.main_frame = QWidget()
self.fig = fig
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
self.canvas.setFocusPolicy(Qt.StrongFocus)
self.canvas.setFocus()
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
self.canvas.mpl_connect('key_press_event', self.on_key_press)
vbox = QVBoxLayout()
vbox.addWidget(self.canvas) # the matplotlib canvas
vbox.addWidget(self.mpl_toolbar)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def on_draw(self):
self.canvas.draw()
def on_key_press(self, event):
from matplotlib.backend_bases import key_press_handler
print('you pressed', event.key)
# implement the default mpl key press events described at
# http://matplotlib.org/users/navigation_toolbar.html
key_press_handler(event, self.canvas, self.mpl_toolbar)
w = fig.get_figwidth()
h = fig.get_figheight()
app = QApplication([])
form = AppForm(fig)
form.show()
app.exec_()
fig.set_figwidth(w)
fig.set_figheight(h)
def write_pdf(fig, outfile, dpi=72):
"""
    Write a pdf of the given figure, independent of the pyplot backend.
However, if the figure was created from pyplot, an existing pyplot backend
will be permanently changed and may be dysfunctional.
"""
from matplotlib.backends.backend_pdf \
import FigureCanvasPdf as FigureCanvas
canvas = FigureCanvas(fig)
canvas.print_figure(outfile, dpi=dpi)
def write_png(fig, outfile, dpi=72):
"""
    Write a png of the given figure, independent of the pyplot backend.
However, if the figure was created from pyplot, an existing pyplot backend
will be permanently changed and may be dysfunctional.
"""
from matplotlib.backends.backend_agg \
import FigureCanvasAgg as FigureCanvas
canvas = FigureCanvas(fig)
canvas.print_figure(outfile, dpi=dpi)
def write_eps(fig, outfile, dpi=72):
"""
    Write an eps of the given figure, independent of the pyplot backend.
However, if the figure was created from pyplot, an existing pyplot backend
will be permanently changed and may be dysfunctional.
"""
from matplotlib.backends.backend_ps \
import FigureCanvasPS as FigureCanvas
canvas = FigureCanvas(fig)
canvas.print_figure(outfile, dpi=dpi)
def write_figure(fig, outfile, dpi=72):
"""
Write out a figure to the given outfile, either in pdf, eps, or png format
depending on the extension of outfile. This works independently of the
pyplot backend; however, it may disable any existing pyplot backend.
"""
format = os.path.splitext(outfile)[1][1:].lower()
writer = {
'pdf': write_pdf,
'eps': write_eps,
        'png': write_png
}
writer[format](fig, outfile, dpi)
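# Example (sketch): `write_figure(fig, 'out.pdf')` dispatches to `write_pdf`,
# while filenames ending in '.eps' or '.png' select the EPS and PNG writers
# by extension.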
| gpl-3.0 |
raincoatrun/basemap | examples/hires.py | 4 | 1615 | from __future__ import print_function
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import pickle, time
# create figure with aqua background (will be oceans)
fig = plt.figure()
# create Basemap instance. Use 'full' resolution coastlines.
t1 = time.clock()
#m = Basemap(llcrnrlon=-10.5,llcrnrlat=49.5,urcrnrlon=3.5,urcrnrlat=59.5,
# resolution='h',projection='tmerc',lon_0=-4,lat_0=0)
m = Basemap(width=920000,height=1100000,
resolution='f',projection='tmerc',lon_0=-4.2,lat_0=54.6)
# make sure countries and rivers are loaded
m.drawcountries()
m.drawrivers()
print(time.clock()-t1,' secs to create original Basemap instance')
# pickle the class instance.
pickle.dump(m,open('map.pickle','wb'),-1)
# clear the figure
plt.clf()
# read pickle back in and plot it again (should be much faster).
t1 = time.clock()
m2 = pickle.load(open('map.pickle','rb'))
# draw coastlines and fill continents.
m2.drawcoastlines()
# fill continents and lakes
m2.fillcontinents(color='coral',lake_color='aqua')
# draw political boundaries.
m2.drawcountries(linewidth=1)
# fill map projection region light blue (this will
# paint ocean areas same color as lakes).
m2.drawmapboundary(fill_color='aqua')
# draw major rivers.
m2.drawrivers(color='b')
print(time.clock()-t1,' secs to plot using a pickled Basemap instance')
# draw parallels
circles = np.arange(48,65,2).tolist()
m2.drawparallels(circles,labels=[1,1,0,0])
# draw meridians
meridians = np.arange(-12,13,2)
m2.drawmeridians(meridians,labels=[0,0,1,1])
plt.title("High-Res British Isles",y=1.04)
plt.show()
| gpl-2.0 |
dannyjacobs/PRISim | main/interferometer_array_theory_units_figures_producer.py | 1 | 233172 | import numpy as NP
import copy
from astropy.io import fits
from astropy.io import ascii
from astropy import coordinates as coord
from astropy.coordinates import Galactic, FK5
from astropy import units
import astropy.cosmology as CP
import scipy.constants as FCNST
from scipy import interpolate
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import matplotlib.cm as CM
from matplotlib.ticker import FuncFormatter
import healpy as HP
from mwapy.pb import primary_beam as MWAPB
import geometry as GEOM
import interferometry as RI
import catalog as SM
import constants as CNST
import my_DSP_modules as DSP
import my_operations as OPS
import primary_beams as PB
import baseline_delay_horizon as DLY
import lookup_operations as LKP
import ipdb as PDB
# 01) Plot pointings information
# 02) Plot power patterns for snapshots
# 03) Plot foreground models with power pattern contours for snapshots
# 04) Plot delay maps on sky for baselines of different orientations
# 05) Plot FHD data and simulations on all baselines combined
# 06) Plot FHD data to simulation ratio on all baselines combined
# 07) Plot uncertainties in FHD data to simulation ratio on all baselines combined
# 08) Plot ratio of differences between FHD data and simulation to expected error on all baselines combined
# 09) Plot histogram of fractional differences between FHD data and simulation
# 10) Plot noiseless delay spectra from simulations for diffuse, compact and all-sky models
# 11) Plot noiseless delay spectra for all sky models broken down by baseline orientation
# 12) Plot delay spectra on northward and eastward baselines along with delay maps and sky models (with and without power pattern contours)
# 13) Plot EoR window foreground contamination when baselines are selectively removed
# 14) Plot delay spectra before and after baselines are selectively removed
# 15) Plot Fourier space
# 16) Plot average thermal noise in simulations and data as a function of baseline length
# 17) Plot delay spectra of the MWA tile power pattern using a uniform sky model
# 18) Plot delay spectra of the all-sky model with dipole, MWA tile, and HERA dish antenna shapes
# 19) Plot delay spectrum of uniform sky model with a uniform power pattern
plot_01 = False
plot_02 = False
plot_03 = False
plot_04 = False
plot_05 = True
plot_06 = False
plot_07 = False
plot_08 = False
plot_09 = False
plot_10 = False
plot_11 = False
plot_12 = False
plot_13 = False
plot_14 = False
plot_15 = False
plot_16 = False
plot_17 = False
plot_18 = False
plot_19 = False
# PLT.ioff()
PLT.ion()
project_MWA = True
project_HERA = False
project_beams = False
project_drift_scan = False
project_global_EoR = False
if project_MWA: project_dir = 'project_MWA'
if project_HERA: project_dir = 'project_HERA'
if project_beams: project_dir = 'project_beams'
if project_drift_scan: project_dir = 'project_drift_scan'
if project_global_EoR: project_dir = 'project_global_EoR'
telescope_id = 'custom'
element_size = 0.74
element_shape = 'delta'
phased_array = True
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
elif telescope_id == 'vla':
element_size = 25.0
element_shape = 'dish'
elif telescope_id == 'gmrt':
element_size = 45.0
element_shape = 'dish'
elif telescope_id == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id == 'custom':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
else:
raise ValueError('telescope ID must be specified.')
if telescope_id == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
ground_plane = 0.3 # height of antenna element above ground plane
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
delayerr = 0.05 # delay error rms in ns
if delayerr is None:
delayerr_str = ''
delayerr = 0.0
elif delayerr < 0.0:
raise ValueError('delayerr must be non-negative.')
else:
delayerr_str = 'derr_{0:.3f}ns'.format(delayerr)
delayerr *= 1e-9
gainerr = None # Gain error rms in dB
if gainerr is None:
gainerr_str = ''
gainerr = 0.0
elif gainerr < 0.0:
raise ValueError('gainerr must be non-negative.')
else:
gainerr_str = '_gerr_{0:.2f}dB'.format(gainerr)
nrand = 1 # Number of random realizations
if nrand is None:
nrandom_str = ''
nrand = 1
elif nrand < 1:
raise ValueError('nrandom must be positive')
else:
nrandom_str = '_nrand_{0:0d}_'.format(nrand)
if (delayerr_str == '') and (gainerr_str == ''):
nrand = 1
nrandom_str = ''
delaygain_err_str = delayerr_str + gainerr_str + nrandom_str
if project_MWA:
delaygain_err_str = ''
latitude = -26.701
antenna_file = '/data3/t_nithyanandan/project_MWA/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt'
max_bl_length = 200.0 # Maximum baseline length (in m)
ant_locs = NP.loadtxt(antenna_file, skiprows=6, comments='#', usecols=(0,1,2,3))
ref_bl, ref_bl_id = RI.baseline_generator(ant_locs[:,1:], ant_id=ant_locs[:,0].astype(int).astype(str), auto=False, conjugate=False)
ref_bl_length = NP.sqrt(NP.sum(ref_bl**2, axis=1))
ref_bl_orientation = NP.angle(ref_bl[:,0] + 1j * ref_bl[:,1], deg=True)
neg_ref_bl_orientation_ind = ref_bl_orientation < 0.0
ref_bl[neg_ref_bl_orientation_ind,:] = -1.0 * ref_bl[neg_ref_bl_orientation_ind,:]
ref_bl_orientation = NP.angle(ref_bl[:,0] + 1j * ref_bl[:,1], deg=True)
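# Baselines with negative position angles are flipped in sign above so that
# every baseline orientation falls within [0, 180] degrees before the array is
# sorted by baseline length.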
sortind = NP.argsort(ref_bl_length, kind='mergesort')
ref_bl = ref_bl[sortind,:]
ref_bl_length = ref_bl_length[sortind]
ref_bl_orientation = ref_bl_orientation[sortind]
ref_bl_id = ref_bl_id[sortind]
n_bins_baseline_orientation = 4
nmax_baselines = 2048
ref_bl = ref_bl[:nmax_baselines,:]
ref_bl_length = ref_bl_length[:nmax_baselines]
ref_bl_id = ref_bl_id[:nmax_baselines]
ref_bl_orientation = ref_bl_orientation[:nmax_baselines]
total_baselines = ref_bl_length.size
Tsys = 95.0 # System temperature in K
freq = 185.0e6 # center frequency in Hz
wavelength = FCNST.c / freq # in meters
redshift = CNST.rest_freq_HI / freq - 1
oversampling_factor = 2.0
n_sky_sectors = 1
sky_sector = None # if None, use all sky sector. Accepted values are None, 0, 1, 2, or 3
if sky_sector is None:
sky_sector_str = '_all_sky_'
n_sky_sectors = 1
sky_sector = 0
else:
sky_sector_str = '_sky_sector_{0:0d}_'.format(sky_sector)
n_bl_chunks = 32
baseline_chunk_size = 64
baseline_bin_indices = range(0,total_baselines,baseline_chunk_size)
bl_chunk = range(len(baseline_bin_indices))
bl_chunk = bl_chunk[:n_bl_chunks]
truncated_ref_bl = NP.copy(ref_bl)
truncated_ref_bl_id = NP.copy(ref_bl_id)
truncated_ref_bl_length = NP.sqrt(NP.sum(truncated_ref_bl[:,:2]**2, axis=1))
# truncated_ref_bl_length = NP.copy(ref_bl_length)
truncated_ref_bl_orientation = NP.copy(ref_bl_orientation)
truncated_total_baselines = truncated_ref_bl_length.size
if max_bl_length is not None:
truncated_ref_bl_ind = ref_bl_length <= max_bl_length
truncated_ref_bl = truncated_ref_bl[truncated_ref_bl_ind,:]
truncated_ref_bl_id = truncated_ref_bl_id[truncated_ref_bl_ind]
truncated_ref_bl_orientation = truncated_ref_bl_orientation[truncated_ref_bl_ind]
truncated_ref_bl_length = truncated_ref_bl_length[truncated_ref_bl_ind]
truncated_total_baselines = truncated_ref_bl_length.size
bl_orientation_str = ['South-East', 'East', 'North-East', 'North']
spindex_rms = 0.0
spindex_seed = None
spindex_seed_str = ''
if spindex_rms > 0.0:
spindex_rms_str = '{0:.1f}'.format(spindex_rms)
else:
spindex_rms = 0.0
if spindex_seed is not None:
spindex_seed_str = '{0:0d}_'.format(spindex_seed)
use_alt_spindex = False
alt_spindex_rms = 0.3
alt_spindex_seed = 95
alt_spindex_seed_str = ''
if alt_spindex_rms > 0.0:
alt_spindex_rms_str = '{0:.1f}'.format(alt_spindex_rms)
else:
alt_spindex_rms = 0.0
if alt_spindex_seed is not None:
alt_spindex_seed_str = '{0:0d}_'.format(alt_spindex_seed)
nside = 64
use_GSM = True
use_DSM = False
use_CSM = False
use_NVSS = False
use_SUMSS = False
use_MSS = False
use_GLEAM = False
use_PS = False
obs_mode = 'custom'
avg_drifts = False
beam_switch = False
snapshot_type_str = ''
if avg_drifts:
snapshot_type_str = 'drift_averaged_'
if beam_switch:
snapshot_type_str = 'beam_switches_'
freq_resolution = 80e3 # in Hz
nchan = 384
bpass_shape = 'bhw'
max_abs_delay = 1.5 # in micro seconds
coarse_channel_resolution = 1.28e6 # in Hz
bw = nchan * freq_resolution
dsm_base_freq = 408e6 # Haslam map frequency
csm_base_freq = 1.420e9 # NVSS frequency
dsm_dalpha = 0.7/2 # Spread in spectral index in Haslam map
csm_dalpha = 0.7/2 # Spread in spectral index in NVSS
csm_jacobian_spindex = NP.abs(csm_dalpha * NP.log(freq/csm_base_freq))
dsm_jacobian_spindex = NP.abs(dsm_dalpha * NP.log(freq/dsm_base_freq))
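# First-order error propagation for the power-law extrapolation: a spread
# d(alpha) in spectral index maps to a fractional flux spread of
# |d(alpha) * ln(freq/freq_base)|, which is what the two Jacobian factors
# above encode for the NVSS and Haslam base frequencies respectively.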
if use_GSM:
fg_str = 'asm'
elif use_DSM:
fg_str = 'dsm'
elif use_CSM:
fg_str = 'csm'
elif use_SUMSS:
fg_str = 'sumss'
elif use_GLEAM:
fg_str = 'gleam'
elif use_PS:
fg_str = 'point'
elif use_NVSS:
fg_str = 'nvss'
else:
fg_str = 'other'
roifile = '/data3/t_nithyanandan/'+project_dir+'/roi_info_'+telescope_str+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'.fits'
roi = RI.ROI_parameters(init_file=roifile)
telescope = roi.telescope
if (telescope['shape'] == 'dipole') or (telescope['shape'] == 'delta'):
A_eff = (0.5*wavelength)**2
if (telescope_id == 'mwa') or phased_array:
A_eff *= 16
if telescope['shape'] == 'dish':
A_eff = NP.pi * (0.5*element_size)**2
pc = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
pc_coords = 'dircos'
h = 0.7 # Hubble constant coefficient
cosmodel100 = CP.FlatLambdaCDM(H0=100.0, Om0=0.27) # Using H0 = 100 km/s/Mpc
cosmodel = CP.FlatLambdaCDM(H0=h*100.0, Om0=0.27) # Using H0 = h * 100 km/s/Mpc
dr_z = (FCNST.c/1e3) * bw * (1+redshift)**2 / CNST.rest_freq_HI / cosmodel100.H0.value / cosmodel100.efunc(redshift) # in Mpc/h
r_z = cosmodel100.comoving_transverse_distance(redshift).value # in Mpc/h
volfactor1 = A_eff / wavelength**2 / bw
volfactor2 = r_z**2 * dr_z / bw
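# dr_z is the comoving radial depth spanned by the bandwidth bw and r_z the
# transverse comoving distance to the band centre; both are in Mpc/h because
# cosmodel100 (H0 = 100 km/s/Mpc) is used. volfactor1 (A_eff / lambda^2 per
# unit bandwidth) and volfactor2 (r_z^2 * dr_z per unit bandwidth) are the
# corresponding per-unit-bandwidth conversion factors.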
Jy2K = wavelength**2 * CNST.Jy / (2*FCNST.k)
mJy2mK = NP.copy(Jy2K)
Jy2mK = 1e3 * Jy2K
mK2Jy = 1/Jy2mK
mK2mJy = 1/mJy2mK
K2Jy = 1/Jy2K
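# Rayleigh-Jeans conversions between flux density and brightness temperature:
# Jy2K = lambda^2 * (1 Jy) / (2 * k_B); mJy2mK is numerically identical, and
# the remaining factors are powers of 1e3 and reciprocals of the same quantity.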
dspec_min = None
dspec_max = None
def kprll(eta, z):
return 2 * NP.pi * eta * cosmodel100.H0.value * CNST.rest_freq_HI * cosmodel100.efunc(z) / FCNST.c / (1+z)**2 * 1e3
def kperp(u, z):
return 2 * NP.pi * u / cosmodel100.comoving_transverse_distance(z).value
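# The two helpers above map instrument coordinates to cosmological wavenumbers
# (in h/Mpc, since H0 = 100 km/s/Mpc is used) following the usual 21 cm
# delay-spectrum convention:
#   k_parallel = 2*pi * eta * f21 * H0 * E(z) / (c * (1+z)^2),  eta = delay [s]
#   k_perp     = 2*pi * u / D_M(z),                             u = |b|/lambda
# The trailing factor of 1e3 in kprll converts H0 from km/s/Mpc to m/s/Mpc so
# that it cancels against c in m/s.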
##########################################
if plot_01:
# 01) Plot pointings information
pointing_file = '/data3/t_nithyanandan/project_MWA/Aug23_obsinfo.txt'
pointing_info_from_file = NP.loadtxt(pointing_file, comments='#', usecols=(1,2,3), delimiter=',')
obs_id = NP.loadtxt(pointing_file, comments='#', usecols=(0,), delimiter=',', dtype=str)
if (telescope_id == 'mwa') or (phased_array):
delays_str = NP.loadtxt(pointing_file, comments='#', usecols=(4,), delimiter=',', dtype=str)
delays_list = [NP.fromstring(delaystr, dtype=float, sep=';', count=-1) for delaystr in delays_str]
delay_settings = NP.asarray(delays_list)
delay_settings *= 435e-12
delays = NP.copy(delay_settings)
n_snaps = pointing_info_from_file.shape[0]
pointing_info_from_file = pointing_info_from_file[:min(n_snaps, pointing_info_from_file.shape[0]),:]
obs_id = obs_id[:min(n_snaps, pointing_info_from_file.shape[0])]
if (telescope_id == 'mwa') or (phased_array):
delays = delay_settings[:min(n_snaps, pointing_info_from_file.shape[0]),:]
n_snaps = min(n_snaps, pointing_info_from_file.shape[0])
pointings_altaz = pointing_info_from_file[:,:2].reshape(-1,2)
pointings_altaz_orig = pointing_info_from_file[:,:2].reshape(-1,2)
lst = 15.0 * pointing_info_from_file[:,2]
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
lst = 0.5*(lst_edges[1:]+lst_edges[:-1])
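    # Convert each LST bin width from degrees of sidereal rotation to seconds
    # (15 degrees per hour, 3600 seconds per hour) to obtain the snapshot
    # durations t_snap below.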
t_snap = (lst_edges[1:]-lst_edges[:-1]) / 15.0 * 3.6e3
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_radec[:,0] = pointings_radec[:,0] % 360.0
pointings_ha = pointings_hadec[:,0]
pointings_ha[pointings_ha > 180.0] = pointings_ha[pointings_ha > 180.0] - 360.0
pointings_ra = pointings_radec[:,0]
pointings_ra[pointings_ra > 180.0] = pointings_ra[pointings_ra > 180.0] - 360.0
pointings_dec = pointings_radec[:,1]
infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'.fits'
hdulist = fits.open(infile)
lst_select = hdulist['POINTING AND PHASE CENTER INFO'].data['LST']
hdulist.close()
lst_select[lst_select > 180.0] -= 360.0
fig = PLT.figure(figsize=(6,6))
ax1a = fig.add_subplot(111)
ax1a.set_xlabel('Local Sidereal Time [hours]', fontsize=18, weight='medium')
ax1a.set_ylabel('Longitude [degrees]', fontsize=18, weight='medium')
ax1a.set_xlim((lst_wrapped.min()-1)/15.0, (lst_wrapped.max()-1)/15.0)
ax1a.set_ylim(pointings_ha.min()-15.0, pointings_ha.max()+15.0)
ax1a.plot(lst_wrapped/15.0, pointings_ha, 'k--', lw=2, label='HA')
ax1a.plot(lst_wrapped/15.0, pointings_ra, 'k-', lw=2, label='RA')
for i in xrange(lst_select.size):
if i == 0:
ax1a.axvline(x=lst_select[i]/15.0, color='gray', ls='-.', lw=2, label='Selected LST')
else:
ax1a.axvline(x=lst_select[i]/15.0, color='gray', ls='-.', lw=2)
ax1a.tick_params(which='major', length=18, labelsize=12)
ax1a.tick_params(which='minor', length=12, labelsize=12)
# legend1a = ax1a.legend(loc='lower right')
# legend1a.draw_frame(False)
for axis in ['top','bottom','left','right']:
ax1a.spines[axis].set_linewidth(2)
xticklabels = PLT.getp(ax1a, 'xticklabels')
yticklabels = PLT.getp(ax1a, 'yticklabels')
PLT.setp(xticklabels, fontsize=15, weight='medium')
PLT.setp(yticklabels, fontsize=15, weight='medium')
ax1b = ax1a.twinx()
ax1b.set_ylabel('Declination [degrees]', fontsize=18, weight='medium')
ax1b.set_ylim(pointings_dec.min()-5.0, pointings_dec.max()+5.0)
ax1b.plot(lst_wrapped/15.0, pointings_dec, 'k:', lw=2, label='Dec')
ax1b.tick_params(which='major', length=12, labelsize=12)
# legend1b = ax1b.legend(loc='upper right')
# legend1b.draw_frame(False)
yticklabels = PLT.getp(ax1b, 'yticklabels')
PLT.setp(yticklabels, fontsize=15, weight='medium')
decline = PLT.Line2D(range(1), range(0), color='k', ls=':', lw=2)
haline = PLT.Line2D(range(1), range(0), color='k', ls='--', lw=2)
raline = PLT.Line2D(range(1), range(0), color='k', ls='-', lw=2)
lstline = PLT.Line2D(range(1), range(0), color='gray', ls='-.', lw=2)
legend = PLT.legend((haline, raline, decline, lstline), ('HA', 'RA', 'Dec', 'Chosen LST'), loc='lower right', frameon=False)
fig.subplots_adjust(right=0.85)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+obs_mode+'_pointings.eps', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+obs_mode+'_pointings.png', bbox_inches=0)
#############################################################################
if plot_02 or plot_03 or plot_04 or plot_12:
infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'.fits'
hdulist = fits.open(infile)
n_snaps = hdulist[0].header['n_acc']
lst = hdulist['POINTING AND PHASE CENTER INFO'].data['LST']
hdulist.close()
backdrop_xsize = 100
xmin = -180.0
xmax = 180.0
ymin = -90.0
ymax = 90.0
xgrid, ygrid = NP.meshgrid(NP.linspace(xmax, xmin, backdrop_xsize), NP.linspace(ymin, ymax, backdrop_xsize/2))
xvect = xgrid.ravel()
yvect = ygrid.ravel()
pb_snapshots = []
pbx_MWA_snapshots = []
pby_MWA_snapshots = []
src_ind_csm_snapshots = []
src_ind_gsm_snapshots = []
dsm_snapshots = []
if plot_03 or plot_12:
freq_SUMSS = 0.843 # in GHz
SUMSS_file = '/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt'
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
if spindex_seed is None:
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = freq_SUMSS*1e9 + NP.zeros(fint.size)
catlabel = NP.repeat('SUMSS', fint.size)
ra_deg = ra_deg_SUMSS + 0.0
dec_deg = dec_deg_SUMSS
spindex = spindex_SUMSS
majax = fmajax/3.6e3
minax = fminax/3.6e3
fluxes = NP.copy(fint)
nvss_file = '/data3/t_nithyanandan/project_MWA/foregrounds/NVSS_catalog.fits'
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
if spindex_seed is None:
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
else:
NP.random.seed(2*spindex_seed)
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))
nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]
freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))
ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
fluxes = NP.concatenate((fluxes, nvss_fpeak))
ra_deg_wrapped = ra_deg.ravel()
ra_deg_wrapped[ra_deg_wrapped > 180.0] -= 360.0
# csmctlg = SM.Catalog(catlabel, freq_catalog, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes, spectral_index=spindex, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
csmskymod = SM.SkyModel(catlabel, freq, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
dsm_file = '/data3/t_nithyanandan/project_MWA/foregrounds/gsmdata_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq/1e6,nside)
hdulist = fits.open(dsm_file)
dsm_table = hdulist[1].data
dsm_ra_deg = dsm_table['RA']
dsm_dec_deg = dsm_table['DEC']
dsm_temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
dsm = HP.cartview(dsm_temperatures.ravel(), coord=['G','C'], rot=[0,0,0], xsize=backdrop_xsize, return_projected_map=True)
dsm = dsm.ravel()
for i in xrange(n_snaps):
havect = lst[i] - xvect
altaz = GEOM.hadec2altaz(NP.hstack((havect.reshape(-1,1),yvect.reshape(-1,1))), latitude, units='degrees')
dircos = GEOM.altaz2dircos(altaz, units='degrees')
roi_altaz = NP.asarray(NP.where(altaz[:,0] >= 0.0)).ravel()
az = altaz[:,1] + 0.0
az[az > 360.0 - 0.5*180.0/n_sky_sectors] -= 360.0
roi_sector_altaz = NP.asarray(NP.where(NP.logical_or(NP.logical_and(az[roi_altaz] >= -0.5*180.0/n_sky_sectors + sky_sector*180.0/n_sky_sectors, az[roi_altaz] < -0.5*180.0/n_sky_sectors + (sky_sector+1)*180.0/n_sky_sectors), NP.logical_and(az[roi_altaz] >= 180.0 - 0.5*180.0/n_sky_sectors + sky_sector*180.0/n_sky_sectors, az[roi_altaz] < 180.0 - 0.5*180.0/n_sky_sectors + (sky_sector+1)*180.0/n_sky_sectors)))).ravel()
pb = NP.empty(xvect.size)
pb.fill(NP.nan)
pbx_MWA_vect = NP.empty(xvect.size)
pbx_MWA_vect.fill(NP.nan)
pby_MWA_vect = NP.empty(xvect.size)
pby_MWA_vect.fill(NP.nan)
pb[roi_altaz] = PB.primary_beam_generator(altaz[roi_altaz,:], freq, telescope=telescope, skyunits='altaz', freq_scale='Hz', pointing_info=roi.pinfo[i])
if (telescope_id == 'mwa') or (phased_array):
pbx_MWA, pby_MWA = MWAPB.MWA_Tile_advanced(NP.radians(90.0-altaz[roi_altaz,0]).reshape(-1,1), NP.radians(altaz[roi_altaz,1]).reshape(-1,1), freq=185e6, delays=roi.pinfo[i]['delays']/435e-12)
pbx_MWA_vect[roi_altaz] = pbx_MWA.ravel()
pby_MWA_vect[roi_altaz] = pby_MWA.ravel()
pb_snapshots += [pb]
pbx_MWA_snapshots += [pbx_MWA_vect]
pby_MWA_snapshots += [pby_MWA_vect]
if plot_03 or plot_12:
# csm_hadec = NP.hstack(((lst[i]-csmctlg.location[:,0]).reshape(-1,1), csmctlg.location[:,1].reshape(-1,1)))
csm_hadec = NP.hstack(((lst[i]-csmskymod.location[:,0]).reshape(-1,1), csmskymod.location[:,1].reshape(-1,1)))
csm_altaz = GEOM.hadec2altaz(csm_hadec, latitude, units='degrees')
roi_csm_altaz = NP.asarray(NP.where(csm_altaz[:,0] >= 0.0)).ravel()
src_ind_csm_snapshots += [roi_csm_altaz]
dsm_snapshot = NP.empty(xvect.size)
dsm_snapshot.fill(NP.nan)
dsm_snapshot[roi_altaz] = dsm[roi_altaz]
dsm_snapshots += [dsm_snapshot]
if plot_02:
descriptor_str = ['off-zenith', 'zenith']
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
pbsky = axs[j].imshow(pb_snapshots[j].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm(vmin=1e-3, vmax=1.0), cmap=CM.jet)
axs[j].set_xlim(xvect.max(), xvect.min())
axs[j].set_ylim(yvect.min(), yvect.max())
axs[j].grid(True, which='both')
axs[j].set_aspect('auto')
axs[j].tick_params(which='major', length=12, labelsize=12)
axs[j].tick_params(which='minor', length=6)
axs[j].locator_params(axis='x', nbins=5)
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='black')
cbax = fig.add_axes([0.9, 0.122, 0.02, 0.84])
cbar = fig.colorbar(pbsky, cax=cbax, orientation='vertical')
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=20)
# PLT.tight_layout()
fig.subplots_adjust(right=0.9)
fig.subplots_adjust(top=0.98)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'powerpattern_'+ground_plane_str+snapshot_type_str+obs_mode+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'powerpattern_'+ground_plane_str+snapshot_type_str+obs_mode+'.eps', bbox_inches=0)
# Plot each snapshot separately
for j in xrange(n_snaps):
fig = PLT.figure(figsize=(6,4))
ax = fig.add_subplot(111)
pbsky = ax.imshow(pb_snapshots[j].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm(vmin=1e-5, vmax=1.0), cmap=CM.jet)
ax.set_xlim(xvect.max(), xvect.min())
ax.set_ylim(yvect.min(), yvect.max())
ax.grid(True, which='both')
ax.set_aspect('auto')
ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium')
ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium')
# ax.tick_params(which='major', length=12, labelsize=12)
# ax.tick_params(which='minor', length=6)
# ax.locator_params(axis='x', nbins=5)
# ax.text(0.5, 0.9, descriptor_str[j], transform=ax.transAxes, fontsize=14, weight='semibold', ha='center', color='black')
cbax = fig.add_axes([0.9, 0.15, 0.02, 0.81])
cbar = fig.colorbar(pbsky, cax=cbax, orientation='vertical')
# PLT.tight_layout()
fig.subplots_adjust(right=0.89)
fig.subplots_adjust(top=0.96)
fig.subplots_adjust(bottom=0.15)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'powerpattern_'+ground_plane_str+snapshot_type_str+obs_mode+'_snapshot_{0:1d}.png'.format(j), bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'powerpattern_'+ground_plane_str+snapshot_type_str+obs_mode+'_snapshot_{0:1d}.eps'.format(j), bbox_inches=0)
if plot_03 or plot_12:
# csm_fluxes = csmctlg.flux_density * (freq/csmctlg.frequency)**csmctlg.spectral_index
csm_fluxes = csmskymod.spec_parms['flux-scale'] * (freq/csmskymod.spec_parms['freq-ref'])**csmskymod.spec_parms['power-law-index']
if plot_03:
# 03) Plot foreground models with power pattern contours for snapshots
descriptor_str = ['off-zenith', 'zenith']
n_fg_ticks = 5
fg_ticks = NP.round(NP.logspace(NP.log10(dsm.min()), NP.log10(dsm.max()), n_fg_ticks)).astype(NP.int)
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
dsmsky = axs[j].imshow(dsm_snapshots[j].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm(vmin=dsm.min(), vmax=dsm.max()), cmap=CM.jet)
pbskyc = axs[j].contour(xgrid[0,:], ygrid[:,0], pb_snapshots[j].reshape(-1,backdrop_xsize), levels=[0.001953125, 0.0078125, 0.03125, 0.125, 0.5], colors='k', linewidths=1.5)
axs[j].set_xlim(xvect.max(), xvect.min())
axs[j].set_ylim(yvect.min(), yvect.max())
axs[j].grid(True, which='both')
axs[j].set_aspect('auto')
axs[j].tick_params(which='major', length=12, labelsize=12)
axs[j].tick_params(which='minor', length=6)
axs[j].locator_params(axis='x', nbins=5)
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='black')
cbax = fig.add_axes([0.85, 0.125, 0.02, 0.84])
cbar = fig.colorbar(dsmsky, cax=cbax, orientation='vertical')
cbar.set_ticks(fg_ticks.tolist())
cbar.set_ticklabels(fg_ticks.tolist())
cbax.set_ylabel('Temperature [K]', labelpad=0, fontsize=14)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=20)
# PLT.tight_layout()
fig.subplots_adjust(right=0.85)
fig.subplots_adjust(top=0.98)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/dsm.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/dsm.eps', bbox_inches=0)
n_fg_ticks = 5
fg_ticks = NP.round(NP.logspace(NP.log10(csm_fluxes.min()), NP.log10(csm_fluxes.max()), n_fg_ticks)).astype(NP.int)
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
csmsky = axs[j].scatter(ra_deg_wrapped[src_ind_csm_snapshots[j]], dec_deg[src_ind_csm_snapshots[j]], c=csm_fluxes[src_ind_csm_snapshots[j]], norm=PLTC.LogNorm(vmin=csm_fluxes.min(), vmax=csm_fluxes.max()), cmap=CM.jet, edgecolor='none', s=20)
pbskyc = axs[j].contour(xgrid[0,:], ygrid[:,0], pb_snapshots[j].reshape(-1,backdrop_xsize), levels=[0.001953125, 0.0078125, 0.03125, 0.125, 0.5], colors='k', linewidths=1.5)
axs[j].set_xlim(xvect.max(), xvect.min())
axs[j].set_ylim(yvect.min(), yvect.max())
axs[j].grid(True, which='both')
axs[j].set_aspect('auto')
axs[j].tick_params(which='major', length=12, labelsize=12)
axs[j].tick_params(which='minor', length=6)
axs[j].locator_params(axis='x', nbins=5)
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='black')
cbax = fig.add_axes([0.88, 0.125, 0.02, 0.84])
cbar = fig.colorbar(csmsky, cax=cbax, orientation='vertical')
cbar.set_ticks(fg_ticks.tolist())
cbar.set_ticklabels(fg_ticks.tolist())
cbax.set_ylabel('Flux Density [Jy]', labelpad=0, fontsize=14)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=20)
# PLT.tight_layout()
fig.subplots_adjust(right=0.88)
fig.subplots_adjust(top=0.98)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/csm.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/csm.eps', bbox_inches=0)
if plot_12:
descriptor_str = ['Diffuse Sky Model', 'Compact Emission']
n_fg_ticks = 5
fg_ticks = NP.round(NP.logspace(NP.log10(dsm.min()), NP.log10(dsm.max()), n_fg_ticks)).astype(NP.int)
# Plot sky models with power pattern contours
for j in xrange(n_snaps):
fig, axs = PLT.subplots(2, sharex=True, sharey=True, figsize=(6,6))
dsmsky = axs[0].imshow(dsm_snapshots[j].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm(vmin=dsm.min(), vmax=dsm.max()), cmap=CM.jet)
csmsky = axs[1].scatter(ra_deg_wrapped[src_ind_csm_snapshots[j]], dec_deg[src_ind_csm_snapshots[j]], c=csm_fluxes[src_ind_csm_snapshots[j]], norm=PLTC.LogNorm(vmin=csm_fluxes.min(), vmax=csm_fluxes.max()), cmap=CM.jet, edgecolor='none', s=20)
for i in xrange(2):
pbskyc = axs[i].contour(xgrid[0,:], ygrid[:,0], pb_snapshots[j].reshape(-1,backdrop_xsize), levels=[0.001953125, 0.0078125, 0.03125, 0.125, 0.5], colors='k', linewidths=1.5)
axs[i].set_xlim(xvect.max(), xvect.min())
axs[i].set_ylim(yvect.min(), yvect.max())
axs[i].grid(True, which='both')
axs[i].set_aspect('auto')
axs[i].tick_params(which='major', length=12, labelsize=12)
axs[i].tick_params(which='minor', length=6)
axs[i].locator_params(axis='x', nbins=5)
axs[i].text(0.5, 0.9, descriptor_str[i], transform=axs[i].transAxes, fontsize=16, weight='semibold', ha='center', color='black')
fg_ticks = NP.round(NP.logspace(NP.log10(dsm.min()), NP.log10(dsm.max()), n_fg_ticks)).astype(NP.int)
cbaxtr = fig.add_axes([0.86, 0.55, 0.02, 0.4])
cbartr = fig.colorbar(dsmsky, cax=cbaxtr, orientation='vertical')
cbartr.set_ticks(fg_ticks.tolist())
cbartr.set_ticklabels(fg_ticks.tolist())
cbaxtr.set_ylabel('K', labelpad=0, fontsize=14)
fg_ticks = NP.round(NP.logspace(NP.log10(csm_fluxes.min()), NP.log10(csm_fluxes.max()), n_fg_ticks)).astype(NP.int)
cbaxbr = fig.add_axes([0.86, 0.11, 0.02, 0.4])
cbarbr = fig.colorbar(csmsky, cax=cbaxbr, orientation='vertical')
cbarbr.set_ticks(fg_ticks.tolist())
cbarbr.set_ticklabels(fg_ticks.tolist())
cbaxbr.set_ylabel('Jy', labelpad=0, fontsize=14)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=20)
# PLT.tight_layout()
fig.subplots_adjust(right=0.85)
fig.subplots_adjust(top=0.98)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/sky_model_with_pb_contours_snapshot_{0:0d}.png'.format(j), bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/sky_model_with_pb_contours_snapshot_{0:0d}.eps'.format(j), bbox_inches=0)
# Plot sky models without power pattern contours
for j in xrange(n_snaps):
fig, axs = PLT.subplots(2, sharex=True, sharey=True, figsize=(6,6))
dsmsky = axs[0].imshow(dsm_snapshots[j].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm(vmin=dsm.min(), vmax=dsm.max()), cmap=CM.jet)
csmsky = axs[1].scatter(ra_deg_wrapped[src_ind_csm_snapshots[j]], dec_deg[src_ind_csm_snapshots[j]], c=csm_fluxes[src_ind_csm_snapshots[j]], norm=PLTC.LogNorm(vmin=csm_fluxes.min(), vmax=csm_fluxes.max()), cmap=CM.jet, edgecolor='none', s=20)
for i in xrange(2):
axs[i].set_xlim(xvect.max(), xvect.min())
axs[i].set_ylim(yvect.min(), yvect.max())
axs[i].grid(True, which='both')
axs[i].set_aspect('auto')
axs[i].tick_params(which='major', length=12, labelsize=12)
axs[i].tick_params(which='minor', length=6)
axs[i].locator_params(axis='x', nbins=5)
axs[i].text(0.5, 0.9, descriptor_str[i], transform=axs[i].transAxes, fontsize=16, weight='semibold', ha='center', color='black')
fg_ticks = NP.round(NP.logspace(NP.log10(dsm.min()), NP.log10(dsm.max()), n_fg_ticks)).astype(NP.int)
cbaxtr = fig.add_axes([0.86, 0.55, 0.02, 0.4])
cbartr = fig.colorbar(dsmsky, cax=cbaxtr, orientation='vertical')
cbartr.set_ticks(fg_ticks.tolist())
cbartr.set_ticklabels(fg_ticks.tolist())
cbaxtr.set_ylabel('K', labelpad=0, fontsize=14)
fg_ticks = NP.round(NP.logspace(NP.log10(csm_fluxes.min()), NP.log10(csm_fluxes.max()), n_fg_ticks)).astype(NP.int)
cbaxbr = fig.add_axes([0.86, 0.11, 0.02, 0.4])
cbarbr = fig.colorbar(csmsky, cax=cbaxbr, orientation='vertical')
cbarbr.set_ticks(fg_ticks.tolist())
cbarbr.set_ticklabels(fg_ticks.tolist())
cbaxbr.set_ylabel('Jy', labelpad=0, fontsize=14)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=20)
# PLT.tight_layout()
fig.subplots_adjust(right=0.85)
fig.subplots_adjust(top=0.98)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/sky_model_snapshot_{0:0d}.png'.format(j), bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/sky_model_snapshot_{0:0d}.eps'.format(j), bbox_inches=0)
if plot_04 or plot_12:
# 04) Plot delay maps on sky for baselines of different orientations
cardinal_blo = 180.0 / n_bins_baseline_orientation * (NP.arange(n_bins_baseline_orientation)-1).reshape(-1,1)
cardinal_bll = 100.0
cardinal_bl = cardinal_bll * NP.hstack((NP.cos(NP.radians(cardinal_blo)), NP.sin(NP.radians(cardinal_blo)), NP.zeros_like(cardinal_blo)))
delay_map = NP.empty((n_bins_baseline_orientation, xvect.size, n_snaps))
delay_map.fill(NP.nan)
for i in xrange(n_snaps):
havect = lst[i] - xvect
altaz = GEOM.hadec2altaz(NP.hstack((havect.reshape(-1,1),yvect.reshape(-1,1))), latitude, units='degrees')
dircos = GEOM.altaz2dircos(altaz, units='degrees')
roi_altaz = NP.asarray(NP.where(altaz[:,0] >= 0.0)).ravel()
delay_map[:,roi_altaz,i] = (DLY.geometric_delay(cardinal_bl, altaz[roi_altaz,:], altaz=True, dircos=False, hadec=False, latitude=latitude)-DLY.geometric_delay(cardinal_bl, pc, altaz=False, dircos=True, hadec=False, latitude=latitude)).T
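        # For each cardinal baseline orientation, the map stores the geometric
        # delay towards every visible sky direction minus the delay towards the
        # phase centre (zenith here), i.e. the residual delay after phasing.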
# mindelay = NP.nanmin(delay_map)
# maxdelay = NP.nanmax(delay_map)
# norm_b = PLTC.Normalize(vmin=mindelay, vmax=maxdelay)
# for i in xrange(n_snaps):
# fig = PLT.figure(figsize=(8,6))
# for j in xrange(n_bins_baseline_orientation):
# ax = fig.add_subplot(2,2,j+1)
# imdmap = ax.imshow(1e6 * delay_map[j,:,i].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), vmin=1e6*NP.nanmin(delay_map), vmax=1e6*NP.nanmax(delay_map))
# cbax = fig.add_axes([0.95, 0.1, 0.02, 0.8])
# cbar = fig.colorbar(imdmap, cax=cbax, orientation='vertical')
# cbax.set_ylabel(r'$\times\,(|\mathbf{b}|/100)\,\mu$s', labelpad=-90, fontsize=18)
# PLT.tight_layout()
# fig.subplots_adjust(right=0.85)
# # fig.subplots_adjust(left=0.15)
if plot_04:
for i in xrange(n_snaps):
fig, axs = PLT.subplots(ncols=2, nrows=2, sharex=True, sharey=True, figsize=(12,6))
for j in xrange(n_bins_baseline_orientation):
imdmap = axs[j/2,j%2].imshow(1e6 * OPS.reverse(delay_map[j,:,i].reshape(-1,backdrop_xsize), axis=1), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), vmin=1e6*NP.nanmin(delay_map), vmax=1e6*NP.nanmax(delay_map))
axs[j/2,j%2].set_xlim(xvect.min(), xvect.max())
axs[j/2,j%2].set_ylim(yvect.min(), yvect.max())
axs[j/2,j%2].text(0.8, 0.9, 'E: {0[0]:.1f}m\nN: {0[1]:.1f}m\nZ: {0[2]:.1f}m'.format(cardinal_bl[j,:].ravel()), ha='left', va='top', transform=axs[j/2,j%2].transAxes)
# axs[j/2,j%2].set_aspect(1.33)
# ax.text(0.05, 0.7, '{0:.1f} %'.format(confidence_level*100), horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
# axs[j/2,j%2].set_autoscaley_on(False)
# imdmap = axs[j/2,j%2].imshow(1e6 * delay_map[j,:,i].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), vmin=1e6*NP.nanmin(delay_map), vmax=1e6*NP.nanmax(delay_map))
# axs[j/2,j%2].set_ylim(yvect.min(), yvect.max())
# ax = axs[j/2,j%2]
# ax.set_ylim(yvect.min(), yvect.max())
# axs[j/2,j%2] = ax
fig.subplots_adjust(wspace=0, hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=20)
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.13, 0.92, 0.77, 0.02])
cbar = fig.colorbar(imdmap, cax=cbax, orientation='horizontal')
cbax.set_xlabel(r'delay [$\times\,(|\mathbf{b}|/100)\,\mu$s]', labelpad=-50, fontsize=18)
# PLT.tight_layout()
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/delay_map_snapshot_{0:0d}.png'.format(i), bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/delay_map_snapshot_{0:0d}.eps'.format(i), bbox_inches=0)
# for i in xrange(n_bins_baseline_orientation):
# axs[j/2,j%2].set_ylim(yvect.min(), yvect.max())
# axs[0,0].set_autoscaley_on(False)
if plot_12:
required_bl_orientation = ['North', 'East']
for i in xrange(n_snaps):
fig, axs = PLT.subplots(len(required_bl_orientation), sharex=True, sharey=True, figsize=(6,6))
for k in xrange(len(required_bl_orientation)):
j = bl_orientation_str.index(required_bl_orientation[k])
imdmap = axs[k].imshow(1e6 * OPS.reverse(delay_map[j,:,i].reshape(-1,backdrop_xsize), axis=1), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), vmin=1e6*NP.nanmin(delay_map), vmax=1e6*NP.nanmax(delay_map))
pbskyc = axs[k].contour(xgrid[0,:], ygrid[:,0], OPS.reverse(pb_snapshots[i].reshape(-1,backdrop_xsize), axis=1), levels=[0.001953125, 0.0078125, 0.03125, 0.125, 0.5], colors='k', linewidths=1.5)
axs[k].set_xlim(xvect.min(), xvect.max())
axs[k].set_ylim(yvect.min(), yvect.max())
axs[k].text(0.8, 0.9, required_bl_orientation[k], transform=axs[k].transAxes, fontsize=16, weight='semibold', ha='left', va='top')
axs[k].set_aspect('auto')
fig.subplots_adjust(wspace=0, hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=20)
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.13, 0.92, 0.82, 0.02])
cbar = fig.colorbar(imdmap, cax=cbax, orientation='horizontal')
cbax.set_xlabel(r'$\tau\,[(|\mathbf{b}|/100\,\mathrm{m})\,\mu\mathrm{s}]$', labelpad=-50, fontsize=18)
# PLT.tight_layout()
fig.subplots_adjust(top=0.88)
fig.subplots_adjust(right=0.95)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/directional_delay_map_snapshot_{0:0d}.png'.format(i), bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/directional_delay_map_snapshot_{0:0d}.eps'.format(i), bbox_inches=0)
##############################################################################
if plot_05 or plot_06 or plot_07 or plot_09 or plot_16:
# 05) Plot FHD data and simulations on baselines by orientation and all combined
fhd_obsid = [1061309344, 1061316544]
infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)
asm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+bpass_shape
if use_alt_spindex:
alt_asm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_asm'+sky_sector_str+'sprms_{0:.1f}_'.format(alt_spindex_rms)+alt_spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+bpass_shape
ia = RI.InterferometerArray(None, None, None, init_file=infile+'.fits')
simdata_bl_orientation = NP.angle(ia.baselines[:,0] + 1j * ia.baselines[:,1], deg=True)
simdata_neg_bl_orientation_ind = simdata_bl_orientation > 90.0 + 0.5*180.0/n_bins_baseline_orientation
simdata_bl_orientation[simdata_neg_bl_orientation_ind] -= 180.0
ia.baselines[simdata_neg_bl_orientation_ind,:] = -ia.baselines[simdata_neg_bl_orientation_ind,:]
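    # Fold baseline orientations into a single half-plane: baselines oriented more than
    # half a bin beyond 90 degrees are rotated by 180 degrees (vectors negated here), and
    # their CLEANed visibilities are conjugated further below to keep them consistent.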
hdulist = fits.open(infile+'.fits')
latitude = hdulist[0].header['latitude']
pointing_coords = hdulist[0].header['pointing_coords']
pointings_table = hdulist['POINTING AND PHASE CENTER INFO'].data
lst = pointings_table['LST']
n_snaps = lst.size
hdulist.close()
if pointing_coords == 'altaz':
pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'radec':
pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'hadec':
pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
hdulist = fits.open(asm_CLEAN_infile+'.fits')
clean_lags = hdulist['SPECTRAL INFO'].data['lag']
clean_lags_orig = NP.copy(clean_lags)
asm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
asm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
asm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
asm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
asm_cc_vis[simdata_neg_bl_orientation_ind,:,:] = asm_cc_vis[simdata_neg_bl_orientation_ind,:,:].conj()
asm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:] = asm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:].conj()
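    # Delay-transform the CLEANed visibilities: inverse FFT along the frequency axis,
    # fftshifted to center zero lag, and scaled by (number of channels) * freq_resolution
    # so the result approximates the continuous delay transform in Jy Hz; the CLEAN
    # residuals are added back to the component spectra.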
asm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(asm_cc_skyvis, axis=1),axes=1) * asm_cc_skyvis.shape[1] * freq_resolution
asm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(asm_cc_skyvis_res, axis=1),axes=1) * asm_cc_skyvis.shape[1] * freq_resolution
asm_cc_skyvis_lag = asm_cc_skyvis_lag + asm_ccres_sky
asm_cc_vis_lag = NP.fft.fftshift(NP.fft.ifft(asm_cc_vis, axis=1),axes=1) * asm_cc_vis.shape[1] * freq_resolution
asm_ccres = NP.fft.fftshift(NP.fft.ifft(asm_cc_vis_res, axis=1),axes=1) * asm_cc_vis.shape[1] * freq_resolution
asm_cc_vis_lag = asm_cc_vis_lag + asm_ccres
asm_cc_skyvis_lag = DSP.downsampler(asm_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
asm_cc_vis_lag = DSP.downsampler(asm_cc_vis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
if use_alt_spindex:
alt_hdulist = fits.open(alt_asm_CLEAN_infile+'.fits')
alt_asm_cc_skyvis = alt_hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * alt_hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
alt_asm_cc_skyvis_res = alt_hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * alt_hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
alt_asm_cc_vis = alt_hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * alt_hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
alt_asm_cc_vis_res = alt_hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * alt_hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
alt_hdulist.close()
alt_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = alt_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
alt_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = alt_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
alt_asm_cc_vis[simdata_neg_bl_orientation_ind,:,:] = alt_asm_cc_vis[simdata_neg_bl_orientation_ind,:,:].conj()
alt_asm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:] = alt_asm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:].conj()
alt_asm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(alt_asm_cc_skyvis, axis=1),axes=1) * alt_asm_cc_skyvis.shape[1] * freq_resolution
alt_asm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(alt_asm_cc_skyvis_res, axis=1),axes=1) * alt_asm_cc_skyvis.shape[1] * freq_resolution
alt_asm_cc_skyvis_lag = alt_asm_cc_skyvis_lag + alt_asm_ccres_sky
alt_asm_cc_vis_lag = NP.fft.fftshift(NP.fft.ifft(alt_asm_cc_vis, axis=1),axes=1) * alt_asm_cc_vis.shape[1] * freq_resolution
alt_asm_ccres = NP.fft.fftshift(NP.fft.ifft(alt_asm_cc_vis_res, axis=1),axes=1) * alt_asm_cc_vis.shape[1] * freq_resolution
alt_asm_cc_vis_lag = alt_asm_cc_vis_lag + alt_asm_ccres
alt_asm_cc_skyvis_lag = DSP.downsampler(alt_asm_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
alt_asm_cc_vis_lag = DSP.downsampler(alt_asm_cc_vis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
clean_lags = DSP.downsampler(clean_lags, 1.0*clean_lags.size/ia.lags.size, axis=-1)
clean_lags = clean_lags.ravel()
vis_noise_lag = NP.copy(ia.vis_noise_lag)
asm_cc_skyvis_lag = asm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
asm_cc_vis_lag = asm_cc_vis_lag[truncated_ref_bl_ind,:,:]
vis_noise_lag = vis_noise_lag[truncated_ref_bl_ind,:,:]
delaymat = DLY.delay_envelope(ia.baselines[truncated_ref_bl_ind,:], pc, units='mks')
min_delay = -delaymat[0,:,1]-delaymat[0,:,0]
max_delay = delaymat[0,:,0]-delaymat[0,:,1]
clags = clean_lags.reshape(1,-1)
min_delay = min_delay.reshape(-1,1)
max_delay = max_delay.reshape(-1,1)
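    # Window definitions on the (baseline, lag) plane: the thermal-noise window is
    # |lag| >= max_abs_delay (microseconds); the EoR window lies outside the horizon
    # delay envelope padded by 1/bw; the strict EoR window additionally requires
    # |lag| < 1/coarse_channel_resolution; the foreground wedge is everything inside
    # the horizon limits.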
thermal_noise_window = NP.abs(clags) >= max_abs_delay*1e-6
thermal_noise_window = NP.repeat(thermal_noise_window, ia.baselines[truncated_ref_bl_ind,:].shape[0], axis=0)
EoR_window = NP.logical_or(clags > max_delay+1/bw, clags < min_delay-1/bw)
strict_EoR_window = NP.logical_and(EoR_window, NP.abs(clags) < 1/coarse_channel_resolution)
wedge_window = NP.logical_and(clags <= max_delay, clags >= min_delay)
non_wedge_window = NP.logical_not(wedge_window)
# vis_rms_lag = OPS.rms(asm_cc_vis_lag.reshape(-1,n_snaps), mask=NP.logical_not(NP.repeat(thermal_noise_window.reshape(-1,1), n_snaps, axis=1)), axis=0)
# vis_rms_freq = NP.abs(vis_rms_lag) / NP.sqrt(nchan) / freq_resolution
# T_rms_freq = vis_rms_freq / (2.0 * FCNST.k) * NP.mean(ia.A_eff) * NP.mean(ia.eff_Q) * NP.sqrt(2.0*freq_resolution*NP.asarray(ia.t_acc).reshape(1,-1)) * CNST.Jy
# vis_rms_lag_theory = OPS.rms(vis_noise_lag.reshape(-1,n_snaps), mask=NP.logical_not(NP.repeat(EoR_window.reshape(-1,1), n_snaps, axis=1)), axis=0)
# vis_rms_freq_theory = NP.abs(vis_rms_lag_theory) / NP.sqrt(nchan) / freq_resolution
# T_rms_freq_theory = vis_rms_freq_theory / (2.0 * FCNST.k) * NP.mean(ia.A_eff) * NP.mean(ia.eff_Q) * NP.sqrt(2.0*freq_resolution*NP.asarray(ia.t_acc).reshape(1,-1)) * CNST.Jy
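    # Estimate the noise rms in the lag domain from cells inside the thermal-noise
    # window, convert to an equivalent per-channel flux density (divide by sqrt(nchan)
    # and the channel width), and then to a temperature using A_eff, eff_Q and the
    # radiometer-style sqrt(2 * freq_resolution * t_acc) factor.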
vis_rms_lag = OPS.rms(asm_cc_vis_lag, mask=NP.logical_not(NP.repeat(thermal_noise_window[:,:,NP.newaxis], n_snaps, axis=2)), axis=1)
vis_rms_freq = NP.abs(vis_rms_lag) / NP.sqrt(nchan) / freq_resolution
T_rms_freq = vis_rms_freq / (2.0 * FCNST.k) * NP.mean(ia.A_eff[truncated_ref_bl_ind,:]) * NP.mean(ia.eff_Q[truncated_ref_bl_ind,:]) * NP.sqrt(2.0*freq_resolution*NP.asarray(ia.t_acc).reshape(1,1,-1)) * CNST.Jy
vis_rms_lag_theory = OPS.rms(vis_noise_lag, mask=NP.logical_not(NP.repeat(EoR_window[:,:,NP.newaxis], n_snaps, axis=2)), axis=1)
vis_rms_freq_theory = NP.abs(vis_rms_lag_theory) / NP.sqrt(nchan) / freq_resolution
T_rms_freq_theory = vis_rms_freq_theory / (2.0 * FCNST.k) * NP.mean(ia.A_eff[truncated_ref_bl_ind,:]) * NP.mean(ia.eff_Q[truncated_ref_bl_ind,:]) * NP.sqrt(2.0*freq_resolution*NP.asarray(ia.t_acc).reshape(1,1,-1)) * CNST.Jy
if max_abs_delay is not None:
small_delays_ind = NP.abs(clean_lags) <= max_abs_delay * 1e-6
clean_lags = clean_lags[small_delays_ind]
asm_cc_vis_lag = asm_cc_vis_lag[:,small_delays_ind,:]
asm_cc_skyvis_lag = asm_cc_skyvis_lag[:,small_delays_ind,:]
if use_alt_spindex:
alt_asm_cc_vis_lag = alt_asm_cc_vis_lag[:,small_delays_ind,:]
alt_asm_cc_skyvis_lag = alt_asm_cc_skyvis_lag[:,small_delays_ind,:]
## Read in FHD data and other required information
pointing_file = '/data3/t_nithyanandan/project_MWA/Aug23_obsinfo.txt'
pointing_info_from_file = NP.loadtxt(pointing_file, comments='#', usecols=(1,2,3), delimiter=',')
obs_id = NP.loadtxt(pointing_file, comments='#', usecols=(0,), delimiter=',', dtype=str)
obsfile_lst = 15.0 * pointing_info_from_file[:,2]
obsfile_pointings_altaz = pointing_info_from_file[:,:2].reshape(-1,2)
obsfile_pointings_dircos = GEOM.altaz2dircos(obsfile_pointings_altaz, units='degrees')
obsfile_pointings_hadec = GEOM.altaz2hadec(obsfile_pointings_altaz, latitude, units='degrees')
common_bl_ind_in_ref_snapshots = []
fhd_info = {}
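    # For each FHD obsid: load the reformatted delay spectra, drop baselines with empty
    # data, keep only baselines common to the simulated reference set, fold 'negative'
    # orientations by 180 degrees, and rescale by 2.78 * nchan * freq_resolution / fhd_C
    # (presumably to bring the data to the same Jy Hz convention as the simulations).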
for j in range(len(fhd_obsid)):
fhd_infile = '/data3/t_nithyanandan/project_MWA/fhd_delay_spectrum_{0:0d}_reformatted.npz'.format(fhd_obsid[j])
fhd_data = NP.load(fhd_infile)
fhd_vis_lag_noisy = fhd_data['fhd_vis_lag_noisy']
fhd_C = fhd_data['fhd_C']
valid_ind = NP.logical_and(NP.abs(NP.sum(fhd_vis_lag_noisy[:,:,0],axis=1))!=0.0, NP.abs(NP.sum(fhd_C[:,:,0],axis=1))!=0.0)
fhd_C = fhd_C[valid_ind,:,:]
fhd_vis_lag_noisy = fhd_vis_lag_noisy[valid_ind,:,:]
fhd_delays = fhd_data['fhd_delays']
fhdfile_bl_id = fhd_data['fhd_bl_id'][valid_ind]
fhdfile_bl_length = fhd_data['fhd_bl_length'][valid_ind]
common_bl_id = NP.intersect1d(truncated_ref_bl_id, fhdfile_bl_id, assume_unique=True)
common_bl_ind_in_ref = NP.in1d(truncated_ref_bl_id, common_bl_id, assume_unique=True)
common_bl_ind_in_fhd = NP.in1d(fhdfile_bl_id, common_bl_id, assume_unique=True)
fhd_bl_id = fhdfile_bl_id[common_bl_ind_in_fhd]
fhd_bl_length = fhdfile_bl_length[common_bl_ind_in_fhd]
fhd_k_perp = 2 * NP.pi * fhd_bl_length / (FCNST.c/freq) / cosmodel100.comoving_transverse_distance(z=redshift).value
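        # k_perp = 2*pi*|b| / lambda / D_M(z), with D_M the comoving transverse distance
        # at the band's HI redshift; cosmodel100 presumably uses H0 = 100 km/s/Mpc so
        # k_perp comes out in h Mpc^-1, as labelled on the plots.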
fhd_bl = truncated_ref_bl[common_bl_ind_in_ref, :]
fhd_bl_orientation = truncated_ref_bl_orientation[common_bl_ind_in_ref]
common_bl_ind_in_ref_snapshots += [common_bl_ind_in_ref]
fhd_neg_bl_orientation_ind = fhd_bl_orientation > 90.0 + 0.5*180.0/n_bins_baseline_orientation
fhd_bl_orientation[fhd_neg_bl_orientation_ind] -= 180.0
fhd_bl[fhd_neg_bl_orientation_ind,:] = -fhd_bl[fhd_neg_bl_orientation_ind,:]
fhd_C = fhd_C[common_bl_ind_in_fhd,:,:]
fhd_vis_lag_noisy = fhd_vis_lag_noisy[common_bl_ind_in_fhd,:,:]*2.78*nchan*freq_resolution/fhd_C
fhd_obsid_pointing_dircos = obsfile_pointings_dircos[obs_id==str(fhd_obsid[j]),:].reshape(1,-1)
fhd_obsid_pointing_altaz = obsfile_pointings_altaz[obs_id==str(fhd_obsid[j]),:].reshape(1,-1)
fhd_obsid_pointing_hadec = obsfile_pointings_hadec[obs_id==str(fhd_obsid[j]),:].reshape(1,-1)
fhd_lst = NP.asscalar(obsfile_lst[obs_id==str(fhd_obsid[j])])
fhd_obsid_pointing_radec = NP.copy(fhd_obsid_pointing_hadec)
fhd_obsid_pointing_radec[0,0] = fhd_lst - fhd_obsid_pointing_hadec[0,0]
fhd_delaymat = DLY.delay_envelope(fhd_bl, pc, units='mks')
fhd_min_delay = -fhd_delaymat[0,:,1]-fhd_delaymat[0,:,0]
fhd_max_delay = fhd_delaymat[0,:,0]-fhd_delaymat[0,:,1]
fhd_min_delay = fhd_min_delay.reshape(-1,1)
fhd_max_delay = fhd_max_delay.reshape(-1,1)
fhd_thermal_noise_window = NP.abs(fhd_delays) >= max_abs_delay*1e-6
fhd_thermal_noise_window = fhd_thermal_noise_window.reshape(1,-1)
fhd_thermal_noise_window = NP.repeat(fhd_thermal_noise_window, fhd_bl.shape[0], axis=0)
fhd_EoR_window = NP.logical_or(fhd_delays > fhd_max_delay+1/bw, fhd_delays < fhd_min_delay-1/bw)
fhd_wedge_window = NP.logical_and(fhd_delays <= fhd_max_delay, fhd_delays >= fhd_min_delay)
fhd_non_wedge_window = NP.logical_not(fhd_wedge_window)
fhd_vis_rms_lag = OPS.rms(fhd_vis_lag_noisy[:,:,0], mask=NP.logical_not(fhd_thermal_noise_window), axis=1)
fhd_vis_rms_freq = NP.abs(fhd_vis_rms_lag) / NP.sqrt(nchan) / freq_resolution
if max_abs_delay is not None:
small_delays_ind = NP.abs(fhd_delays) <= max_abs_delay * 1e-6
fhd_delays = fhd_delays[small_delays_ind]
fhd_vis_lag_noisy = fhd_vis_lag_noisy[:,small_delays_ind,:]
fhd_k_prll = 2 * NP.pi * fhd_delays * cosmodel100.H0.value * CNST.rest_freq_HI * cosmodel100.efunc(z=redshift) / FCNST.c / (1+redshift)**2 * 1e3
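        # k_parallel = 2*pi*tau * H0 * f_21 * E(z) / (c * (1+z)^2); the trailing 1e3
        # converts H0 from km/s/Mpc to m/s/Mpc, giving k_parallel in Mpc^-1 (h Mpc^-1
        # under the H0 = 100 convention suggested by the name cosmodel100).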
fhd_info[fhd_obsid[j]] = {}
fhd_info[fhd_obsid[j]]['bl_id'] = fhd_bl_id
fhd_info[fhd_obsid[j]]['bl'] = fhd_bl
fhd_info[fhd_obsid[j]]['bl_length'] = fhd_bl_length
fhd_info[fhd_obsid[j]]['k_perp'] = fhd_k_perp
fhd_info[fhd_obsid[j]]['bl_orientation'] = fhd_bl_orientation
fhd_info[fhd_obsid[j]]['delays'] = fhd_delays
fhd_info[fhd_obsid[j]]['k_prll'] = fhd_k_prll
fhd_info[fhd_obsid[j]]['C'] = fhd_C
fhd_info[fhd_obsid[j]]['vis_lag_noisy'] = fhd_vis_lag_noisy
fhd_info[fhd_obsid[j]]['lst'] = fhd_lst
fhd_info[fhd_obsid[j]]['pointing_radec'] = fhd_obsid_pointing_radec
fhd_info[fhd_obsid[j]]['pointing_hadec'] = fhd_obsid_pointing_hadec
fhd_info[fhd_obsid[j]]['pointing_altaz'] = fhd_obsid_pointing_altaz
fhd_info[fhd_obsid[j]]['pointing_dircos'] = fhd_obsid_pointing_dircos
fhd_info[fhd_obsid[j]]['min_delays'] = fhd_min_delay
fhd_info[fhd_obsid[j]]['max_delays'] = fhd_max_delay
fhd_info[fhd_obsid[j]]['rms_lag'] = fhd_vis_rms_lag
fhd_info[fhd_obsid[j]]['rms_freq'] = fhd_vis_rms_freq
if (dspec_min is None) or (dspec_max is None):
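        # Global color-scale limits: take the extreme |V(b, tau)| over both the simulated
        # and FHD delay spectra, then square and convert from (Jy Hz)^2 to delay power
        # spectrum units via volfactor1 * volfactor2 * Jy2K^2 (K^2 (Mpc/h)^3, matching
        # the colorbar labels below).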
dspec_min = min(min([NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j]).min() for j in xrange(n_snaps)]), min([NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy']).min() for j in xrange(len(fhd_obsid))]))
dspec_max = max(max([NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j]).max() for j in xrange(n_snaps)]), max([NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy']).max() for j in xrange(len(fhd_obsid))]))
dspec_min = dspec_min**2 * volfactor1 * volfactor2 * Jy2K**2
dspec_max = dspec_max**2 * volfactor1 * volfactor2 * Jy2K**2
cardinal_blo = 180.0 / n_bins_baseline_orientation * (NP.arange(n_bins_baseline_orientation)-1).reshape(-1,1)
cardinal_bll = 100.0
cardinal_bl = cardinal_bll * NP.hstack((NP.cos(NP.radians(cardinal_blo)), NP.sin(NP.radians(cardinal_blo)), NP.zeros_like(cardinal_blo)))
small_delays_EoR_window = EoR_window.T
small_delays_strict_EoR_window = strict_EoR_window.T
small_delays_wedge_window = wedge_window.T
if max_abs_delay is not None:
small_delays_EoR_window = small_delays_EoR_window[small_delays_ind,:]
small_delays_strict_EoR_window = small_delays_strict_EoR_window[small_delays_ind,:]
small_delays_wedge_window = small_delays_wedge_window[small_delays_ind,:]
small_delays_non_wedge_window = NP.logical_not(small_delays_wedge_window)
data_sim_ratio = []
data_sim_difference_fraction = []
# data_sim_difference_fraction = NP.zeros(len(fhd_obsid))
if use_alt_spindex:
alt_data_sim_ratio = []
alt_data_sim_difference_fraction = []
relevant_EoR_window = []
relevant_wedge_window = []
relevant_non_wedge_window = []
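    # These lists accumulate, per snapshot, the data/model ratio and fractional
    # difference maps plus the wedge / EoR window masks restricted to the baselines
    # common to FHD and the simulation; they are filled in the loop that follows the
    # plot_05 block.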
if plot_05:
descriptor_str = ['off-zenith', 'zenith']
# Plot FHD and modeled delay power spectra all snapshots together
fig, axs = PLT.subplots(nrows=n_snaps, ncols=2, sharex=True, sharey=True, figsize=(12,6))
for j in xrange(n_snaps):
fhddspec = axs[j,0].pcolorfast(fhd_info[fhd_obsid[j]]['bl_length'], 1e6*clean_lags, NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:-1,:-1,0].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = axs[j,0].plot(fhd_info[fhd_obsid[j]]['bl_length'], 1e6*min_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
horizont = axs[j,0].plot(fhd_info[fhd_obsid[j]]['bl_length'], 1e6*max_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
axs[j,0].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[j,0].set_aspect('auto')
axs[j,0].text(0.5, 0.9, descriptor_str[j]+' data', transform=axs[j,0].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
simdspec = axs[j,1].pcolorfast(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*clean_lags, NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j][:-1],:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = axs[j,1].plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*min_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
horizont = axs[j,1].plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*max_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
axs[j,1].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[j,1].set_aspect('auto')
axs[j,1].text(0.5, 0.9, descriptor_str[j]+' model', transform=axs[j,1].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
for j in xrange(n_snaps):
axs_kprll = axs[j,1].twinx()
axs_kprll.set_yticks(kprll(axs[j,1].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[j,1].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
if j == 0:
for col in range(2):
axs_kperp = axs[j,col].twiny()
axs_kperp.set_xticks(kperp(axs[j,col].get_xticks()*freq/FCNST.c, redshift))
axs_kperp.set_xlim(kperp(NP.asarray(axs[j,col].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axs_kperp.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(wspace=0, hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.93, 0.125, 0.02, 0.74])
cbar = fig.colorbar(simdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.82)
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_fhd_sim_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_fhd_sim_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
# Plot the FHD delay power spectra all snapshots together
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
imdspec = axs[j].pcolorfast(fhd_info[fhd_obsid[j]]['bl_length'], 1e6*clean_lags, NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:-1,:-1,0].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = axs[j].plot(fhd_info[fhd_obsid[j]]['bl_length'], 1e6*min_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
horizont = axs[j].plot(fhd_info[fhd_obsid[j]]['bl_length'], 1e6*max_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
# kcontour = axs[j].contour(fhd_info[fhd_obsid[j]]['bl_length'], 1e6*clean_lags, NP.sqrt((fhd_info[fhd_obsid[j]]['k_perp'].reshape(1,-1))**2+(fhd_info[fhd_obsid[j]]['k_prll'].reshape(-1,1))**2), levels=[0.04, 0.08, 0.16, 0.32], colors='k', linewidth=1.0)
# axs[j].clabel(kcontour, inline=1, fontsize=8, colors='k', fmt='%0.2f')
axs[j].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[j].set_aspect('auto')
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
for j in xrange(n_snaps):
axs_kprll = axs[j].twinx()
axs_kprll.set_yticks(kprll(axs[j].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[j].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
if j == 0:
axs_kperp = axs[j].twiny()
axs_kperp.set_xticks(kperp(axs[j].get_xticks()*freq/FCNST.c, redshift))
axs_kperp.set_xlim(kperp(NP.asarray(axs[j].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axs_kperp.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_fhd_data_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_fhd_data_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
# Plot the modeled delay power spectra all snapshots together
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
imdspec = axs[j].pcolorfast(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*clean_lags, NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j][:-1],:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = axs[j].plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*min_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
horizont = axs[j].plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*max_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
axs[j].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[j].set_aspect('auto')
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
for j in xrange(n_snaps):
axs_kprll = axs[j].twinx()
axs_kprll.set_yticks(kprll(axs[j].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[j].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
if j == 0:
axs_kperp = axs[j].twiny()
axs_kperp.set_xticks(kperp(axs[j].get_xticks()*freq/FCNST.c, redshift))
axs_kperp.set_xlim(kperp(NP.asarray(axs[j].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axs_kperp.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
# fig = PLT.figure(figsize=(6,6))
# for j in xrange(n_snaps):
# # Determine the baselines common to simulations and data
# # common_bl_ind = NP.squeeze(NP.where(NP.in1d(truncated_ref_bl_id, fhd_info[fhd_obsid[j]]['bl_id'])))
# # sortind = NP.argsort(truncated_ref_bl_length[common_bl_ind], kind='heapsort')
# # bloh, bloe, blon, blori = OPS.binned_statistic(fhd_info[fhd_obsid[j]]['bl_orientation'], statistic='count', bins=n_bins_baseline_orientation, range=[(-90.0+0.5*180.0/n_bins_baseline_orientation, 90.0+0.5*180.0/n_bins_baseline_orientation)])
# ax = fig.add_subplot(n_snaps,1,j+1)
# ax.set_ylim(NP.amin(clean_lags*1e6), NP.amax(clean_lags*1e6))
# ax.set_ylabel(r'lag [$\mu$s]', fontsize=18)
# ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=18)
# imdspec = ax.pcolorfast(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*clean_lags, NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j][:-1],:-1,j].T), norm=PLTC.LogNorm(vmin=1e6, vmax=dspec_max))
# # imdspec = ax.pcolorfast(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*clean_lags, NP.abs(asm_cc_vis_lag[common_bl_ind[sortind[:-1]],:-1,j].T), norm=PLTC.LogNorm(vmin=1e6, vmax=dspec_max))
# horizonb = ax.plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*min_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls='-', lw=1.5)
# horizont = ax.plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*max_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls='-', lw=1.5)
# ax.set_aspect('auto')
# cbax = fig.add_axes([0.86, 0.125, 0.02, 0.84])
# cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
# cbax.set_ylabel('Jy Hz', labelpad=0, fontsize=18)
# PLT.tight_layout()
# fig.subplots_adjust(right=0.83)
# # fig.subplots_adjust(top=0.9)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_sim_data_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_sim_data_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
# Plot each snapshot separately
for j in xrange(n_snaps):
fig = PLT.figure(figsize=(6,6))
ax = fig.add_subplot(111)
imdspec = ax.pcolorfast(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*clean_lags, NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j][:-1],:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = ax.plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*min_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
horizont = ax.plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*max_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
ax.set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
ax.set_aspect('auto')
# ax.text(0.5, 0.9, descriptor_str[j], transform=ax.transAxes, fontsize=14, weight='semibold', ha='center', color='white')
ax_kprll = ax.twinx()
ax_kprll.set_yticks(kprll(ax.get_yticks()*1e-6, redshift))
ax_kprll.set_ylim(kprll(NP.asarray(ax.get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
ax_kprll.yaxis.set_major_formatter(yformatter)
ax_kperp = ax.twiny()
ax_kperp.set_xticks(kperp(ax.get_xticks()*freq/FCNST.c, redshift))
ax_kperp.set_xlim(kperp(NP.asarray(ax.get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
ax_kperp.xaxis.set_major_formatter(xformatter)
ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium')
ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium')
ax_kprll.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
ax_kperp.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_sim_data_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}_snapshot_{1:1d}'.format(oversampling_factor, j)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_sim_data_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}_snapshot_{1:1d}'.format(oversampling_factor, j)+'.eps', bbox_inches=0)
for j in xrange(n_snaps):
# Determine the baselines common to simulations and data
# common_bl_ind = NP.squeeze(NP.where(NP.in1d(truncated_ref_bl_id, fhd_info[fhd_obsid[j]]['bl_id'])))
# sortind = NP.argsort(truncated_ref_bl_length[common_bl_ind], kind='heapsort')
# bloh, bloe, blon, blori = OPS.binned_statistic(fhd_info[fhd_obsid[j]]['bl_orientation'], statistic='count', bins=n_bins_baseline_orientation, range=[(-90.0+0.5*180.0/n_bins_baseline_orientation, 90.0+0.5*180.0/n_bins_baseline_orientation)])
# data_sim_ratio = NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T) / NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j].T)
data_sim_ratio += [NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T) / NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j].T)]
data_sim_difference_fraction += [(NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T) - NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j].T)) / NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T)]
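        # data_sim_ratio: |FHD data| / |simulated model| per (lag, baseline) cell;
        # data_sim_difference_fraction: (|data| - |model|) / |data| for the same cells.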
if use_alt_spindex:
# alt_data_sim_ratio += [NP.abs(alt_asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j].T) / NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j].T)]
# alt_data_sim_difference_fraction += [(NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T) - NP.abs(alt_asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j].T)) / NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T)]
alt_data_sim_ratio += [NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T) / NP.abs(alt_asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j].T)]
alt_data_sim_difference_fraction += [(NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T) - NP.abs(alt_asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j].T)) / NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T)]
relevant_EoR_window += [small_delays_EoR_window[:,common_bl_ind_in_ref_snapshots[j]]]
relevant_wedge_window += [small_delays_wedge_window[:,common_bl_ind_in_ref_snapshots[j]]]
relevant_non_wedge_window += [small_delays_non_wedge_window[:,common_bl_ind_in_ref_snapshots[j]]]
# data_sim_difference_fraction[j] = NP.mean(NP.abs(NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][sortind,:,0].T) - NP.abs(asm_cc_vis_lag[common_bl_ind[sortind],:,j].T))/NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][sortind,:,0].T))
# data_sim_difference_fraction[j] = NP.mean(NP.abs(NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][sortind,:,0].T) - NP.abs(asm_cc_vis_lag[common_bl_ind[sortind],:,j].T))/NP.abs(asm_cc_vis_lag[common_bl_ind[sortind],:,j].T))
# data_sim_difference_fraction[j] = NP.sum(NP.abs(NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T) - NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j].T))) / NP.sum(NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T))
# data_sim_difference_fraction[j] = NP.sum(NP.abs(NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][sortind,:,0].T) - NP.abs(asm_cc_vis_lag[common_bl_ind[sortind],:,j].T))) / NP.sum(NP.abs(asm_cc_vis_lag[common_bl_ind[sortind],:,j].T))
# data_sim_difference_fraction[j] = NP.abs(NP.sum(NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][sortind,:,0].T)-NP.abs(NP.mean(fhd_info[fhd_obsid[j]]['rms_lag'])) - NP.abs(asm_cc_vis_lag[common_bl_ind[sortind],:,j].T)+NP.abs(NP.mean(vis_rms_lag)))) / NP.sum(NP.abs(asm_cc_vis_lag[common_bl_ind[sortind],:,j].T)-NP.abs(NP.mean(vis_rms_lag)))
# mu = NP.mean(NP.log10(data_sim_ratio[relevant_EoR_window]))
# sig= NP.std(NP.log10(data_sim_ratio[relevant_EoR_window]))
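    # plot_06: image log10(data/model) over the (baseline, lag) plane for each snapshot,
    # then histogram log10(ratio) restricted to the wedge (optionally comparing against
    # the alternate spectral-index model).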
if plot_06:
# 06) Plot FHD data to simulation ratio on all baselines combined
fig = PLT.figure(figsize=(6,6))
for j in xrange(n_snaps):
ax = fig.add_subplot(n_snaps,1,j+1)
ax.set_ylim(NP.amin(clean_lags*1e6), NP.amax(clean_lags*1e6))
ax.set_ylabel(r'lag [$\mu$s]', fontsize=18)
ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=18)
imdspec = ax.pcolorfast(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*clean_lags, NP.log10(data_sim_ratio[j][:-1,:-1]), vmin=-1.0, vmax=1.0)
# imdspec = ax.pcolorfast(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*clean_lags, data_sim_ratio[:-1,:-1], norm=PLTC.LogNorm(vmin=1e-1, vmax=1e1))
horizonb = ax.plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*min_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
horizont = ax.plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*max_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
ax.set_aspect('auto')
cbax = fig.add_axes([0.91, 0.125, 0.02, 0.84])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
# cbax.set_ylabel('Jy Hz', labelpad=0, fontsize=18)
PLT.tight_layout()
fig.subplots_adjust(right=0.88)
# fig.subplots_adjust(top=0.9)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_sim_data_ratio_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_sim_data_ratio_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
descriptor_str = ['off-zenith', 'zenith']
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
hpdf, bins, patches = axs[j].hist(NP.log10(data_sim_ratio[j][relevant_wedge_window[j]]), bins=50, normed=True, cumulative=False, histtype='step', linewidth=2.5, color='black')
mu = NP.mean(NP.log10(data_sim_ratio[j][relevant_wedge_window[j]]))
sig = NP.std(NP.log10(data_sim_ratio[j][relevant_wedge_window[j]]))
if use_alt_spindex:
alt_hpdf, alt_bins, alt_patches = axs[j].hist(NP.log10(alt_data_sim_ratio[j][relevant_wedge_window[j]]), bins=50, normed=True, cumulative=False, histtype='step', linewidth=2.5, color='gray')
alt_mu = NP.mean(NP.log10(alt_data_sim_ratio[j][relevant_wedge_window[j]]))
alt_sig = NP.std(NP.log10(alt_data_sim_ratio[j][relevant_wedge_window[j]]))
print NP.median(NP.abs(NP.log10(data_sim_ratio[j][relevant_wedge_window[j]]) - NP.median(NP.log10(data_sim_ratio[j][relevant_wedge_window[j]])))), NP.median(NP.abs(NP.log10(alt_data_sim_ratio[j][relevant_wedge_window[j]]) - NP.median(NP.log10(alt_data_sim_ratio[j][relevant_wedge_window[j]]))))
else:
print NP.median(NP.abs(NP.log10(data_sim_ratio[j][relevant_wedge_window[j]]) - NP.median(NP.log10(data_sim_ratio[j][relevant_wedge_window[j]]))))
# print mu, sig
# gauss_model = mlab.normpdf(bins, mu, sig)
# modl = ax.plot(bins, gauss_model, 'k-')
# axs[j].set_xlabel(r'$\log_{10}\,\rho$', fontsize=24, weight='medium')
axs[j].set_xlim(-2, 2)
axs[j].set_ylim(0.0, 1.1)
axs[j].set_aspect('auto')
axs[j].tick_params(which='major', length=12, labelsize=12)
axs[j].tick_params(which='minor', length=6)
axs[j].text(0.1, 0.8, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='left', color='black')
fig.subplots_adjust(hspace=0)
# PLT.tight_layout()
fig.subplots_adjust(top=0.95)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_xlabel(r'$\log_{10}\,\rho$', fontsize=24, weight='medium', labelpad=20)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'histogram_wedge_sim_data_log_ratio_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'histogram_wedge_sim_data_log_ratio_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
if plot_09:
# 09) Plot histogram of fractional differences between FHD data and simulation
fig = PLT.figure(figsize=(6,6))
for j in xrange(n_snaps):
ax = fig.add_subplot(n_snaps,1,j+1)
hpdf, bins, patches = ax.hist(data_sim_difference_fraction[j][relevant_wedge_window[j]], bins=50, normed=True, cumulative=False, histtype='step', linewidth=2.5, color='black', label='pdf (wedge)')
ax.set_xlabel(r'$\rho-1$', fontsize=24, weight='medium')
ax.set_xlim(-2, 2)
PLT.tight_layout()
fig.subplots_adjust(bottom=0.15)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'histogram_wedge_sim_data_snr_log_fractional_diff_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'histogram_wedge_sim_data_snr_log_fractional_diff_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
if plot_07 or plot_08:
dsm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_dsm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+bpass_shape
csm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_csm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+bpass_shape
hdulist = fits.open(dsm_CLEAN_infile+'.fits')
dsm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
dsm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
dsm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
dsm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
dsm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_vis[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_vis[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(dsm_cc_skyvis, axis=1),axes=1) * dsm_cc_skyvis.shape[1] * freq_resolution
dsm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(dsm_cc_skyvis_res, axis=1),axes=1) * dsm_cc_skyvis.shape[1] * freq_resolution
dsm_cc_skyvis_lag = dsm_cc_skyvis_lag + dsm_ccres_sky
dsm_cc_vis_lag = NP.fft.fftshift(NP.fft.ifft(dsm_cc_vis, axis=1),axes=1) * dsm_cc_vis.shape[1] * freq_resolution
dsm_ccres = NP.fft.fftshift(NP.fft.ifft(dsm_cc_vis_res, axis=1),axes=1) * dsm_cc_vis.shape[1] * freq_resolution
dsm_cc_vis_lag = dsm_cc_vis_lag + dsm_ccres
hdulist = fits.open(csm_CLEAN_infile+'.fits')
csm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
csm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
csm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
csm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
csm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = csm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = csm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_vis[simdata_neg_bl_orientation_ind,:,:] = csm_cc_vis[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:] = csm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(csm_cc_skyvis, axis=1),axes=1) * csm_cc_skyvis.shape[1] * freq_resolution
csm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(csm_cc_skyvis_res, axis=1),axes=1) * csm_cc_skyvis.shape[1] * freq_resolution
csm_cc_skyvis_lag = csm_cc_skyvis_lag + csm_ccres_sky
csm_cc_vis_lag = NP.fft.fftshift(NP.fft.ifft(csm_cc_vis, axis=1),axes=1) * csm_cc_vis.shape[1] * freq_resolution
csm_ccres = NP.fft.fftshift(NP.fft.ifft(csm_cc_vis_res, axis=1),axes=1) * csm_cc_vis.shape[1] * freq_resolution
csm_cc_vis_lag = csm_cc_vis_lag + csm_ccres
dsm_cc_skyvis_lag = DSP.downsampler(dsm_cc_skyvis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
dsm_cc_vis_lag = DSP.downsampler(dsm_cc_vis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
csm_cc_skyvis_lag = DSP.downsampler(csm_cc_skyvis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
csm_cc_vis_lag = DSP.downsampler(csm_cc_vis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
dsm_cc_skyvis_lag = dsm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
dsm_cc_vis_lag = dsm_cc_vis_lag[truncated_ref_bl_ind,:,:]
csm_cc_skyvis_lag = csm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
csm_cc_vis_lag = csm_cc_vis_lag[truncated_ref_bl_ind,:,:]
if max_abs_delay is not None:
dsm_cc_skyvis_lag = dsm_cc_skyvis_lag[:,small_delays_ind,:]
csm_cc_skyvis_lag = csm_cc_skyvis_lag[:,small_delays_ind,:]
dsm_cc_skyvis_lag_err = dsm_cc_skyvis_lag * NP.log(dsm_base_freq/freq) * dsm_dalpha
csm_cc_skyvis_lag_err = csm_cc_skyvis_lag * NP.log(csm_base_freq/freq) * csm_dalpha
# cc_skyvis_lag_err = NP.abs(dsm_cc_skyvis_lag_err) + NP.abs(csm_cc_skyvis_lag_err)
# cc_skyvis_lag_err = NP.abs(dsm_cc_skyvis_lag_err + csm_cc_skyvis_lag_err)
cc_skyvis_lag_err = NP.sqrt(NP.abs(dsm_cc_skyvis_lag_err)**2 + NP.abs(csm_cc_skyvis_lag_err)**2)
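        # Propagate the spectral-index uncertainty of each foreground component as
        # dV ~ V * ln(nu_base / nu) * dalpha (dsm_dalpha / csm_dalpha are presumably the
        # spectral-index uncertainties), and combine the diffuse (dsm) and compact-source
        # (csm) contributions in quadrature.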
if plot_07:
# 07) Plot ratio of differences between FHD data and simulation to expected error on all baselines combined
fig = PLT.figure(figsize=(6,6))
for j in xrange(n_snaps):
err_log_ratio_fhd = NP.abs(fhd_info[fhd_obsid[j]]['rms_lag'].T)/NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T)
err_log_ratio_sim = NP.sqrt(cc_skyvis_lag_err[common_bl_ind_in_ref_snapshots[j],:,j]**2 + NP.abs(vis_rms_lag[common_bl_ind_in_ref_snapshots[j],:,j])**2).T / NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j]).T
err_log_ratio = NP.sqrt(err_log_ratio_sim**2 + err_log_ratio_fhd**2)
data_sim_log_ratio = NP.log10(data_sim_ratio[j])
# data_sim_log_ratio = NP.log10(NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T) / NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j].T))
snr_log_ratio = data_sim_log_ratio / err_log_ratio
ax = fig.add_subplot(n_snaps,1,j+1)
ax.set_ylim(NP.amin(clean_lags*1e6), NP.amax(clean_lags*1e6))
ax.set_ylabel(r'lag [$\mu$s]', fontsize=18)
            ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=18)
imdspec = ax.pcolorfast(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*clean_lags, snr_log_ratio[:-1,:-1], vmin=-2, vmax=2)
horizonb = ax.plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*min_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
horizont = ax.plot(truncated_ref_bl_length[common_bl_ind_in_ref_snapshots[j]], 1e6*max_delay[common_bl_ind_in_ref_snapshots[j]].ravel(), color='white', ls=':', lw=1.5)
ax.set_aspect('auto')
cbax = fig.add_axes([0.86, 0.125, 0.02, 0.84])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
PLT.tight_layout()
fig.subplots_adjust(right=0.83)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_sim_data_snr_log_ratio_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noisy_PS_sim_data_snr_log_ratio_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
fig = PLT.figure(figsize=(6,8))
for j in xrange(n_snaps):
err_log_ratio_fhd = NP.abs(fhd_info[fhd_obsid[j]]['rms_lag'].T)/NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T)
err_log_ratio_sim = NP.sqrt(cc_skyvis_lag_err[common_bl_ind_in_ref_snapshots[j],:,j]**2 + NP.abs(vis_rms_lag[common_bl_ind_in_ref_snapshots[j],:,j])**2).T / NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j]).T
err_log_ratio = NP.sqrt(err_log_ratio_sim**2 + err_log_ratio_fhd**2)
data_sim_log_ratio = NP.log10(data_sim_ratio[j])
# data_sim_log_ratio = NP.log10(NP.abs(fhd_info[fhd_obsid[j]]['vis_lag_noisy'][:,:,0].T) / NP.abs(asm_cc_vis_lag[common_bl_ind_in_ref_snapshots[j],:,j].T))
snr_data_sim_log_ratio = data_sim_log_ratio / err_log_ratio
# relevant_EoR_window = small_delays_EoR_window[:,common_bl_ind_in_ref_snapshots[j]]
# relevant_wedge_window = small_delays_wedge_window[:,common_bl_ind_in_ref_snapshots[j]]
# relevant_non_wedge_window = small_delays_non_wedge_window[:,common_bl_ind_in_ref_snapshots[j]]
ax = fig.add_subplot(n_snaps,1,j+1)
hpdf, bins, patches = ax.hist(snr_data_sim_log_ratio[relevant_wedge_window[j]], bins=50, normed=True, cumulative=False, histtype='step', linewidth=2.5, color='black', label='pdf (wedge)')
confidence_level = (NP.sum(hpdf[bins[:-1] <= 1.0]) - NP.sum(hpdf[bins[:-1] <= -1.0])) * (bins[1]-bins[0])
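            # confidence_level: approximate fraction of wedge cells whose normalized
            # log-ratio falls within [-1, 1], i.e. data and model agreeing within the
            # estimated uncertainty.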
print confidence_level
hpdf, bins, patches = ax.hist(snr_data_sim_log_ratio[relevant_non_wedge_window[j]], bins=50, normed=True, cumulative=False, histtype='step', linewidth=2.5, color='black', label='pdf (outside)', linestyle='dashed')
            hcdf, bins, patches = ax.hist(snr_data_sim_log_ratio[relevant_wedge_window[j]], bins=50, normed=True, cumulative=True, histtype='step', linewidth=2.5, color='gray', label='cdf (wedge)')
            hcdf, bins, patches = ax.hist(snr_data_sim_log_ratio[relevant_non_wedge_window[j]], bins=50, normed=True, cumulative=True, histtype='step', linewidth=2.5, color='gray', label='cdf (outside)', linestyle='dashed')
ax.set_xlabel(r'$\frac{\mathrm{log}_{10}\,\rho}{\Delta\,\mathrm{log}_{10}\,\rho}$', fontsize=24, weight='medium')
# ax.set_xlabel('log(ratio) / err[log(ratio)]', fontsize=16, weight='medium')
ax.set_xlim(-2, 2)
ax.axvline(x=-1.0, ymax=0.67, color='black', ls=':', lw=2)
ax.axvline(x=1.0, ymax=0.67, color='black', ls=':', lw=2)
l1 = PLT.Line2D(range(1), range(1), color='black', linestyle='-', linewidth=2.5)
l2 = PLT.Line2D(range(1), range(1), color='black', linestyle='--', linewidth=2.5)
l3 = PLT.Line2D(range(1), range(1), color='gray', linestyle='-', linewidth=2.5)
l4 = PLT.Line2D(range(1), range(1), color='gray', linestyle='--', linewidth=2.5)
pdflegend = PLT.legend((l1, l2), ('pdf (wedge)', 'pdf (outside)'), loc='upper left', frameon=False)
ax = PLT.gca().add_artist(pdflegend)
cdflegend = PLT.legend((l3, l4), ('cdf (wedge)', 'cdf (outside)'), loc='upper right', frameon=False)
# ax.text(0.05, 0.7, '{0:.1f} %'.format(confidence_level*100), horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
PLT.tight_layout()
fig.subplots_adjust(bottom=0.15)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'histogram_wedge_sim_data_snr_log_ratio_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'histogram_wedge_sim_data_snr_log_ratio_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
if plot_16:
# 16) Plot average thermal noise in simulations and data as a function of baseline length
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
axs[j].plot(truncated_ref_bl_length, NP.abs(vis_rms_lag[:,0,j]).ravel(), 'k.', label='Simulation')
axs[j].plot(fhd_info[fhd_obsid[j]]['bl_length'], NP.abs(fhd_info[fhd_obsid[j]]['rms_lag']).ravel(), 'r.', label='MWA Data')
axs[j].set_xlim(0.0, truncated_ref_bl_length.max())
axs[j].set_yscale('log')
axs[j].set_aspect('auto')
legend = axs[j].legend(loc='upper right')
legend.draw_frame(False)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=24, weight='medium', labelpad=20)
big_ax.set_ylabel(r'$V_{b\tau}^\mathrm{rms}(\mathbf{b})$ [Jy Hz]', fontsize=24, weight='medium', labelpad=20)
if plot_10 or plot_11 or plot_12 or plot_13 or plot_14:
infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)
asm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+bpass_shape
dsm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_dsm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+bpass_shape
csm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_csm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+bpass_shape
ia = RI.InterferometerArray(None, None, None, init_file=infile+'.fits')
simdata_bl_orientation = NP.angle(ia.baselines[:,0] + 1j * ia.baselines[:,1], deg=True)
simdata_neg_bl_orientation_ind = simdata_bl_orientation > 90.0 + 0.5*180.0/n_bins_baseline_orientation
simdata_bl_orientation[simdata_neg_bl_orientation_ind] -= 180.0
ia.baselines[simdata_neg_bl_orientation_ind,:] = -ia.baselines[simdata_neg_bl_orientation_ind,:]
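    # Baselines oriented beyond 90 deg (plus half an orientation bin) are flipped to the conjugate
    # orientation; the corresponding CLEANed visibilities are conjugated to match further below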
hdulist = fits.open(infile+'.fits')
latitude = hdulist[0].header['latitude']
pointing_coords = hdulist[0].header['pointing_coords']
pointings_table = hdulist['POINTING AND PHASE CENTER INFO'].data
lst = pointings_table['LST']
n_snaps = lst.size
hdulist.close()
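    # Convert the recorded pointing centers into alt-az, HA-dec and direction-cosine
    # representations, branching on the coordinate system stored in the FITS header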
if pointing_coords == 'altaz':
pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'radec':
pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'hadec':
pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
hdulist = fits.open(asm_CLEAN_infile+'.fits')
clean_lags = hdulist['SPECTRAL INFO'].data['lag']
clean_lags_orig = NP.copy(clean_lags)
asm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
asm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
asm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
asm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
hdulist = fits.open(dsm_CLEAN_infile+'.fits')
dsm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
dsm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
dsm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
dsm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
hdulist = fits.open(csm_CLEAN_infile+'.fits')
csm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
csm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
csm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
csm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
asm_cc_vis[simdata_neg_bl_orientation_ind,:,:] = asm_cc_vis[simdata_neg_bl_orientation_ind,:,:].conj()
asm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:] = asm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:].conj()
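    # Delay transform: inverse FFT along the frequency axis, scaled by nchan * freq_resolution to
    # approximate the continuous Fourier transform, with the CLEAN residuals added back so the
    # delay spectra contain both the CLEAN components and whatever CLEAN left behind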
asm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(asm_cc_skyvis, axis=1),axes=1) * asm_cc_skyvis.shape[1] * freq_resolution
asm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(asm_cc_skyvis_res, axis=1),axes=1) * asm_cc_skyvis.shape[1] * freq_resolution
asm_cc_skyvis_lag = asm_cc_skyvis_lag + asm_ccres_sky
asm_cc_vis_lag = NP.fft.fftshift(NP.fft.ifft(asm_cc_vis, axis=1),axes=1) * asm_cc_vis.shape[1] * freq_resolution
asm_ccres = NP.fft.fftshift(NP.fft.ifft(asm_cc_vis_res, axis=1),axes=1) * asm_cc_vis.shape[1] * freq_resolution
asm_cc_vis_lag = asm_cc_vis_lag + asm_ccres
dsm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_vis[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_vis[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(dsm_cc_skyvis, axis=1),axes=1) * dsm_cc_skyvis.shape[1] * freq_resolution
dsm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(dsm_cc_skyvis_res, axis=1),axes=1) * dsm_cc_skyvis.shape[1] * freq_resolution
dsm_cc_skyvis_lag = dsm_cc_skyvis_lag + dsm_ccres_sky
dsm_cc_vis_lag = NP.fft.fftshift(NP.fft.ifft(dsm_cc_vis, axis=1),axes=1) * dsm_cc_vis.shape[1] * freq_resolution
dsm_ccres = NP.fft.fftshift(NP.fft.ifft(dsm_cc_vis_res, axis=1),axes=1) * dsm_cc_vis.shape[1] * freq_resolution
dsm_cc_vis_lag = dsm_cc_vis_lag + dsm_ccres
csm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = csm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = csm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_vis[simdata_neg_bl_orientation_ind,:,:] = csm_cc_vis[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:] = csm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(csm_cc_skyvis, axis=1),axes=1) * csm_cc_skyvis.shape[1] * freq_resolution
csm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(csm_cc_skyvis_res, axis=1),axes=1) * csm_cc_skyvis.shape[1] * freq_resolution
csm_cc_skyvis_lag = csm_cc_skyvis_lag + csm_ccres_sky
csm_cc_vis_lag = NP.fft.fftshift(NP.fft.ifft(csm_cc_vis, axis=1),axes=1) * csm_cc_vis.shape[1] * freq_resolution
csm_ccres = NP.fft.fftshift(NP.fft.ifft(csm_cc_vis_res, axis=1),axes=1) * csm_cc_vis.shape[1] * freq_resolution
csm_cc_vis_lag = csm_cc_vis_lag + csm_ccres
asm_cc_skyvis_lag = DSP.downsampler(asm_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
asm_cc_vis_lag = DSP.downsampler(asm_cc_vis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
dsm_cc_skyvis_lag = DSP.downsampler(dsm_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
dsm_cc_vis_lag = DSP.downsampler(dsm_cc_vis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
csm_cc_skyvis_lag = DSP.downsampler(csm_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
csm_cc_vis_lag = DSP.downsampler(csm_cc_vis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
clean_lags = DSP.downsampler(clean_lags, 1.0*clean_lags.size/ia.lags.size, axis=-1)
clean_lags = clean_lags.ravel()
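    # Downsample the delay spectra (and the lag axis itself) to the native lag resolution of the
    # simulated array, presumably undoing the oversampling used during delay-space CLEANing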
vis_noise_lag = NP.copy(ia.vis_noise_lag)
vis_noise_lag = vis_noise_lag[truncated_ref_bl_ind,:,:]
asm_cc_skyvis_lag = asm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
asm_cc_vis_lag = asm_cc_vis_lag[truncated_ref_bl_ind,:,:]
csm_cc_skyvis_lag = csm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
csm_cc_vis_lag = csm_cc_vis_lag[truncated_ref_bl_ind,:,:]
dsm_cc_skyvis_lag = dsm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
dsm_cc_vis_lag = dsm_cc_vis_lag[truncated_ref_bl_ind,:,:]
delaymat = DLY.delay_envelope(ia.baselines[truncated_ref_bl_ind,:], pc, units='mks')
bw = nchan * freq_resolution
min_delay = -delaymat[0,:,1]-delaymat[0,:,0]
max_delay = delaymat[0,:,0]-delaymat[0,:,1]
clags = clean_lags.reshape(1,-1)
min_delay = min_delay.reshape(-1,1)
max_delay = max_delay.reshape(-1,1)
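    # min_delay and max_delay are the per-baseline horizon delay limits from the delay envelope;
    # clags is kept as a row vector so it broadcasts against them when building the masks below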
thermal_noise_window = NP.abs(clags) >= max_abs_delay*1e-6
thermal_noise_window = NP.repeat(thermal_noise_window, ia.baselines[truncated_ref_bl_ind,:].shape[0], axis=0)
EoR_window = NP.logical_or(clags > max_delay+3/bw, clags < min_delay-3/bw)
strict_EoR_window = NP.logical_and(EoR_window, NP.abs(clags) < 1/coarse_channel_resolution)
wedge_window = NP.logical_and(clags <= max_delay, clags >= min_delay)
non_wedge_window = NP.logical_not(wedge_window)
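    # Masks in (baseline, delay) space: the EoR window lies beyond the horizon delay padded by
    # ~3/bandwidth, the strict EoR window additionally stays within the first coarse-channel
    # harmonic, and the wedge window is bounded by the horizon limits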
vis_rms_lag = OPS.rms(asm_cc_vis_lag, mask=NP.logical_not(NP.repeat(thermal_noise_window[:,:,NP.newaxis], n_snaps, axis=2)), axis=1)
vis_rms_freq = NP.abs(vis_rms_lag) / NP.sqrt(nchan) / freq_resolution
T_rms_freq = vis_rms_freq / (2.0 * FCNST.k) * NP.mean(ia.A_eff[truncated_ref_bl_ind,:]) * NP.mean(ia.eff_Q[truncated_ref_bl_ind,:]) * NP.sqrt(2.0*freq_resolution*NP.asarray(ia.t_acc).reshape(1,1,-1)) * CNST.Jy
vis_rms_lag_theory = OPS.rms(vis_noise_lag, mask=NP.logical_not(NP.repeat(EoR_window[:,:,NP.newaxis], n_snaps, axis=2)), axis=1)
vis_rms_freq_theory = NP.abs(vis_rms_lag_theory) / NP.sqrt(nchan) / freq_resolution
T_rms_freq_theory = vis_rms_freq_theory / (2.0 * FCNST.k) * NP.mean(ia.A_eff[truncated_ref_bl_ind,:]) * NP.mean(ia.eff_Q[truncated_ref_bl_ind,:]) * NP.sqrt(2.0*freq_resolution*NP.asarray(ia.t_acc).reshape(1,1,-1)) * CNST.Jy
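    # Thermal-noise estimates: rms of the delay spectra outside the delay range of interest,
    # converted to a per-channel flux-density rms and then, apparently via the radiometer
    # relation, to an equivalent temperature rms; repeated for the purely theoretical noise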
if (dspec_min is None) or (dspec_max is None):
dspec_max = max([NP.abs(asm_cc_skyvis_lag).max(), NP.abs(dsm_cc_skyvis_lag).max(), NP.abs(csm_cc_skyvis_lag).max()])
dspec_min = min([NP.abs(asm_cc_skyvis_lag).min(), NP.abs(dsm_cc_skyvis_lag).min(), NP.abs(csm_cc_skyvis_lag).min()])
dspec_max = dspec_max**2 * volfactor1 * volfactor2 * Jy2K**2
dspec_min = dspec_min**2 * volfactor1 * volfactor2 * Jy2K**2
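    # Color-scale limits for the delay power spectra, converted from (Jy Hz)^2 to cosmological
    # units of K^2 (Mpc/h)^3 using the volume factors and Jy-to-K conversion defined earlier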
small_delays_EoR_window = EoR_window.T
small_delays_strict_EoR_window = strict_EoR_window.T
small_delays_wedge_window = wedge_window.T
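    # If requested, restrict the delay axis (and the corresponding window masks) to
    # |lag| <= max_abs_delay for plotting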
if max_abs_delay is not None:
small_delays_ind = NP.abs(clean_lags) <= max_abs_delay * 1e-6
clean_lags = clean_lags[small_delays_ind]
asm_cc_vis_lag = asm_cc_vis_lag[:,small_delays_ind,:]
asm_cc_skyvis_lag = asm_cc_skyvis_lag[:,small_delays_ind,:]
dsm_cc_vis_lag = dsm_cc_vis_lag[:,small_delays_ind,:]
dsm_cc_skyvis_lag = dsm_cc_skyvis_lag[:,small_delays_ind,:]
csm_cc_vis_lag = csm_cc_vis_lag[:,small_delays_ind,:]
csm_cc_skyvis_lag = csm_cc_skyvis_lag[:,small_delays_ind,:]
small_delays_EoR_window = small_delays_EoR_window[small_delays_ind,:]
small_delays_strict_EoR_window = small_delays_strict_EoR_window[small_delays_ind,:]
small_delays_wedge_window = small_delays_wedge_window[small_delays_ind,:]
if plot_10:
# 10) Plot noiseless delay spectra from simulations for diffuse, compact and all-sky models
descriptor_str = ['off-zenith', 'zenith']
        # All-sky model
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
imdspec = axs[j].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(asm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = axs[j].plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = axs[j].plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
axs[j].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[j].set_aspect('auto')
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
for j in xrange(n_snaps):
axs_kprll = axs[j].twinx()
axs_kprll.set_yticks(kprll(axs[j].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[j].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
if j == 0:
axs_kperp = axs[j].twiny()
axs_kperp.set_xticks(kperp(axs[j].get_xticks()*freq/FCNST.c, redshift))
axs_kperp.set_xlim(kperp(NP.asarray(axs[j].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axs_kperp.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
# fig = PLT.figure(figsize=(6,6))
# for j in xrange(n_snaps):
# ax = fig.add_subplot(n_snaps,1,j+1)
# ax.set_ylim(NP.amin(clean_lags*1e6), NP.amax(clean_lags*1e6))
# ax.set_ylabel(r'lag [$\mu$s]', fontsize=18)
# ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=18)
# imdspec = ax.pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(asm_cc_skyvis_lag[:-1,:-1,j].T), norm=PLTC.LogNorm(vmin=1e6, vmax=dspec_max))
# horizonb = ax.plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls='-', lw=1.5)
# horizont = ax.plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls='-', lw=1.5)
# ax.set_aspect('auto')
# cbax = fig.add_axes([0.86, 0.125, 0.02, 0.84])
# cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
# cbax.set_ylabel('Jy Hz', labelpad=0, fontsize=18)
# PLT.tight_layout()
# fig.subplots_adjust(right=0.83)
# # fig.subplots_adjust(top=0.9)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
# Plot each snapshot separately
for j in xrange(n_snaps):
fig = PLT.figure(figsize=(6,6))
ax = fig.add_subplot(111)
imdspec = ax.pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(asm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=1e0, vmax=1e12))
horizonb = ax.plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = ax.plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
ax.set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
ax.set_aspect('auto')
# ax.text(0.5, 0.9, descriptor_str[j], transform=ax.transAxes, fontsize=14, weight='semibold', ha='center', color='white')
ax_kprll = ax.twinx()
ax_kprll.set_yticks(kprll(ax.get_yticks()*1e-6, redshift))
ax_kprll.set_ylim(kprll(NP.asarray(ax.get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
ax_kprll.yaxis.set_major_formatter(yformatter)
ax_kperp = ax.twiny()
ax_kperp.set_xticks(kperp(ax.get_xticks()*freq/FCNST.c, redshift))
ax_kperp.set_xlim(kperp(NP.asarray(ax.get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
ax_kperp.xaxis.set_major_formatter(xformatter)
ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium')
ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium')
ax_kprll.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
ax_kperp.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}_snapshot_{1:1d}'.format(oversampling_factor, j)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}_snapshot_{1:1d}'.format(oversampling_factor, j)+'.eps', bbox_inches=0)
# Diffuse foreground model
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
imdspec = axs[j].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(dsm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = axs[j].plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = axs[j].plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
axs[j].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[j].set_aspect('auto')
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
for j in xrange(n_snaps):
axs_kprll = axs[j].twinx()
axs_kprll.set_yticks(kprll(axs[j].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[j].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
if j == 0:
axs_kperp = axs[j].twiny()
axs_kperp.set_xticks(kperp(axs[j].get_xticks()*freq/FCNST.c, redshift))
axs_kperp.set_xlim(kperp(NP.asarray(axs[j].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axs_kperp.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
# fig = PLT.figure(figsize=(6,6))
# for j in xrange(n_snaps):
# ax = fig.add_subplot(n_snaps,1,j+1)
# ax.set_ylim(NP.amin(clean_lags*1e6), NP.amax(clean_lags*1e6))
# ax.set_ylabel(r'lag [$\mu$s]', fontsize=18)
# ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=18)
# imdspec = ax.pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(dsm_cc_skyvis_lag[:-1,:-1,j].T), norm=PLTC.LogNorm(vmin=1e6, vmax=dspec_max))
# horizonb = ax.plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls='-', lw=1.5)
# horizont = ax.plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls='-', lw=1.5)
# ax.set_aspect('auto')
# cbax = fig.add_axes([0.86, 0.125, 0.02, 0.84])
# cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
# cbax.set_ylabel('Jy Hz', labelpad=0, fontsize=18)
# PLT.tight_layout()
# fig.subplots_adjust(right=0.83)
# # fig.subplots_adjust(top=0.9)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_dsm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_dsm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
# Plot each snapshot separately
for j in xrange(n_snaps):
fig = PLT.figure(figsize=(6,6))
ax = fig.add_subplot(111)
imdspec = ax.pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(dsm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=1e0, vmax=1e12))
horizonb = ax.plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = ax.plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
ax.set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
ax.set_aspect('auto')
# ax.text(0.5, 0.9, descriptor_str[j], transform=ax.transAxes, fontsize=14, weight='semibold', ha='center', color='white')
ax_kprll = ax.twinx()
ax_kprll.set_yticks(kprll(ax.get_yticks()*1e-6, redshift))
ax_kprll.set_ylim(kprll(NP.asarray(ax.get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
ax_kprll.yaxis.set_major_formatter(yformatter)
ax_kperp = ax.twiny()
ax_kperp.set_xticks(kperp(ax.get_xticks()*freq/FCNST.c, redshift))
ax_kperp.set_xlim(kperp(NP.asarray(ax.get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
ax_kperp.xaxis.set_major_formatter(xformatter)
ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium')
ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium')
ax_kprll.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
ax_kperp.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_dsm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}_snapshot_{1:1d}'.format(oversampling_factor, j)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_dsm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}_snapshot_{1:1d}'.format(oversampling_factor, j)+'.eps', bbox_inches=0)
# Compact foreground model
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
imdspec = axs[j].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(csm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = axs[j].plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = axs[j].plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
axs[j].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[j].set_aspect('auto')
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
for j in xrange(n_snaps):
axs_kprll = axs[j].twinx()
axs_kprll.set_yticks(kprll(axs[j].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[j].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
if j == 0:
axs_kperp = axs[j].twiny()
axs_kperp.set_xticks(kperp(axs[j].get_xticks()*freq/FCNST.c, redshift))
axs_kperp.set_xlim(kperp(NP.asarray(axs[j].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axs_kperp.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
# fig = PLT.figure(figsize=(6,6))
# for j in xrange(n_snaps):
# ax = fig.add_subplot(n_snaps,1,j+1)
# ax.set_ylim(NP.amin(clean_lags*1e6), NP.amax(clean_lags*1e6))
# ax.set_ylabel(r'lag [$\mu$s]', fontsize=18)
        #     ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=18)
# imdspec = ax.pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(csm_cc_skyvis_lag[:-1,:-1,j].T), norm=PLTC.LogNorm(vmin=1e6, vmax=dspec_max))
# horizonb = ax.plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls='-', lw=1.5)
# horizont = ax.plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls='-', lw=1.5)
# ax.set_aspect('auto')
# cbax = fig.add_axes([0.86, 0.125, 0.02, 0.84])
# cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
# cbax.set_ylabel('Jy Hz', labelpad=0, fontsize=18)
# PLT.tight_layout()
# fig.subplots_adjust(right=0.83)
# # fig.subplots_adjust(top=0.9)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_csm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_csm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
# Plot each snapshot separately
for j in xrange(n_snaps):
fig = PLT.figure(figsize=(6,6))
ax = fig.add_subplot(111)
imdspec = ax.pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(csm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=1e0, vmax=1e12))
horizonb = ax.plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = ax.plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
ax.set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
ax.set_aspect('auto')
# ax.text(0.5, 0.9, descriptor_str[j], transform=ax.transAxes, fontsize=14, weight='semibold', ha='center', color='white')
ax_kprll = ax.twinx()
ax_kprll.set_yticks(kprll(ax.get_yticks()*1e-6, redshift))
ax_kprll.set_ylim(kprll(NP.asarray(ax.get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
ax_kprll.yaxis.set_major_formatter(yformatter)
ax_kperp = ax.twiny()
ax_kperp.set_xticks(kperp(ax.get_xticks()*freq/FCNST.c, redshift))
ax_kperp.set_xlim(kperp(NP.asarray(ax.get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
ax_kperp.xaxis.set_major_formatter(xformatter)
ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium')
ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium')
ax_kprll.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
ax_kperp.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_csm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}_snapshot_{1:1d}'.format(oversampling_factor, j)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_csm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}_snapshot_{1:1d}'.format(oversampling_factor, j)+'.eps', bbox_inches=0)
# Plot diffuse and compact sky models for all snapshots together
fig, axs = PLT.subplots(nrows=n_snaps, ncols=2, sharex=True, sharey=True, figsize=(12,6))
for j in xrange(n_snaps):
dsmdspec = axs[j,0].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(dsm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = axs[j,0].plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = axs[j,0].plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
axs[j,0].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[j,0].set_aspect('auto')
axs[j,0].text(0.5, 0.9, descriptor_str[j], transform=axs[j,0].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
csmspec = axs[j,1].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(csm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = axs[j,1].plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = axs[j,1].plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
axs[j,1].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[j,1].set_aspect('auto')
axs[j,1].text(0.5, 0.9, descriptor_str[j], transform=axs[j,1].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
for j in xrange(n_snaps):
axs_kprll = axs[j,1].twinx()
axs_kprll.set_yticks(kprll(axs[j,1].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[j,1].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
if j == 0:
for col in range(2):
axs_kperp = axs[j,col].twiny()
axs_kperp.set_xticks(kperp(axs[j,col].get_xticks()*freq/FCNST.c, redshift))
axs_kperp.set_xlim(kperp(NP.asarray(axs[j,col].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axs_kperp.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(wspace=0, hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.93, 0.125, 0.02, 0.74])
cbar = fig.colorbar(dsmdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.82)
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_dsm_csm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_dsm_csm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
select_bl_id = ['47-21']
# select_bl_id = ['125-124', '93-28', '95-51', '84-58', '167-166', '85-61', '94-23', '47-21', '63-58', '67-51', '68-18', '93-86']
for blid in select_bl_id:
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
axs[j].plot(1e6*clean_lags, NP.abs(dsm_cc_skyvis_lag[ref_bl_id == blid,:,j]).ravel()**2 * volfactor1 * volfactor2 * Jy2K**2, 'k:', lw=2, label='Diffuse')
axs[j].plot(1e6*clean_lags, NP.abs(csm_cc_skyvis_lag[ref_bl_id == blid,:,j]).ravel()**2 * volfactor1 * volfactor2 * Jy2K**2, 'k--', lw=2, label='Compact')
axs[j].plot(1e6*clean_lags, NP.abs(asm_cc_skyvis_lag[ref_bl_id == blid,:,j]).ravel()**2 * volfactor1 * volfactor2 * Jy2K**2, 'k-', lw=2, label='Both')
dspec_ulim = NP.abs(asm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,j]).ravel()+NP.sqrt(NP.abs(csm_jacobian_spindex*csm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,j])**2 + NP.abs(dsm_jacobian_spindex*dsm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,j])**2 + NP.abs(vis_rms_lag[truncated_ref_bl_id==blid,:,j])**2).ravel()
dspec_llim = NP.abs(asm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,j]).ravel()-NP.sqrt(NP.abs(csm_jacobian_spindex*csm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,j])**2 + NP.abs(dsm_jacobian_spindex*dsm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,j])**2 + NP.abs(vis_rms_lag[truncated_ref_bl_id==blid,:,j])**2).ravel()
valid_ind = dspec_llim > 0.0
dspec_llim[NP.logical_not(valid_ind)] = 10**4.8
dspec_llim[small_delays_EoR_window[:,truncated_ref_bl_id==blid].ravel()] = 10**4.8
dspec_llim = dspec_llim**2 * volfactor1 * volfactor2 * Jy2K**2
dspec_ulim = dspec_ulim**2 * volfactor1 * volfactor2 * Jy2K**2
axs[j].fill_between(1e6*clean_lags, dspec_ulim, dspec_llim, alpha=0.75, edgecolor='none', facecolor='gray')
axs[j].axvline(x=1e6*min_delay[truncated_ref_bl_id==blid,0], ls='-.', lw=2, color='black')
axs[j].axvline(x=1e6*max_delay[truncated_ref_bl_id==blid,0], ls='-.', lw=2, color='black')
axs[j].set_yscale('log')
axs[j].set_xlim(1e6*clean_lags.min(), 1e6*clean_lags.max())
axs[j].set_ylim(dspec_llim.min(), 1.1*dspec_ulim.max())
# axs[j].set_ylim(10**4.3, 1.1*(max([NP.abs(asm_cc_vis_lag[truncated_ref_bl_id==blid,:,j]).max(), NP.abs(asm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,j]).max(), NP.abs(dsm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,j]).max(), NP.abs(csm_cc_vis_lag[truncated_ref_bl_id==blid,:,j]).max()])+NP.sqrt(NP.abs(csm_jacobian_spindex*csm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,j])**2 + NP.abs(dsm_jacobian_spindex*dsm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,j])**2 + NP.abs(vis_rms_lag[truncated_ref_bl_id==blid,:,j])**2)).max())
hl = PLT.Line2D(range(1), range(0), color='black', linestyle='-.', linewidth=2)
csml = PLT.Line2D(range(1), range(0), color='black', linestyle='--', linewidth=2)
dsml = PLT.Line2D(range(1), range(0), color='black', linestyle=':', linewidth=2)
asml = PLT.Line2D(range(1), range(0), color='black', linestyle='-', linewidth=2)
legend = axs[j].legend((dsml, csml, asml, hl), ('Diffuse', 'Compact', 'Both', 'Horizon\nLimit'), loc='upper right', frameon=False, fontsize=12)
if j == 0:
axs[j].set_title('East: {0[0]:+.1f} m, North: {0[1]:+.1f} m, Up: {0[2]:+.1f} m'.format(truncated_ref_bl[truncated_ref_bl_id==blid].ravel()), fontsize=12, weight='medium')
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_xlabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=20)
# big_ax.set_ylabel(r'$|V_{b\tau}(\mathbf{b},\tau)|$ [Jy Hz]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_ylabel(r"$P_d(k_\perp,k_\parallel)$ [K$^2$ (Mpc/$h)^3$]", fontsize=16, weight='medium', labelpad=30)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'baseline_'+blid+'_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_csm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'baseline_'+blid+'_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_csm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
select_bl_id = ['85-61', '63-58', '95-51']
fig, axs = PLT.subplots(len(select_bl_id), sharex=True, sharey=True, figsize=(6,8))
for j in xrange(len(select_bl_id)):
blid = select_bl_id[j]
axs[j].plot(1e6*clean_lags, NP.abs(dsm_cc_skyvis_lag[ref_bl_id == blid,:,1]).ravel()**2 * volfactor1 * volfactor2 * Jy2K**2, 'r-', lw=2, label='Diffuse')
axs[j].plot(1e6*clean_lags, NP.abs(csm_cc_skyvis_lag[ref_bl_id == blid,:,1]).ravel()**2 * volfactor1 * volfactor2 * Jy2K**2, ls='-', lw=2, label='Compact', color='cyan')
axs[j].plot(1e6*clean_lags, NP.abs(asm_cc_skyvis_lag[ref_bl_id == blid,:,1]).ravel()**2 * volfactor1 * volfactor2 * Jy2K**2, 'k-', lw=2, label='Both')
dspec_ulim = NP.abs(asm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,1]).ravel()+NP.sqrt(NP.abs(csm_jacobian_spindex*csm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,1])**2 + NP.abs(dsm_jacobian_spindex*dsm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,1])**2 + NP.abs(vis_rms_lag[truncated_ref_bl_id==blid,:,1])**2).ravel()
dspec_llim = NP.abs(asm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,1]).ravel()-NP.sqrt(NP.abs(csm_jacobian_spindex*csm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,1])**2 + NP.abs(dsm_jacobian_spindex*dsm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,1])**2 + NP.abs(vis_rms_lag[truncated_ref_bl_id==blid,:,1])**2).ravel()
valid_ind = dspec_llim > 0.0
dspec_llim[NP.logical_not(valid_ind)] = 10**4.8
dspec_llim[small_delays_EoR_window[:,truncated_ref_bl_id==blid].ravel()] = 10**4.8
dspec_llim = dspec_llim**2 * volfactor1 * volfactor2 * Jy2K**2
dspec_ulim = dspec_ulim**2 * volfactor1 * volfactor2 * Jy2K**2
# axs[j].fill_between(1e6*clean_lags, dspec_ulim, dspec_llim, alpha=0.75, edgecolor='none', facecolor='gray')
axs[j].axvline(x=1e6*min_delay[truncated_ref_bl_id==blid,0], ls=':', lw=2, color='black')
axs[j].axvline(x=1e6*max_delay[truncated_ref_bl_id==blid,0], ls=':', lw=2, color='black')
# axs[j].locator_params(axis='y', nbins=5)
axs[j].set_yscale('log')
axs[j].set_yticks(NP.logspace(0,8,5,endpoint=True).tolist())
# axs[j].get_yaxis().get_major_formatter().labelOnlyBase = True
axs[j].set_xlim(1e6*clean_lags.min(), 1e6*clean_lags.max())
axs[j].set_ylim(0.1*dspec_llim.min(), 1.5*dspec_ulim.max())
# axs[j].set_ylim(10**4.3, 1.1*(max([NP.abs(asm_cc_vis_lag[truncated_ref_bl_id==blid,:,1]).max(), NP.abs(asm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,1]).max(), NP.abs(dsm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,1]).max(), NP.abs(csm_cc_vis_lag[truncated_ref_bl_id==blid,:,1]).max()])+NP.sqrt(NP.abs(csm_jacobian_spindex*csm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,1])**2 + NP.abs(dsm_jacobian_spindex*dsm_cc_skyvis_lag[truncated_ref_bl_id==blid,:,1])**2 + NP.abs(vis_rms_lag[truncated_ref_bl_id==blid,:,1])**2)).max())
hl = PLT.Line2D(range(1), range(0), color='black', linestyle=':', linewidth=2)
dsml = PLT.Line2D(range(1), range(0), color='red', linestyle='-', linewidth=2)
csml = PLT.Line2D(range(1), range(0), color='cyan', linestyle='-', linewidth=2)
asml = PLT.Line2D(range(1), range(0), color='black', linestyle='-', linewidth=2)
legend = axs[j].legend((dsml, csml, asml, hl), ('Diffuse', 'Compact', 'Both', 'Horizon\nLimit'), loc='upper right', frameon=False, fontsize=12)
axs[j].text(0.05, 0.8, r'$|\mathbf{b}|$'+' = {0:.1f} m'.format(truncated_ref_bl_length[truncated_ref_bl_id==blid][0]), fontsize=12, weight='medium', transform=axs[j].transAxes)
axs[j].text(0.05, 0.72, r'$\theta_b$'+' = {0:+.1f}$^\circ$'.format(truncated_ref_bl_orientation[truncated_ref_bl_id==blid][0]), fontsize=12, weight='medium', transform=axs[j].transAxes)
# axs[j].text(0.05, 0.7, 'East: {0[0]:+.1f} m\nNorth: {0[1]:+.1f} m\nUp: {0[2]:+.1f} m'.format(truncated_ref_bl[truncated_ref_bl_id==blid].ravel()), fontsize=12, weight='medium', transform=axs[j].transAxes)
if j == 0:
axs_kprll = axs[j].twiny()
axs_kprll.set_xticks(kprll(axs[j].get_xticks()*1e-6, redshift))
axs_kprll.set_xlim(kprll(NP.asarray(axs[j].get_xlim())*1e-6, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.2f}'.format(x))
axs_kprll.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_xlabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=20)
# big_ax.set_ylabel(r'$|V_{b\tau}(\mathbf{b},\tau)|$ [Jy Hz]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_ylabel(r"$P_d(k_\perp,k_\parallel)$ [K$^2$ (Mpc/$h)^3$]", fontsize=16, weight='medium', labelpad=30)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'{0:0d}_baseline_comparison'.format(len(select_bl_id))+'_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'{0:0d}_baseline_comparison'.format(len(select_bl_id))+'_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
bl_orientation = NP.copy(simdata_bl_orientation[truncated_ref_bl_ind])
bloh, bloe, blon, blori = OPS.binned_statistic(bl_orientation, bins=n_bins_baseline_orientation, statistic='count', range=[(-90.0+0.5*180.0/n_bins_baseline_orientation, 90.0+0.5*180.0/n_bins_baseline_orientation)])
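    # Bin baselines by orientation; blori appears to hold reverse indices (IDL HISTOGRAM style),
    # so blori[blori[i]:blori[i+1]] selects the baselines falling in orientation bin i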
if plot_11:
for j in xrange(n_snaps):
fig, axs = PLT.subplots(n_bins_baseline_orientation, sharex=True, sharey=True, figsize=(6,9))
for i in xrange(n_bins_baseline_orientation):
blind = blori[blori[i]:blori[i+1]]
sortind = NP.argsort(truncated_ref_bl_length[blind], kind='heapsort')
imdspec = axs[n_bins_baseline_orientation-1-i].pcolorfast(truncated_ref_bl_length[blind[sortind]], 1e6*clean_lags, NP.abs(asm_cc_skyvis_lag[truncated_ref_bl_ind,:,:][blind[sortind][:-1],:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = axs[n_bins_baseline_orientation-1-i].plot(truncated_ref_bl_length[blind], 1e6*min_delay[blind].ravel(), color='white', ls=':', lw=1.5)
horizont = axs[n_bins_baseline_orientation-1-i].plot(truncated_ref_bl_length[blind], 1e6*max_delay[blind].ravel(), color='white', ls=':', lw=1.5)
axs[n_bins_baseline_orientation-1-i].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[n_bins_baseline_orientation-1-i].set_xlim(truncated_ref_bl_length.min(), truncated_ref_bl_length.max())
axs[n_bins_baseline_orientation-1-i].set_aspect('auto')
axs[n_bins_baseline_orientation-1-i].text(0.5, 0.1, bl_orientation_str[i]+': '+r'${0:+.1f}^\circ \leq\, \theta_b < {1:+.1f}^\circ$'.format(bloe[i], bloe[i+1]), fontsize=12, color='white', transform=axs[n_bins_baseline_orientation-1-i].transAxes, weight='bold', ha='center')
for i in xrange(n_bins_baseline_orientation):
axs_kprll = axs[i].twinx()
axs_kprll.set_yticks(kprll(axs[i].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[i].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
if i == 0:
axs_kperp = axs[i].twiny()
axs_kperp.set_xticks(kperp(axs[i].get_xticks()*freq/FCNST.c, redshift))
axs_kperp.set_xlim(kperp(NP.asarray(axs[i].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axs_kperp.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
# cbax = fig.add_axes([0.9, 0.1, 0.02, 0.78])
# cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
# cbax.set_xlabel('Jy Hz', labelpad=10, fontsize=12)
# cbax.xaxis.set_label_position('top')
# fig.subplots_adjust(right=0.75)
# fig.subplots_adjust(top=0.92)
# fig.subplots_adjust(bottom=0.07)
cbax = fig.add_axes([0.125, 0.94, 0.72, 0.02])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='horizontal')
cbax.xaxis.tick_top()
cbax.set_ylabel(r'K$^2$(Mpc/h)$^3$', fontsize=12, rotation='horizontal')
# cbax.yaxis.set_label_position('right')
cbax.yaxis.set_label_coords(1.1, 1.0)
fig.subplots_adjust(right=0.86)
fig.subplots_adjust(top=0.85)
fig.subplots_adjust(bottom=0.07)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'baseline_binned_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_snapshot_{0:0d}.png'.format(j), bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'baseline_binned_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_snapshot_{0:0d}.eps'.format(j), bbox_inches=0)
if plot_12:
required_bl_orientation = ['North', 'East']
for j in xrange(n_snaps):
fig, axs = PLT.subplots(len(required_bl_orientation), sharex=True, sharey=True, figsize=(6,6))
for k in xrange(len(required_bl_orientation)):
i = bl_orientation_str.index(required_bl_orientation[k])
blind = blori[blori[i]:blori[i+1]]
sortind = NP.argsort(truncated_ref_bl_length[blind], kind='heapsort')
imdspec = axs[k].pcolorfast(truncated_ref_bl_length[blind[sortind]], 1e6*clean_lags, NP.abs(asm_cc_skyvis_lag[truncated_ref_bl_ind,:,:][blind[sortind][:-1],:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=dspec_max))
horizonb = axs[k].plot(truncated_ref_bl_length[blind], 1e6*min_delay[blind].ravel(), color='white', ls=':', lw=1.0)
horizont = axs[k].plot(truncated_ref_bl_length[blind], 1e6*max_delay[blind].ravel(), color='white', ls=':', lw=1.0)
axs[k].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[k].set_xlim(truncated_ref_bl_length.min(), truncated_ref_bl_length.max())
axs[k].set_aspect('auto')
axs[k].text(0.5, 0.1, bl_orientation_str[i]+': '+r'${0:+.1f}^\circ \leq\, \theta_b < {1:+.1f}^\circ$'.format(bloe[i], bloe[i+1]), fontsize=16, color='white', transform=axs[k].transAxes, weight='semibold', ha='center')
for i in xrange(len(required_bl_orientation)):
axs_kprll = axs[i].twinx()
axs_kprll.set_yticks(kprll(axs[i].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[i].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
if i == 0:
axs_kperp = axs[i].twiny()
axs_kperp.set_xticks(kperp(axs[i].get_xticks()*freq/FCNST.c, redshift))
axs_kperp.set_xlim(kperp(NP.asarray(axs[i].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axs_kperp.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
# cbax = fig.add_axes([0.9, 0.1, 0.02, 0.78])
# cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
# cbax.set_xlabel('Jy Hz', labelpad=10, fontsize=12)
# cbax.xaxis.set_label_position('top')
# fig.subplots_adjust(right=0.75)
# fig.subplots_adjust(top=0.92)
# fig.subplots_adjust(bottom=0.07)
cbax = fig.add_axes([0.125, 0.92, 0.72, 0.02])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='horizontal')
cbax.xaxis.tick_top()
cbax.set_ylabel(r'K$^2$(Mpc/h)$^3$', fontsize=12, rotation='horizontal')
# cbax.yaxis.set_label_position('right')
cbax.yaxis.set_label_coords(1.1, 1.0)
fig.subplots_adjust(right=0.86)
fig.subplots_adjust(top=0.79)
fig.subplots_adjust(bottom=0.09)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'baseline_N_E_binned_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_snapshot_{0:0d}.png'.format(j), bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'baseline_N_E_binned_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_snapshot_{0:0d}.eps'.format(j), bbox_inches=0)
if plot_13:
# 13) Plot EoR window foreground contamination when baselines are selectively removed
blo_target = 0.0
n_blo_remove_range = 3
n_inner_bll_remove_range = 20
blo_remove_max = 0.5*180.0/n_bins_baseline_orientation*(1+NP.arange(n_blo_remove_range))/n_blo_remove_range
inner_bll_remove_max = NP.logspace(NP.log10(truncated_ref_bl_length.min()), NP.log10(max_bl_length), n_inner_bll_remove_range)
bl_screened_fg_contamination = NP.zeros((n_blo_remove_range, n_inner_bll_remove_range), dtype=NP.complex)
fraction_bl_discarded = NP.zeros((n_blo_remove_range, n_inner_bll_remove_range))
ns_blind = blori[blori[3]:blori[3+1]]
ns_fg_contamination = OPS.rms(NP.abs(asm_cc_skyvis_lag[ns_blind,:,0])**2, mask=NP.logical_not(small_delays_strict_EoR_window[:,ns_blind]).T) * volfactor1 * volfactor2 * Jy2K**2
ew_blind = blori[blori[1]:blori[1+1]]
ew_fg_contamination = OPS.rms(NP.abs(asm_cc_skyvis_lag[ew_blind,:,0])**2, mask=NP.logical_not(small_delays_strict_EoR_window[:,ew_blind]).T) * volfactor1 * volfactor2 * Jy2K**2
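    # Sweep over orientation thresholds and inner baseline-length cutoffs; baselines that are both nearly eastward and shorter than the cutoff are discarded before recomputing the RMS foreground power inside the strict EoR window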
for i in xrange(n_blo_remove_range):
blo_retain_ind = NP.abs(bl_orientation - blo_target) > blo_remove_max[i]
blo_discard_ind = NP.logical_not(blo_retain_ind)
for j in xrange(n_inner_bll_remove_range):
bll_retain_ind = truncated_ref_bl_length > inner_bll_remove_max[j]
bll_discard_ind = NP.logical_not(bll_retain_ind)
retain = NP.logical_not(NP.logical_and(blo_discard_ind, bll_discard_ind))
mask = NP.logical_not(NP.logical_and(small_delays_strict_EoR_window, retain.reshape(1,-1)))
bl_screened_fg_contamination[i,j] = OPS.rms(NP.abs(asm_cc_skyvis_lag[:,:,0])**2, mask=mask.T) * volfactor1 * volfactor2 * Jy2K**2
fraction_bl_discarded[i,j] = 1.0 - NP.sum(retain).astype(float)/truncated_ref_bl_length.size
symbols = ['o', 's', '*', 'd', '+', 'x']
fig = PLT.figure(figsize=(6,6))
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
for i in xrange(n_blo_remove_range):
ax1.plot(inner_bll_remove_max, bl_screened_fg_contamination[i,:], marker=symbols[i], markersize=6, lw=1, color='k', ls='-', label=r'$|\theta_b|\,\leq\,${0:.1f}$^\circ$'.format(blo_remove_max[i]))
ax2.plot(inner_bll_remove_max, fraction_bl_discarded[i,:], marker=symbols[i], markersize=5, color='k', lw=1, ls=':', label=r'$|\theta_b|\,\leq\,${0:.1f}$^\circ$'.format(blo_remove_max[i]))
# ax1.axhline(y=NP.abs(ew_fg_contamination), color='k', ls='-.', lw=2, label='Eastward limit')
# ax1.axhline(y=NP.abs(ns_fg_contamination), color='k', ls='--', lw=2, label='Northward limit')
ax1.set_ylim(0.3*bl_screened_fg_contamination.min(), 1.2*bl_screened_fg_contamination.max())
# ax1.set_ylim(0.9*NP.abs(ns_fg_contamination), 1.1*NP.abs(ew_fg_contamination))
ax1.set_xlim(0.9*inner_bll_remove_max.min(), 1.1*inner_bll_remove_max.max())
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlabel(r'Eastward $|\mathbf{b}|_\mathrm{max}$ [m]', fontsize=18, weight='medium')
ax1.set_ylabel(r'Foreground Contamination [K$^2$(Mpc/h)$^3$]', fontsize=18, weight='medium')
# ax1.set_ylabel(r'$\langle|V_{b\tau}^\mathrm{FG}(\mathbf{b},\tau)|^2\rangle^{1/2}_{\in\,\mathrm{EoR\,window}}$ [Jy Hz]', fontsize=18, weight='medium')
# legend = ax1.legend(loc='lower right')
# legend = ax1.legend(loc='lower right', fancybox=True, framealpha=1.0)
ax2.set_yscale('log')
ax2.set_xscale('log')
ax2.set_ylim(1e-3, 1.0)
ax2.set_ylabel('Baseline fraction discarded', fontsize=18, weight='medium', color='k')
legend1_symbol = []
legend1_text = []
for i in xrange(n_blo_remove_range):
legend1_symbol += [PLT.Line2D(range(1), range(0), marker=symbols[i], markersize=6, color='k', linestyle='None')]
legend1_text += [r'$|\theta_b|\,\leq\,${0:.1f}$^\circ$'.format(blo_remove_max[i])]
legend2_symbol = []
legend2_text = []
# legend2_symbol += [PLT.Line2D(range(1), range(0), linestyle='-.', lw=1.5, color='k')]
# legend2_symbol += [PLT.Line2D(range(1), range(0), linestyle='--', lw=1.5, color='k')]
legend2_symbol += [PLT.Line2D(range(1), range(0), linestyle='-', lw=1.5, color='k')]
legend2_symbol += [PLT.Line2D(range(1), range(0), linestyle=':', lw=1.5, color='k')]
# legend2_text += ['Foreground upper limit']
# legend2_text += ['Foreground lower limit']
legend2_text += ['Foreground in EoR window']
legend2_text += ['Baseline fraction']
legend1 = ax1.legend(legend1_symbol, legend1_text, loc='lower right', numpoints=1, fontsize='medium')
legend2 = ax2.legend(legend2_symbol, legend2_text, loc='upper right', fontsize='medium')
PLT.tight_layout()
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'baseline_screening_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'baseline_screening_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
if plot_14:
# 14) Plot delay spectra before and after baselines are selectively removed
blo_target = 0.0
blo_remove_max = 0.5*180.0/n_bins_baseline_orientation
inner_bll_remove_max = 30.0
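    # Flag baselines that are both nearly eastward (within half an orientation bin of 0 deg) and shorter than 30 m; these are masked in the screened delay spectrum below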
blo_retain_ind = NP.abs(bl_orientation - blo_target) > blo_remove_max
blo_discard_ind = NP.logical_not(blo_retain_ind)
bll_retain_ind = truncated_ref_bl_length > inner_bll_remove_max
bll_discard_ind = NP.logical_not(bll_retain_ind)
discard = NP.logical_and(blo_discard_ind, bll_discard_ind)
retain = NP.logical_not(discard)
msk = NP.zeros((truncated_ref_bl_length.size, clean_lags.size))
msk[discard,:] = 1
colrmap = copy.copy(CM.jet)
colrmap.set_bad(color='black', alpha=1.0)
bl_screened_asm_cc_skyvis_lag = NP.ma.masked_array(asm_cc_skyvis_lag[:,:,0], mask=msk)
# bl_screened_asm_cc_skyvis_lag = NP.ma.filled(bl_screened_asm_cc_skyvis_lag, fill_value=1e-5)
# bl_screened_asm_cc_skyvis_lag = NP.ma.compress_rows(bl_screened_asm_cc_skyvis_lag)
# bl_screened_asm_cc_skyvis_lag = NP.copy(asm_cc_skyvis_lag[:,:,0])
# bl_screened_asm_cc_skyvis_lag[discard,:] = 1e-3
descriptor_str = ['All baselines', 'Short eastward baselines removed']
fig, axs = PLT.subplots(2, sharex=True, sharey=True, figsize=(6,6))
all_imdspec = axs[0].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(asm_cc_skyvis_lag[:-1,:-1,0].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=NP.abs(asm_cc_skyvis_lag).max()**2 * volfactor1 * volfactor2 * Jy2K**2))
screened_imdspec = axs[1].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(bl_screened_asm_cc_skyvis_lag[:-1,:-1].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e6)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=NP.abs(asm_cc_skyvis_lag).max()**2 * volfactor1 * volfactor2 * Jy2K**2), cmap=colrmap)
for j in xrange(len(axs)):
bll_cut = axs[j].axvline(x=inner_bll_remove_max, ymin=0.0, ymax=0.75, ls='--', color='white', lw=1.5)
horizonb = axs[j].plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = axs[j].plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
axs[j].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[j].set_aspect('auto')
axs[j].text(0.5, 0.8, descriptor_str[j], transform=axs[j].transAxes, fontsize=12, weight='semibold', ha='center', color='white')
for j in xrange(len(axs)):
axs_kprll = axs[j].twinx()
axs_kprll.set_yticks(kprll(axs[j].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[j].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
if j == 0:
axs_kperp = axs[j].twiny()
axs_kperp.set_xticks(kperp(axs[j].get_xticks()*freq/FCNST.c, redshift))
axs_kperp.set_xlim(kperp(NP.asarray(axs[j].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axs_kperp.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(all_imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_screening_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_screening_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
##################################################################
if plot_15:
# 15) Plot Fourier space
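    # Schematic of the (|b|, tau) plane: horizon lines, delay-grating limits set by the coarse channel width, the foreground wedge, and the regions of maximal EoR sensitivity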
bw = nchan * freq_resolution
fig = PLT.figure(figsize=(6,6))
ax = fig.add_subplot(111)
# ax.plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), 'k-', truncated_ref_bl_length, 1e6*max_delay.ravel(), 'k-')
# ax.plot(truncated_ref_bl_length, 1e6*(min_delay.ravel()-1/bw), 'k--', truncated_ref_bl_length, 1e6*(max_delay.ravel()+1/bw), 'k--')
ph_line, nh_line = ax.plot(truncated_ref_bl_length, 1e6*truncated_ref_bl_length/FCNST.c, 'k-', truncated_ref_bl_length, -1e6*truncated_ref_bl_length/FCNST.c, 'k-')
ax.plot(truncated_ref_bl_length, -1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), 'k--', truncated_ref_bl_length, 1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), 'k--')
ax.plot(truncated_ref_bl_length[truncated_ref_bl_length <= FCNST.c/coarse_channel_resolution], 1e6/coarse_channel_resolution*NP.ones(NP.sum(truncated_ref_bl_length <= FCNST.c/coarse_channel_resolution)), 'k-.')
ax.plot(truncated_ref_bl_length[truncated_ref_bl_length <= FCNST.c/coarse_channel_resolution], -1e6/coarse_channel_resolution*NP.ones(NP.sum(truncated_ref_bl_length <= FCNST.c/coarse_channel_resolution)), 'k-.')
ax.fill_between(truncated_ref_bl_length, -0.5/freq_resolution*1e6, -1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), facecolor='0.8', edgecolor='none')
ax.fill_between(truncated_ref_bl_length, 1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), 0.5/freq_resolution*1e6, facecolor='0.8', edgecolor='none')
ax.fill_between(truncated_ref_bl_length, -1e6/coarse_channel_resolution, -1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), facecolor='0.7', edgecolor='none')
ax.fill_between(truncated_ref_bl_length, 1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), 1e6/coarse_channel_resolution, facecolor='0.7', edgecolor='none')
ax.fill_between(truncated_ref_bl_length, -1e6*truncated_ref_bl_length/FCNST.c, 1e6*truncated_ref_bl_length/FCNST.c, facecolor='0.5', edgecolor='none')
ax.set_xlim(truncated_ref_bl_length.min(), truncated_ref_bl_length.max())
ax.set_ylim(-1.25, 1.25)
ax.text(0.5, 0.5, 'Foregrounds', transform=ax.transAxes, fontsize=12, weight='semibold', ha='left', color='black')
ax.text(100, 1e6/coarse_channel_resolution, 'Delay grating', fontsize=12, weight='semibold', ha='left', color='black', va='bottom')
ax.text(100, -1e6/coarse_channel_resolution, 'Delay grating', fontsize=12, weight='semibold', ha='left', color='black', va='top')
ax.text(10, 0.45, 'Maximal EoR \nsensitivity', fontsize=12, weight='semibold', ha='left', va='center')
ax.text(10, -0.45, 'Maximal EoR \nsensitivity', fontsize=12, weight='semibold', ha='left', va='center')
anchor_bll = 125.0
anchor_nh_delay = -1e6 * anchor_bll/FCNST.c
anchor_ph_delay = 1e6 * anchor_bll/FCNST.c
nhp1 = ax.transData.transform_point(NP.array([nh_line.get_xdata()[0], nh_line.get_ydata()[0]]))
nhp2 = ax.transData.transform_point(NP.array([nh_line.get_xdata()[-1], nh_line.get_ydata()[-1]]))
nh_angle = NP.degrees(NP.arctan2(nhp2[1]-nhp1[1], nhp2[0]-nhp1[0]))
php1 = ax.transData.transform_point(NP.array([ph_line.get_xdata()[0], ph_line.get_ydata()[0]]))
php2 = ax.transData.transform_point(NP.array([ph_line.get_xdata()[-1], ph_line.get_ydata()[-1]]))
ph_angle = NP.degrees(NP.arctan2(php2[1]-php1[1], php2[0]-php1[0]))
nh_text = ax.text(anchor_bll, anchor_nh_delay, 'Horizon', fontsize=12, weight='semibold', rotation=nh_angle, ha='left')
ph_text = ax.text(anchor_bll, anchor_ph_delay, 'Horizon', fontsize=12, weight='semibold', rotation=ph_angle, ha='left')
# ax.set_ylim(-0.5/freq_resolution*1e6, 0.5/freq_resolution*1e6)
ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium')
ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium')
axr = ax.twinx()
axr.set_yticks([])
axr.set_yticks(kprll(ax.get_yticks()*1e-6, redshift))
axr.set_ylim(kprll(NP.asarray(ax.get_ylim())*1e-6, redshift))
axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axr.yaxis.set_major_formatter(yformatter)
axt = ax.twiny()
axt.set_xticks([])
axt.set_xticks(kperp(ax.get_xticks()*freq/FCNST.c, redshift))
axt.set_xlim(kperp(NP.asarray(ax.get_xlim())*freq/FCNST.c, redshift))
axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axt.xaxis.set_major_formatter(xformatter)
PLT.tight_layout()
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/fourier_space_{0:.1f}_MHz_{1:.1f}_MHz.png'.format(freq/1e6,nchan*freq_resolution/1e6))
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/fourier_space_{0:.1f}_MHz_{1:.1f}_MHz.eps'.format(freq/1e6,nchan*freq_resolution/1e6))
##################################################################
if plot_17 or plot_18 or plot_19:
delta_array_usm_infile = '/data3/t_nithyanandan/'+project_dir+'/delta_array_multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_usm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb'
delta_array_usm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/delta_array_multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_usm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb_'+bpass_shape
delta_array_asm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/delta_array_multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb_'+bpass_shape
mwa_dipole_asm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/mwa_dipole_multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb_'+bpass_shape
hera_asm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/hera_multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb_'+bpass_shape
delta_usm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/delta_multi_baseline_CLEAN_visibilities_no_ground_'+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_usm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb_'+bpass_shape
ia = RI.InterferometerArray(None, None, None, init_file=delta_array_usm_infile+'.fits')
simdata_bl_orientation = NP.angle(ia.baselines[:,0] + 1j * ia.baselines[:,1], deg=True)
simdata_neg_bl_orientation_ind = simdata_bl_orientation > 90.0 + 0.5*180.0/n_bins_baseline_orientation
simdata_bl_orientation[simdata_neg_bl_orientation_ind] -= 180.0
ia.baselines[simdata_neg_bl_orientation_ind,:] = -ia.baselines[simdata_neg_bl_orientation_ind,:]
hdulist = fits.open(delta_array_usm_infile+'.fits')
latitude = hdulist[0].header['latitude']
pointing_coords = hdulist[0].header['pointing_coords']
pointings_table = hdulist['POINTING AND PHASE CENTER INFO'].data
lst = pointings_table['LST']
n_snaps = lst.size
hdulist.close()
if pointing_coords == 'altaz':
pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'radec':
pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'hadec':
pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
hdulist = fits.open(delta_array_usm_CLEAN_infile+'.fits')
clean_lags = hdulist['SPECTRAL INFO'].data['lag']
clean_lags_orig = NP.copy(clean_lags)
delta_array_usm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
delta_array_usm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
delta_array_usm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
delta_array_usm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
hdulist = fits.open(delta_array_asm_CLEAN_infile+'.fits')
delta_array_asm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
delta_array_asm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
delta_array_asm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
delta_array_asm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
hdulist = fits.open(mwa_dipole_asm_CLEAN_infile+'.fits')
mwa_dipole_asm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
mwa_dipole_asm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
mwa_dipole_asm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
mwa_dipole_asm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
hdulist = fits.open(hera_asm_CLEAN_infile+'.fits')
hera_asm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
hera_asm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
hera_asm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
hera_asm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
hdulist = fits.open(delta_usm_CLEAN_infile+'.fits')
delta_usm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
delta_usm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
delta_usm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
delta_usm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
clean_lags = DSP.downsampler(clean_lags, 1.0*clean_lags.size/ia.lags.size, axis=-1)
clean_lags = clean_lags.ravel()
delaymat = DLY.delay_envelope(ia.baselines[truncated_ref_bl_ind,:], pc, units='mks')
bw = nchan * freq_resolution
min_delay = -delaymat[0,:,1]-delaymat[0,:,0]
max_delay = delaymat[0,:,0]-delaymat[0,:,1]
clags = clean_lags.reshape(1,-1)
min_delay = min_delay.reshape(-1,1)
max_delay = max_delay.reshape(-1,1)
thermal_noise_window = NP.abs(clags) >= max_abs_delay*1e-6
thermal_noise_window = NP.repeat(thermal_noise_window, ia.baselines[truncated_ref_bl_ind,:].shape[0], axis=0)
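    # Delay-space windows: the EoR window lies beyond the horizon delay plus a 3/bandwidth buffer; the strict window additionally requires |lag| < 1/coarse_channel_resolution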
EoR_window = NP.logical_or(clags > max_delay+3/bw, clags < min_delay-3/bw)
strict_EoR_window = NP.logical_and(EoR_window, NP.abs(clags) < 1/coarse_channel_resolution)
wedge_window = NP.logical_and(clags <= max_delay, clags >= min_delay)
non_wedge_window = NP.logical_not(wedge_window)
small_delays_EoR_window = EoR_window.T
small_delays_strict_EoR_window = strict_EoR_window.T
small_delays_wedge_window = wedge_window.T
if max_abs_delay is not None:
small_delays_ind = NP.abs(clean_lags) <= max_abs_delay * 1e-6
clean_lags = clean_lags[small_delays_ind]
small_delays_EoR_window = small_delays_EoR_window[small_delays_ind,:]
small_delays_strict_EoR_window = small_delays_strict_EoR_window[small_delays_ind,:]
small_delays_wedge_window = small_delays_wedge_window[small_delays_ind,:]
if plot_17:
# 17) Plot delay spectra of the MWA tile power pattern using a uniform sky model
delta_array_usm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = delta_array_usm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
delta_array_usm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = delta_array_usm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
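    # Add the CLEAN residuals back to the CLEAN components and transform from frequency to delay space (inverse FFT scaled by the total bandwidth)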
delta_array_usm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(delta_array_usm_cc_skyvis, axis=1),axes=1) * delta_array_usm_cc_skyvis.shape[1] * freq_resolution
delta_array_usm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(delta_array_usm_cc_skyvis_res, axis=1),axes=1) * delta_array_usm_cc_skyvis.shape[1] * freq_resolution
delta_array_usm_cc_skyvis_lag = delta_array_usm_cc_skyvis_lag + delta_array_usm_ccres_sky
delta_array_usm_cc_skyvis_lag = DSP.downsampler(delta_array_usm_cc_skyvis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
delta_array_usm_cc_skyvis_lag = delta_array_usm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
delta_array_usm_dspec_max = NP.abs(delta_array_usm_cc_skyvis_lag).max()
delta_array_usm_dspec_min = NP.abs(delta_array_usm_cc_skyvis_lag).min()
# delta_array_usm_dspec_max = delta_array_usm_dspec_max**2 * volfactor1 * volfactor2 * Jy2K**2
# delta_array_usm_dspec_min = delta_array_usm_dspec_min**2 * volfactor1 * volfactor2 * Jy2K**2
if max_abs_delay is not None:
delta_array_usm_cc_skyvis_lag = delta_array_usm_cc_skyvis_lag[:,small_delays_ind,:]
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
imdspec = axs[j].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(delta_array_usm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e5**2) * volfactor1 * volfactor2 * Jy2K**2, vmax=(delta_array_usm_dspec_max**2) * volfactor1 * volfactor2 * Jy2K**2))
horizonb = axs[j].plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = axs[j].plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
axs[j].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
axs[j].set_aspect('auto')
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
for j in xrange(n_snaps):
axs_kprll = axs[j].twinx()
axs_kprll.set_yticks(kprll(axs[j].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[j].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
if j == 0:
axs_kperp = axs[j].twiny()
axs_kperp.set_xticks(kperp(axs[j].get_xticks()*freq/FCNST.c, redshift))
axs_kperp.set_xlim(kperp(NP.asarray(axs[j].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
axs_kperp.xaxis.set_major_formatter(xformatter)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/delta_array_multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_usm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'_no_pfb_{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/delta_array_multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_usm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'_no_pfb_{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
##################################################################
if plot_18:
# 18) Plot delay spectra of the all-sky model with dipole, MWA tile, and HERA dish antenna shapes
mwa_dipole_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = mwa_dipole_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
mwa_dipole_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = mwa_dipole_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
mwa_dipole_asm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(mwa_dipole_asm_cc_skyvis, axis=1),axes=1) * mwa_dipole_asm_cc_skyvis.shape[1] * freq_resolution
mwa_dipole_asm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(mwa_dipole_asm_cc_skyvis_res, axis=1),axes=1) * mwa_dipole_asm_cc_skyvis.shape[1] * freq_resolution
mwa_dipole_asm_cc_skyvis_lag = mwa_dipole_asm_cc_skyvis_lag + mwa_dipole_asm_ccres_sky
mwa_dipole_asm_cc_skyvis_lag = DSP.downsampler(mwa_dipole_asm_cc_skyvis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
delta_array_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = delta_array_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
delta_array_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = delta_array_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
delta_array_asm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(delta_array_asm_cc_skyvis, axis=1),axes=1) * delta_array_asm_cc_skyvis.shape[1] * freq_resolution
delta_array_asm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(delta_array_asm_cc_skyvis_res, axis=1),axes=1) * delta_array_asm_cc_skyvis.shape[1] * freq_resolution
delta_array_asm_cc_skyvis_lag = delta_array_asm_cc_skyvis_lag + delta_array_asm_ccres_sky
delta_array_asm_cc_skyvis_lag = DSP.downsampler(delta_array_asm_cc_skyvis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
hera_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = hera_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
hera_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = hera_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
hera_asm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(hera_asm_cc_skyvis, axis=1),axes=1) * hera_asm_cc_skyvis.shape[1] * freq_resolution
hera_asm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(hera_asm_cc_skyvis_res, axis=1),axes=1) * hera_asm_cc_skyvis.shape[1] * freq_resolution
hera_asm_cc_skyvis_lag = hera_asm_cc_skyvis_lag + hera_asm_ccres_sky
hera_asm_cc_skyvis_lag = DSP.downsampler(hera_asm_cc_skyvis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
delta_array_asm_cc_skyvis_lag = delta_array_asm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
mwa_dipole_asm_cc_skyvis_lag = mwa_dipole_asm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
hera_asm_cc_skyvis_lag = hera_asm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
if max_abs_delay is not None:
delta_array_asm_cc_skyvis_lag = delta_array_asm_cc_skyvis_lag[:,small_delays_ind,:]
mwa_dipole_asm_cc_skyvis_lag = mwa_dipole_asm_cc_skyvis_lag[:,small_delays_ind,:]
hera_asm_cc_skyvis_lag = hera_asm_cc_skyvis_lag[:,small_delays_ind,:]
antelem_asm_dspec_max = max([NP.abs(mwa_dipole_asm_cc_skyvis_lag).max(), NP.abs(delta_array_asm_cc_skyvis_lag).max(), NP.abs(hera_asm_cc_skyvis_lag).max()])
antelem_asm_dspec_min = min([NP.abs(mwa_dipole_asm_cc_skyvis_lag).min(), NP.abs(delta_array_asm_cc_skyvis_lag).min(), NP.abs(hera_asm_cc_skyvis_lag).min()])
# antelem_asm_dspec_max = antelem_asm_dspec_max**2 * volfactor1 * volfactor2 * Jy2K**2
# antelem_asm_dspec_min = antelem_asm_dspec_min**2 * volfactor1 * volfactor2 * Jy2K**2
delta_array_roifile = '/data3/t_nithyanandan/'+project_dir+'/roi_info_delta_array_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'.fits'
delta_array_roi = RI.ROI_parameters(init_file=delta_array_roifile)
delta_array_telescope = delta_array_roi.telescope
mwa_dipole_roifile = '/data3/t_nithyanandan/'+project_dir+'/roi_info_mwa_dipole_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'.fits'
mwa_dipole_roi = RI.ROI_parameters(init_file=mwa_dipole_roifile)
mwa_dipole_telescope = mwa_dipole_roi.telescope
hera_roifile = '/data3/t_nithyanandan/'+project_dir+'/roi_info_hera_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'.fits'
hera_roi = RI.ROI_parameters(init_file=hera_roifile)
hera_telescope = hera_roi.telescope
backdrop_xsize = 100
xmin = -180.0
xmax = 180.0
ymin = -90.0
ymax = 90.0
xgrid, ygrid = NP.meshgrid(NP.linspace(xmax, xmin, backdrop_xsize), NP.linspace(ymin, ymax, backdrop_xsize/2))
xvect = xgrid.ravel()
yvect = ygrid.ravel()
delta_array_pb_snapshots = []
mwa_dipole_pb_snapshots = []
hera_pb_snapshots = []
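    # Evaluate the dipole, phased-array, and dish primary beams on the sky grid for each snapshot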
for i in xrange(n_snaps):
havect = lst[i] - xvect
altaz = GEOM.hadec2altaz(NP.hstack((havect.reshape(-1,1),yvect.reshape(-1,1))), latitude, units='degrees')
dircos = GEOM.altaz2dircos(altaz, units='degrees')
roi_altaz = NP.asarray(NP.where(altaz[:,0] >= 0.0)).ravel()
az = altaz[:,1] + 0.0
az[az > 360.0 - 0.5*180.0/n_sky_sectors] -= 360.0
roi_sector_altaz = NP.asarray(NP.where(NP.logical_or(NP.logical_and(az[roi_altaz] >= -0.5*180.0/n_sky_sectors + sky_sector*180.0/n_sky_sectors, az[roi_altaz] < -0.5*180.0/n_sky_sectors + (sky_sector+1)*180.0/n_sky_sectors), NP.logical_and(az[roi_altaz] >= 180.0 - 0.5*180.0/n_sky_sectors + sky_sector*180.0/n_sky_sectors, az[roi_altaz] < 180.0 - 0.5*180.0/n_sky_sectors + (sky_sector+1)*180.0/n_sky_sectors)))).ravel()
pb = NP.empty(xvect.size)
pb.fill(NP.nan)
pb[roi_altaz] = PB.primary_beam_generator(altaz[roi_altaz,:], freq, telescope=mwa_dipole_telescope, skyunits='altaz', freq_scale='Hz', pointing_info=mwa_dipole_roi.pinfo[i])
mwa_dipole_pb_snapshots += [pb.copy()]
pb[roi_altaz] = PB.primary_beam_generator(altaz[roi_altaz,:], freq, telescope=delta_array_telescope, skyunits='altaz', freq_scale='Hz', pointing_info=delta_array_roi.pinfo[i])
delta_array_pb_snapshots += [pb.copy()]
pb[roi_altaz] = PB.primary_beam_generator(altaz[roi_altaz,:], freq, telescope=hera_telescope, skyunits='altaz', freq_scale='Hz', pointing_info=hera_roi.pinfo[i])
hera_pb_snapshots += [pb.copy()]
for j in xrange(n_snaps):
fig, axs = PLT.subplots(ncols=3, nrows=1, sharex=True, sharey=True, figsize=(10.5,2))
mwa_dipole_pbsky = axs[0].imshow(mwa_dipole_pb_snapshots[j].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm(vmin=1e-5, vmax=1.0), cmap=CM.jet)
axs[0].set_xlim(xvect.max(), xvect.min())
axs[0].set_ylim(yvect.min(), yvect.max())
axs[0].grid(True, which='both')
axs[0].set_aspect('auto')
axs[0].tick_params(which='major', length=12, labelsize=12)
axs[0].tick_params(which='minor', length=6)
axs[0].locator_params(axis='x', nbins=5)
axs[0].text(0.5, 0.87, 'Dipole', transform=axs[0].transAxes, fontsize=14, weight='semibold', ha='center', color='black')
delta_array_pbsky = axs[1].imshow(delta_array_pb_snapshots[j].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm(vmin=1e-5, vmax=1.0), cmap=CM.jet)
axs[1].set_xlim(xvect.max(), xvect.min())
axs[1].set_ylim(yvect.min(), yvect.max())
axs[1].grid(True, which='both')
axs[1].set_aspect('auto')
axs[1].tick_params(which='major', length=12, labelsize=12)
axs[1].tick_params(which='minor', length=6)
axs[1].locator_params(axis='x', nbins=5)
axs[1].text(0.5, 0.87, 'Phased Array', transform=axs[1].transAxes, fontsize=14, weight='semibold', ha='center', color='black')
hera_pbsky = axs[2].imshow(hera_pb_snapshots[j].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm(vmin=1e-5, vmax=1.0), cmap=CM.jet)
axs[2].set_xlim(xvect.max(), xvect.min())
axs[2].set_ylim(yvect.min(), yvect.max())
axs[2].grid(True, which='both')
axs[2].set_aspect('auto')
axs[2].tick_params(which='major', length=12, labelsize=12)
axs[2].tick_params(which='minor', length=6)
axs[2].locator_params(axis='x', nbins=5)
axs[2].text(0.5, 0.87, 'Dish', transform=axs[2].transAxes, fontsize=14, weight='semibold', ha='center', color='black')
cbax = fig.add_axes([0.9, 0.24, 0.02, 0.7])
cbar = fig.colorbar(hera_pbsky, cax=cbax, orientation='vertical')
fig.subplots_adjust(wspace=0, hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=25)
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=15)
# PLT.tight_layout()
fig.subplots_adjust(right=0.89)
fig.subplots_adjust(top=0.94)
fig.subplots_adjust(bottom=0.24)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/powerpattern_'+ground_plane_str+snapshot_type_str+obs_mode+'_no_delayerr_snapshot_{0:0d}.png'.format(j), bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/powerpattern_'+ground_plane_str+snapshot_type_str+obs_mode+'_no_delayerr_snapshot_{0:0d}.eps'.format(j), bbox_inches=0)
for j in xrange(n_snaps):
fig, axs = PLT.subplots(ncols=3, nrows=1, sharex=True, sharey=True, figsize=(10.5,5))
mwa_dipole_imdspec = axs[0].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(mwa_dipole_asm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e5)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=(antelem_asm_dspec_max**2) * volfactor1 * volfactor2 * Jy2K**2))
horizonb = axs[0].plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = axs[0].plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
axs[0].set_xlim(truncated_ref_bl_length.min(), truncated_ref_bl_length.max())
axs[0].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
# axs[0].set_aspect('auto')
axs[0].text(0.5, 0.9, 'Dipole', transform=axs[0].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
delta_array_imdspec = axs[1].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(delta_array_asm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e5)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=(antelem_asm_dspec_max**2) * volfactor1 * volfactor2 * Jy2K**2))
horizonb = axs[1].plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = axs[1].plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
axs[1].set_xlim(truncated_ref_bl_length.min(), truncated_ref_bl_length.max())
axs[1].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
# axs[1].set_aspect('auto')
axs[1].text(0.5, 0.9, 'Phased Array', transform=axs[1].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
hera_imdspec = axs[2].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(hera_asm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e5)**2 * volfactor1 * volfactor2 * Jy2K**2, vmax=(antelem_asm_dspec_max**2) * volfactor1 * volfactor2 * Jy2K**2))
horizonb = axs[2].plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = axs[2].plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
axs[2].set_xlim(truncated_ref_bl_length.min(), truncated_ref_bl_length.max())
axs[2].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
# axs[2].set_aspect('auto')
axs[2].text(0.5, 0.9, 'Dish', transform=axs[2].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
mwa_dipole_axs_kperp = axs[0].twiny()
mwa_dipole_axs_kperp.set_xticks(kperp(axs[0].get_xticks()*freq/FCNST.c, redshift))
mwa_dipole_axs_kperp.set_xlim(kperp(NP.asarray(axs[0].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
mwa_dipole_axs_kperp.xaxis.set_major_formatter(xformatter)
delta_array_axs_kperp = axs[1].twiny()
delta_array_axs_kperp.set_xticks(kperp(axs[1].get_xticks()*freq/FCNST.c, redshift))
delta_array_axs_kperp.set_xlim(kperp(NP.asarray(axs[1].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
delta_array_axs_kperp.xaxis.set_major_formatter(xformatter)
hera_axs_kperp = axs[2].twiny()
hera_axs_kperp.set_xticks(kperp(axs[2].get_xticks()*freq/FCNST.c, redshift))
hera_axs_kperp.set_xlim(kperp(NP.asarray(axs[2].get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
hera_axs_kperp.xaxis.set_major_formatter(xformatter)
axs_kprll = axs[2].twinx()
axs_kprll.set_yticks(kprll(axs[2].get_yticks()*1e-6, redshift))
axs_kprll.set_ylim(kprll(NP.asarray(axs[2].get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
axs_kprll.yaxis.set_major_formatter(yformatter)
fig.subplots_adjust(wspace=0, hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
big_axr = big_ax.twinx()
big_axr.set_axis_bgcolor('none')
big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axr.set_xticks([])
big_axr.set_yticks([])
big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
big_axt = big_ax.twiny()
big_axt.set_axis_bgcolor('none')
big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_axt.set_xticks([])
big_axt.set_yticks([])
big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(hera_imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.8)
fig.subplots_adjust(top=0.85)
fig.subplots_adjust(left=0.1)
fig.subplots_adjust(bottom=0.125)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/multi_antenna_multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'_no_pfb_{0:.1f}_snapshot_{1:0d}'.format(oversampling_factor,j)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/multi_antenna_multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'_no_pfb_{0:.1f}_snapshot_{1:0d}'.format(oversampling_factor,j)+'.eps', bbox_inches=0)
##################################################################
if plot_19:
# 19) Plot delay spectrum of uniform sky model with a uniform power pattern
delta_usm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = delta_usm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
delta_usm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = delta_usm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
delta_usm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(delta_usm_cc_skyvis, axis=1),axes=1) * delta_usm_cc_skyvis.shape[1] * freq_resolution
delta_usm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(delta_usm_cc_skyvis_res, axis=1),axes=1) * delta_usm_cc_skyvis.shape[1] * freq_resolution
delta_usm_cc_skyvis_lag = delta_usm_cc_skyvis_lag + delta_usm_ccres_sky
delta_usm_cc_skyvis_lag = DSP.downsampler(delta_usm_cc_skyvis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
delta_usm_cc_skyvis_lag = delta_usm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
delta_usm_dspec_max = NP.abs(delta_usm_cc_skyvis_lag).max()
delta_usm_dspec_min = NP.abs(delta_usm_cc_skyvis_lag).min()
# delta_usm_dspec_max = delta_usm_dspec_max**2 * volfactor1 * volfactor2 * Jy2K**2
# delta_usm_dspec_min = delta_usm_dspec_min**2 * volfactor1 * volfactor2 * Jy2K**2
if max_abs_delay is not None:
delta_usm_cc_skyvis_lag = delta_usm_cc_skyvis_lag[:,small_delays_ind,:]
for j in xrange(n_snaps):
fig = PLT.figure(figsize=(6,6))
ax = fig.add_subplot(111)
imdspec = ax.pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(delta_usm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e5**2) * volfactor1 * volfactor2 * Jy2K**2, vmax=(delta_usm_dspec_max**2) * volfactor1 * volfactor2 * Jy2K**2))
horizonb = ax.plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
horizont = ax.plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
ax.set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
ax.set_aspect('auto')
# ax.text(0.5, 0.9, descriptor_str[j], transform=ax.transAxes, fontsize=14, weight='semibold', ha='center', color='white')
ax_kprll = ax.twinx()
ax_kprll.set_yticks(kprll(ax.get_yticks()*1e-6, redshift))
ax_kprll.set_ylim(kprll(NP.asarray(ax.get_ylim())*1e-6, redshift))
yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
ax_kprll.yaxis.set_major_formatter(yformatter)
ax_kperp = ax.twiny()
ax_kperp.set_xticks(kperp(ax.get_xticks()*freq/FCNST.c, redshift))
ax_kperp.set_xlim(kperp(NP.asarray(ax.get_xlim())*freq/FCNST.c, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
ax_kperp.xaxis.set_major_formatter(xformatter)
ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium')
ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium')
ax_kprll.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
ax_kperp.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/delta_multi_baseline_CLEAN_noiseless_PS_no_ground_'+snapshot_type_str+obs_mode+'_gaussian_FG_model_usm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'_no_pfb_{0:.1f}_snapshot_{1:0d}'.format(oversampling_factor,j)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/delta_multi_baseline_CLEAN_noiseless_PS_no_ground_'+snapshot_type_str+obs_mode+'_gaussian_FG_model_usm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'_no_pfb_{0:.1f}_snapshot_{1:0d}'.format(oversampling_factor,j)+'.eps', bbox_inches=0)
| mit |
ukuleleplayer/pureples | pureples/shared/visualize.py | 1 | 3149 | import graphviz
import matplotlib.pyplot as plt
try:
import cPickle as pickle
except ImportError:
import pickle
# Draw neural network with arbitrary topology.
def draw_net(net, filename=None, node_names=None, node_colors=None):
    # Use None defaults to avoid shared mutable default arguments.
    if node_names is None:
        node_names = {}
    if node_colors is None:
        node_colors = {}
    node_attrs = {
        'shape': 'circle',
        'fontsize': '9',
        'height': '0.2',
        'width': '0.2'}
dot = graphviz.Digraph('svg', node_attr=node_attrs)
inputs = set()
for k in net.input_nodes:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled',
'shape': 'box',
'fillcolor': node_colors.get(k, 'lightgray')}
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in net.output_nodes:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled',
'fillcolor': node_colors.get(k, 'lightblue')}
dot.node(name, _attributes=node_attrs)
for node, act_func, agg_func, bias, response, links in net.node_evals:
for i, w in links:
            # Each link feeds the output of node i into `node`; draw the edge i -> node.
            source, target = i, node
            a = node_names.get(source, str(source))
            b = node_names.get(target, str(target))
            style = 'solid'
            color = 'green' if w > 0.0 else 'red'
            width = str(0.1 + abs(w / 5.0))
            dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
dot.render(filename)
return dot
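# Illustrative usage sketch (assumes a neat-python FeedForwardNetwork; the genome/config names are placeholders):
#     net = neat.nn.FeedForwardNetwork.create(genome, config)
#     draw_net(net, filename="winner_net", node_names={-1: 'x', -2: 'y', 0: 'out'})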
# Click handler for weight gradient created by a CPPN. Will re-query with the clicked coordinate.
def onclick(event):
plt.close()
x = event.xdata
y = event.ydata
path_to_cppn = "es_hyperneat_xor_small_cppn.pkl"
    with open(path_to_cppn, 'rb') as cppn_file:
        # For now, path_to_cppn should match the path used in test_cppn.py, sorry.
        cppn = pickle.load(cppn_file)
from pureples.es_hyperneat.es_hyperneat import find_pattern
pattern = find_pattern(cppn, (x, y))
draw_pattern(pattern)
# Draws the pattern/weight gradient queried by a CPPN.
def draw_pattern(im, res=60):
fig = plt.figure()
plt.axis([-1, 1, -1, 1])
fig.add_subplot(111)
a = range(res)
b = range(res)
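    # Paint each grid cell as a grayscale square whose shade encodes the CPPN-queried weight at that coordinate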
for x in a:
for y in b:
px = -1.0 + (x/float(res))*2.0+1.0/float(res)
py = -1.0 + (y/float(res))*2.0+1.0/float(res)
c = str(0.5-im[x][y]/float(res))
plt.plot(px, py, marker='s', color=c)
fig.canvas.mpl_connect('button_press_event', onclick)
plt.grid()
plt.show()
# Draw the net created by ES-HyperNEAT
def draw_es(id_to_coords, connections, filename):
fig = plt.figure()
plt.axis([-1.1, 1.1, -1.1, 1.1])
fig.add_subplot(111)
for c in connections:
color = 'red'
if c.weight > 0.0:
color = 'black'
        plt.arrow(c.x1, c.y1, c.x2-c.x1, c.y2-c.y1, head_width=0.00, head_length=0.0,
                  fc=color, ec=color, length_includes_head=True)
for (coord, idx) in id_to_coords.items():
plt.plot(coord[0], coord[1], marker='o', markersize=8.0, color='grey')
plt.grid()
fig.savefig(filename)
| mit |
annayqho/TheCannon | code/lamost/xcalib_5labels/paper_plots/coverage_with_dist.py | 1 | 2709 | # read in all LAMOST labels
import numpy as np
from matplotlib import rc
rc('font', family='serif')
rc('text', usetex=True)
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
def calc_dist(lamost_point, training_points, coeffs):
""" avg dist from one lamost point to nearest 10 training points """
diff2 = (training_points - lamost_point)**2
dist = np.sqrt(np.sum(diff2*coeffs, axis=1))
return np.mean(dist[dist.argsort()][0:10])
def dist(lamost_point, cannon_point, coeffs):
diff2 = (lamost_point - cannon_point)**2
dist = np.sqrt(np.sum(diff2*coeffs,axis=1))
return dist
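# Inverse-variance weights for (Teff, logg, [Fe/H]) using characteristic scales of 100 K, 0.2 dex and 0.1 dex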
coeffs = 1./(np.array([100,0.2,0.1])**2)
# get all the training set values
with np.load("../../examples/test_training_overlap/tr_label.npz") as a:
training_points = a['arr_0'][:,0:3]
direc = "../../examples/lamost_dr2"
teff_all = np.loadtxt(
"%s/lamost_labels_all_dates.csv" %direc,
delimiter=',', dtype='float', usecols=(1,),
skiprows=1)
logg_all = np.loadtxt(
"%s/lamost_labels_all_dates.csv" %direc,
delimiter=',', dtype='float', usecols=(2,),
skiprows=1)
feh_all = np.loadtxt(
"%s/lamost_labels_all_dates.csv" %direc,
delimiter=',', dtype='float', usecols=(3,),
skiprows=1)
teff = np.loadtxt(
"%s/lamost_labels_20121125.csv" %direc, delimiter=',',
dtype='float', usecols=(1,), skiprows=1)
logg = np.loadtxt(
"%s/lamost_labels_20121125.csv" %direc, delimiter=',',
dtype='float', usecols=(2,), skiprows=1)
feh = np.loadtxt(
"%s/lamost_labels_20121125.csv" %direc, delimiter=',',
dtype='float', usecols=(3,), skiprows=1)
lamost_points = np.vstack((teff,logg,feh)).T
# calculate distances
training_dist = np.array(
[calc_dist(p, training_points, coeffs) for p in lamost_points])
# plot all
plt.figure(figsize=(10,8))
plt.hist2d(teff_all,logg_all,bins=1000,norm=LogNorm(), cmap="Greys")
plt.ylim(1.5,5)
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# plot training set for training_dist < 2.5
cut = training_dist < 2.5
plt.scatter(teff[cut],logg[cut],c='darkorange',s=1,lw=0,
label=r"Distance from Training Labels $<$ 2.5")
cut = training_dist > 2.5
im = plt.scatter(teff[cut],logg[cut],c='darkorchid',s=1,lw=0,
label=r"Distance from Training Labels $>$ 2.5")
plt.legend(loc='upper left', fontsize=16, markerscale=5)
plt.xlabel(r"$\mbox{T}_{\mbox{eff}}$ [K] from LAMOST DR2", fontsize=16)
plt.ylabel(r"log g [dex] from LAMOST DR2", fontsize=16)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
plt.savefig("ts_distance_in_full_lamost_label_space.png")
plt.close()
| mit |
belteshassar/cartopy | lib/cartopy/io/img_tiles.py | 2 | 15185 | # (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Implements image tile identification and fetching from various sources.
The matplotlib interface can make use of tile objects (defined below) via the
:meth:`cartopy.mpl.geoaxes.GeoAxes.add_image` method. For example, to add a
:class:`MapQuest Open Aerial tileset <MapQuestOpenAerial>` to an existing axes
at zoom level 2, do ``ax.add_image(MapQuestOpenAerial(), 2)``. An example of
using tiles in this way can be found at :ref:`examples-eyja_volcano`.
"""
from __future__ import (absolute_import, division, print_function)
from PIL import Image
import shapely.geometry as sgeom
import numpy as np
import six
import cartopy.crs as ccrs
class GoogleTiles(object):
"""
Implements web tile retrieval using the Google WTS coordinate system.
A "tile" in this class refers to the coordinates (x, y, z).
"""
def __init__(self, desired_tile_form='RGB', style="street"):
"""
:param desired_tile_form:
:param style: The style for the Google Maps tiles. One of 'street',
'satellite', 'terrain', and 'only_streets'.
Defaults to 'street'.
"""
# Only streets are partly transparent tiles that can be overlayed over
# the satellite map to create the known hybrid style from google.
styles = ["street", "satellite", "terrain", "only_streets"]
style = style.lower()
if style not in styles:
msg = "Invalid style '%s'. Valid styles: %s" % \
(style, ", ".join(styles))
raise ValueError(msg)
self.style = style
# The 'satellite' and 'terrain' styles require pillow with a jpeg
# decoder.
        if self.style in ["satellite", "terrain"] and \
                not (hasattr(Image.core, "jpeg_decoder") and
                     Image.core.jpeg_decoder):
msg = "The '%s' style requires pillow with jpeg decoding support."
raise ValueError(msg % self.style)
self.imgs = []
self.crs = ccrs.Mercator.GOOGLE
self.desired_tile_form = desired_tile_form
def image_for_domain(self, target_domain, target_z):
tiles = []
for tile in self.find_images(target_domain, target_z):
try:
img, extent, origin = self.get_image(tile)
except IOError:
continue
img = np.array(img)
x = np.linspace(extent[0], extent[1], img.shape[1])
y = np.linspace(extent[2], extent[3], img.shape[0])
tiles.append([img, x, y, origin])
img, extent, origin = _merge_tiles(tiles)
return img, extent, origin
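    # Usage sketch (variable names are hypothetical): with ``domain`` a shapely
    # polygon in this tiler's CRS (Google Mercator), something like
    #   img, extent, origin = GoogleTiles().image_for_domain(domain, 2)
    # yields a merged array plus its extent, suitable for
    # ``ax.imshow(img, extent=extent, origin=origin)``.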
def _find_images(self, target_domain, target_z, start_tile=(0, 0, 0)):
"""Target domain is a shapely polygon in native coordinates."""
assert isinstance(target_z, int) and target_z >= 0, ('target_z must '
'be an integer '
'>=0.')
# Recursively drill down to the images at the target zoom.
x0, x1, y0, y1 = self._tileextent(start_tile)
domain = sgeom.box(x0, y0, x1, y1)
if domain.intersects(target_domain):
if start_tile[2] == target_z:
yield start_tile
else:
for tile in self._subtiles(start_tile):
for result in self._find_images(target_domain, target_z,
start_tile=tile):
yield result
find_images = _find_images
def subtiles(self, x_y_z):
x, y, z = x_y_z
# Google tile specific (i.e. up->down).
for xi in range(0, 2):
for yi in range(0, 2):
yield x * 2 + xi, y * 2 + yi, z + 1
_subtiles = subtiles
def tile_bbox(self, x, y, z, y0_at_north_pole=True):
"""
Returns the ``(x0, x1), (y0, y1)`` bounding box for the given x, y, z
tile position.
Parameters
----------
x, y, z : int
The x, y, z tile coordinates in the Google tile numbering system
(with y=0 being at the north pole), unless `y0_at_north_pole` is
set to ``False``, in which case `y` is in the TMS numbering system
(with y=0 being at the south pole).
y0_at_north_pole : bool
Whether the numbering of the y coordinate starts at the north
pole (as is the convention for Google tiles), or the south
pole (as is the convention for TMS).
"""
n = 2 ** z
        assert 0 <= x <= (n - 1), ("Tile's x index is out of range. Upper "
                                   "limit %s. Got %s" % (n - 1, x))
        assert 0 <= y <= (n - 1), ("Tile's y index is out of range. Upper "
                                   "limit %s. Got %s" % (n - 1, y))
x0, x1 = self.crs.x_limits
y0, y1 = self.crs.y_limits
# Compute the box height and width in native coordinates
# for this zoom level.
box_h = (y1 - y0) / n
box_w = (x1 - x0) / n
# Compute the native x & y extents of the tile.
n_xs = x0 + (x + np.arange(0, 2, dtype=np.float64)) * box_w
n_ys = y0 + (y + np.arange(0, 2, dtype=np.float64)) * box_h
if y0_at_north_pole:
n_ys = -1 * n_ys[::-1]
return n_xs, n_ys
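    # Worked example (sketch, quoting the approximate Google Mercator limits of
    # +/- 20037508 m): at z=1 the domain splits into a 2x2 grid, so the Google
    # tile (x=0, y=0, z=1) gives n_xs = [x0, 0] and, after the y0_at_north_pole
    # flip, n_ys = [0, y1] -- i.e. the north-west quadrant of the map.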
def tileextent(self, x_y_z):
"""Returns extent tuple ``(x0,x1,y0,y1)`` in Mercator coordinates."""
x, y, z = x_y_z
x_lim, y_lim = self.tile_bbox(x, y, z, y0_at_north_pole=True)
return tuple(x_lim) + tuple(y_lim)
_tileextent = tileextent
def _image_url(self, tile):
style_dict = {
"street": "m",
"satellite": "s",
"terrain": "t",
"only_streets": "h"}
url = ('https://mts0.google.com/vt/lyrs={style}@177000000&hl=en&'
'src=api&x={tile_x}&y={tile_y}&z={tile_z}&s=G'.format(
style=style_dict[self.style],
tile_x=tile[0],
tile_y=tile[1],
tile_z=tile[2]))
return url
def get_image(self, tile):
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
url = self._image_url(tile)
fh = urlopen(url)
im_data = six.BytesIO(fh.read())
fh.close()
img = Image.open(im_data)
img = img.convert(self.desired_tile_form)
return img, self.tileextent(tile), 'lower'
class MapQuestOSM(GoogleTiles):
# http://developer.mapquest.com/web/products/open/map for terms of use
def _image_url(self, tile):
x, y, z = tile
url = 'http://otile1.mqcdn.com/tiles/1.0.0/osm/%s/%s/%s.jpg' % (
z, x, y)
return url
class MapQuestOpenAerial(GoogleTiles):
# http://developer.mapquest.com/web/products/open/map for terms of use
# The following attribution should be included in the resulting image:
# "Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture,
# Farm Service Agency"
def _image_url(self, tile):
x, y, z = tile
url = 'http://oatile1.mqcdn.com/tiles/1.0.0/sat/%s/%s/%s.jpg' % (
z, x, y)
return url
class OSM(GoogleTiles):
# http://developer.mapquest.com/web/products/open/map for terms of use
def _image_url(self, tile):
x, y, z = tile
url = 'https://a.tile.openstreetmap.org/%s/%s/%s.png' % (z, x, y)
return url
class StamenTerrain(GoogleTiles):
"""
Terrain tiles defined for the continental United States, and include land
color and shaded hills. The land colors are a custom palette developed by
Gem Spear for the National Atlas 1km land cover data set, which defines
twenty-four land classifications including five kinds of forest,
combinations of shrubs, grasses and crops, and a few tundras and wetlands.
The colors are at their highest contrast when fully zoomed-out to the
whole U.S., and they slowly fade out to pale off-white as you zoom in to
leave room for foreground data and break up the weirdness of large areas
of flat, dark green.
Additional info:
http://mike.teczno.com/notes/osm-us-terrain-layer/background.html
http://maps.stamen.com/#terrain/12/37.6902/-122.3600
https://wiki.openstreetmap.org/wiki/List_of_OSM_based_Services
https://github.com/migurski/DEM-Tools
"""
def _image_url(self, tile):
x, y, z = tile
url = 'http://tile.stamen.com/terrain-background/%s/%s/%s.png' % (
z, x, y)
return url
class MapboxTiles(GoogleTiles):
"""
Implements web tile retrieval from Mapbox.
For terms of service, see https://www.mapbox.com/tos/.
"""
def __init__(self, access_token, map_id):
"""
Set up a new Mapbox tiles instance.
Access to Mapbox web services requires an access token and a map ID.
See https://www.mapbox.com/developers/api/ for details.
Parameters
----------
access_token: str
A valid Mapbox API access token.
map_id: str
            A map ID for a publicly accessible map. This is the map whose
tiles will be retrieved through this process.
"""
self.access_token = access_token
self.map_id = map_id
super(MapboxTiles, self).__init__()
def _image_url(self, tile):
x, y, z = tile
url = ('https://api.tiles.mapbox.com/v4/{mapid}/{z}/{x}/{y}.png?'
'access_token={token}'.format(z=z, y=y, x=x,
mapid=self.map_id,
token=self.access_token))
return url
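    # Usage sketch (the token and map id below are placeholders, not real values):
    #   tiler = MapboxTiles(access_token='pk.your-token', map_id='your.mapid')
    #   ax.add_image(tiler, 6)   # via cartopy.mpl.geoaxes.GeoAxes.add_image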
class QuadtreeTiles(GoogleTiles):
"""
Implements web tile retrieval using the Microsoft WTS quadkey coordinate
system.
A "tile" in this class refers to a quadkey such as "1", "14" or "141"
    where the length of the quadkey is the zoom level in Google Tile terms.
"""
def _image_url(self, tile):
url = ('http://ecn.dynamic.t1.tiles.virtualearth.net/comp/'
'CompositionHandler/{tile}?mkt=en-'
'gb&it=A,G,L&shading=hill&n=z'.format(tile=tile))
return url
def tms_to_quadkey(self, tms, google=False):
quadKey = ""
x, y, z = tms
# this algorithm works with google tiles, rather than tms, so convert
# to those first.
if not google:
y = (2 ** z - 1) - y
for i in range(z, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (x & mask) != 0:
digit += 1
if (y & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
def quadkey_to_tms(self, quadkey, google=False):
# algorithm ported from
# https://msdn.microsoft.com/en-us/library/bb259689.aspx
assert isinstance(quadkey, six.string_types), \
'quadkey must be a string'
x = y = 0
z = len(quadkey)
for i in range(z, 0, -1):
mask = 1 << (i - 1)
if quadkey[z - i] == '0':
pass
elif quadkey[z - i] == '1':
x |= mask
elif quadkey[z - i] == '2':
y |= mask
elif quadkey[z - i] == '3':
x |= mask
y |= mask
else:
raise ValueError('Invalid QuadKey digit '
                                 'sequence: ' + str(quadkey))
# the algorithm works to google tiles, so convert to tms
if not google:
y = (2 ** z - 1) - y
return (x, y, z)
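    # Worked round trip (illustrative): the Google tile (x=3, y=5, z=3)
    # interleaves to the quadkey "213" via tms_to_quadkey((3, 5, 3), google=True),
    # and quadkey_to_tms("213", google=True) recovers (3, 5, 3).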
def subtiles(self, quadkey):
for i in range(4):
yield quadkey + str(i)
def tileextent(self, quadkey):
x_y_z = self.quadkey_to_tms(quadkey, google=True)
return GoogleTiles.tileextent(self, x_y_z)
def find_images(self, target_domain, target_z, start_tile=None):
"""
Find all the quadtree's at the given target zoom, in the given
target domain.
target_z must be a value >= 1.
"""
if target_z == 0:
raise ValueError('The empty quadtree cannot be returned.')
if start_tile is None:
start_tiles = ['0', '1', '2', '3']
else:
start_tiles = [start_tile]
for start_tile in start_tiles:
start_tile = self.quadkey_to_tms(start_tile, google=True)
for tile in GoogleTiles.find_images(self, target_domain, target_z,
start_tile=start_tile):
yield self.tms_to_quadkey(tile, google=True)
def _merge_tiles(tiles):
"""Return a single image, merging the given images."""
if not tiles:
raise ValueError('A non-empty list of tiles should '
'be provided to merge.')
xset = [set(x) for i, x, y, _ in tiles]
yset = [set(y) for i, x, y, _ in tiles]
xs = xset[0]
xs.update(*xset[1:])
ys = yset[0]
ys.update(*yset[1:])
xs = sorted(xs)
ys = sorted(ys)
other_len = tiles[0][0].shape[2:]
img = np.zeros((len(ys), len(xs)) + other_len, dtype=np.uint8) - 1
for tile_img, x, y, origin in tiles:
y_first, y_last = y[0], y[-1]
yi0, yi1 = np.where((y_first == ys) | (y_last == ys))[0]
if origin == 'upper':
yi0 = tile_img.shape[0] - yi0 - 1
yi1 = tile_img.shape[0] - yi1 - 1
start, stop, step = yi0, yi1, 1 if yi0 < yi1 else -1
if step == 1 and stop == img.shape[0] - 1:
stop = None
elif step == -1 and stop == 0:
stop = None
else:
stop += step
y_slice = slice(start, stop, step)
xi0, xi1 = np.where((x[0] == xs) | (x[-1] == xs))[0]
start, stop, step = xi0, xi1, 1 if xi0 < xi1 else -1
if step == 1 and stop == img.shape[1] - 1:
stop = None
elif step == -1 and stop == 0:
stop = None
else:
stop += step
x_slice = slice(start, stop, step)
img_slice = (y_slice, x_slice, Ellipsis)
if origin == 'lower':
tile_img = tile_img[::-1, ::]
img[img_slice] = tile_img
return img, [min(xs), max(xs), min(ys), max(ys)], 'lower'
| gpl-3.0 |
FabienPean/sofa | applications/plugins/SofaPython/python/AdvancedTimerOutputAnalysis/TimerLJSONPlot.py | 9 | 6307 | import sys
# ploting
import matplotlib.pyplot as plt
# JSON deconding
from collections import OrderedDict
import json
# argument parser: usage via the command line
import argparse
class TimerLJSONPlot() :
def __init__(self):
        pass  # nothing to initialise
def parseInput(self):
parser = argparse.ArgumentParser(
            description='Program to plot a light JSON file from a SOFA simulation.',
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            epilog='''This program creates plots from a light JSON file produced by a SOFA simulation time capture. You can choose the components and the component data to plot. It uses matplotlib (https://matplotlib.org/).''')
parser.add_argument('jsonFileName', metavar='j', type=str, help='Filename of the JSON file to plot.')
        parser.add_argument('-d', type=int, default=0, help='Depth of the analysis. 0 analyses the component and the components on the same level. 1 analyses the component and its children.')
        parser.add_argument('-v', type=str, default='Percent', help='Value to read from the capture values of each component. Valid values are: [Dev, Level, Max, Mean, Min, Num, Percent, Start, Total].')
        parser.add_argument('-c', nargs='+', default=['Mechanical'], help='Component(s) to search for in the JSON file.')
args = parser.parse_args()
        return parser, args
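    # Example invocation (the JSON file name is hypothetical):
    #   python TimerLJSONPlot.py mySimulationTimer.json -d 0 -v Percent -c Mechanical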
###
# Method : parseJsonComponantsId
    # Brief : parse a json file to create a block for the given component name
# Param : jsonData, json - data extracted from the json file
# Param : componantID, string - id of the component to seek in the json file
    # Param : deep, int - 0 to get all components on the same level as the target, 1 to get all children of the target
###
def parseJsonComponantsId(self, jsonData, componantID, deep, value) :
parsedInformations = []
        # The first iteration creates the list that will hold the information
        # The list is defined as follows:
# steps | componantID | subComponant | subComponant2 | ...
# 0 "Steps" | "CompName" | "subCompName"| "subCompName" | ...
# 1 1 | 0.285 | 0.185 | 0.1 | ...
# ...
keyNumber = 0
father = ""
# Each k in this loop is the simulation step
for k,v in jsonData.items() :
            # First pass: gather the information being searched for
if keyNumber == 0 :
row = ["Steps", k]
parsedInformations.append(row)
                # Take information from the target component
for kbis, vbis in v.items() :
if kbis == componantID :
if deep == 0 :
father = vbis["Father"]
else :
father = componantID
row = []
row.append(componantID)
row.append(vbis["Values"][value])
parsedInformations.append(row)
                # Take information from components on the same level as the target
for kbis, vbis in v.items() :
if kbis != componantID and vbis["Father"] == father :
row = []
row.append(kbis)
row.append(vbis["Values"][value])
parsedInformations.append(row)
keyNumber = 1
            # Information extraction
else :
parsedInformations[0].append(int(k))
for kbis, vbis in v.items() :
i = 0
if kbis == componantID or vbis["Father"] == father :
                        # Find the component index in parsedInformations
for j, info in enumerate(parsedInformations) :
if info[0] == kbis :
i = j
                        # store the information
row = parsedInformations[i]
row.append(vbis["Values"][value])
return parsedInformations
###
# Method : parseJsonFile
    # Brief : parse a json file and plot the timer analysis with matplotlib
# Param : jsonFile, string - name of the file to parse
# Param : *componantsID, list of strings - ids of components to seek in the file
###
def parseJsonFile(self, jsonFileName, deep, value, *componantsID):
with open(jsonFileName, "r") as jsonFile :
jsonData = json.load(jsonFile, object_pairs_hook=OrderedDict)
fig, ax = plt.subplots()
lineColors = ["green", "red", "blue", "yellow", "orange", "black", "purple", "brown"]
markStyles = ['.', '+', 'p', '*', 'o', 'v', '^', '<', '>', '8', 's', 'h', 'x', 'D', '2']
lineColorIndice = 0
markStyleIndice = 0
for componantID in componantsID :
result = self.parseJsonComponantsId(jsonData, componantID, deep, value)
# Create plot
for i in result :
if i[0] != "Steps" and i[0] != componantID:
ax.plot(result[0][1:], i[1:], label=i[0], color=lineColors[lineColorIndice], marker=markStyles[markStyleIndice])
markStyleIndice = (markStyleIndice + 1) % len(markStyles)
elif i[0] != "Steps" :
ax.plot(result[0][1:], i[1:], label=i[0], color=lineColors[lineColorIndice])
legend = ax.legend(loc='best', shadow=True, fontsize='x-large')
# Set next line color
lineColorIndice = (lineColorIndice + 1) % len(lineColors)
legend.get_frame().set_facecolor('#00FFCC')
plt.show()
jsonFile.close()
return 0
def main():
# Create the object
obj = TimerLJSONPlot()
# Parse the console input
parser, args = obj.parseInput()
jsonFileName = args.jsonFileName
deep = args.d
value = args.v
componantsID = args.c
obj.parseJsonFile(jsonFileName, deep, value, *componantsID)
return
if __name__ == "__main__":
main()
| lgpl-2.1 |
trankmichael/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 72 | 13586 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and test
sets. The compressed dataset is around 14 MB; once uncompressed, the train
set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple tf-idf
vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
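# Illustrative example (not taken from the scikit-learn test suite):
#   strip_newsgroup_header("From: [email protected]\nSubject: hi\n\nHello world")
# returns "Hello world", since everything before the first blank line is treated
# as the header block.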
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
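# Illustrative example (not taken from the scikit-learn test suite):
#   strip_newsgroup_quoting("> quoted text\nBob writes:\nmy reply")
# keeps only "my reply": lines starting with '>' or containing phrases such as
# 'writes:' match _QUOTE_RE and are dropped.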
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
sergpolly/GlycoMadness | DEPRECATED/previous_pipeline/resolve_peptide_ambiguity copy.py | 1 | 5690 | import sys
from Bio import Seq
from Bio import SeqIO
from Bio import SeqRecord
import pandas as pd
import numpy as np
import ms_module as ms
import re
#
# #
# pep_fname = "../peptides.xls"
# fasta_fname = "dec23_1701.fasta"
if len(sys.argv)<4:
print "Command line arguments required! Launch example:"
print "%s ../input_peptides.xls required_prot_dec23_1701.fasta uniq_peptides_catalog.sv"%sys.argv[0]
sys.exit(1)
pep_fname = sys.argv[1]
fasta_fname = sys.argv[2]
out_fname = sys.argv[3]
#
pep_info = pd.read_csv(pep_fname,sep='\t')
#
fasta = SeqIO.to_dict(SeqIO.parse(fasta_fname,"fasta"),key_function=lambda _: _.id.split('|')[1])
#
# at first let's get a list of unique peptide sequences ...
uniq_pept = pep_info['Peptide sequence'].unique()
# now let's find all Uniprot ids associated with each peptide ...
def extract_uids(peptide_seq,pep_dat_info,columns = ["Protein accession numbers","Other Proteins"]):
""" looking for all uid in specified columns excluding Unknowns."""
pep_dat = pep_dat_info[pep_dat_info["Peptide sequence"]==peptide_seq]
uid_ambig_list = []
for col in columns:
for uids in pep_dat[col].unique():
# avoid missing data ...
if pd.notnull(uids):
# break them up (they are comma-separated) ...
for uid in uids.strip(',').split(','):
# avoid Unknowns ...
if len(uid.split('|'))>1:
uid_ambig_list.append(uid)
#############################################
uid_uniq_list = [ uid_uniq_item.replace(';',' ') for uid_uniq_item in set(uid_ambig_list) ]
to_return = ';'.join(uid_uniq_list) if uid_uniq_list else None
return (pep_dat,to_return)
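    # Return shape (descriptive note): a tuple of (the filtered peptide rows as a
    # DataFrame, a ';'-joined string of the unique UniProt accession strings, or
    # None when only Unknowns were present).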
def get_single_fasta(uids,fa_dict):
"""Uid for the longest protein gets selected as a representative"""
#######################
uid_list = [uid.split('|')[1] for uid in uids.strip(',').split(',')]
fasta_list = [fa_dict[uid] for uid in uid_list]
fasta_len_list = [ len(sr.seq) for sr in fasta_list ]
max_len_index = np.argmax(fasta_len_list)
#######################
return (uid_list[max_len_index],fasta_len_list[max_len_index],fasta_list[max_len_index])
# extract_uids - is working
interesting_peptide = []
uids_list = []
uid_of_maxlen_list = []
prot_len = []
prot_fasta = []
pept_positions = []
prot_name = []
uniq_pept_count = []
pept_probab = []
#
aa_before = []
aa_after = []
for pept in uniq_pept:
pep_dat_pept,uids = extract_uids(pept,pep_info)
if uids:
interesting_peptide.append(pept)
uids_list.append(uids)
#################################
_1,prot_len_fasta,prot_seq_fasta = get_single_fasta(uids,fasta)
uid_of_maxlen_list.append(_1)
prot_len.append(prot_len_fasta)
prot_fasta.append(str(prot_seq_fasta.seq))
peptide_start_in_protein = ms.stupid_aligner(pept,prot_seq_fasta) # 1-based ...
peptide_stop_in_protein = peptide_start_in_protein + len(pept) # 1-based ...
pept_positions.append(peptide_start_in_protein)
prot_name.append(prot_seq_fasta.description.replace(',',' ')) # long protein name here ...
# uniq peptide count taken from pep_dat_pept, for definition look up extract_uids...
uniq_pept_count_val = pep_dat_pept['Exclusive unique peptide count'].unique()[0]
# uniq_pept_count_val, = pep_dat_pept['Exclusive unique peptide count'][pep_dat_pept['Exclusive unique peptide count']>0].unique()
uniq_pept_count.append(uniq_pept_count_val)
# some kind of peptide probability (like a quality score from experimental data)?!
pept_probab_val, = pep_dat_pept['Best Peptide identification probability'].unique()
pept_probab.append(pept_probab_val)
#################################
# BEWARE: 1-BASED INDEXING ALL THE WAY ACROSS SO FAR...
# peptide can start right at N-terminus, so there will be no AminoAcid preceding it, call it a START
aa_before.append(str(prot_seq_fasta.seq)[peptide_start_in_protein-2] if peptide_start_in_protein>1 else 'START')
# peptide can end right at C-terminus, so there will be no AminoAcid after it, call it an END
aa_after.append(str(prot_seq_fasta.seq)[peptide_stop_in_protein+0] if peptide_stop_in_protein<prot_len_fasta else 'END')
#########################################
dict_df = {
"pept":interesting_peptide,
"all_uids":uids_list,
"uid_max":uid_of_maxlen_list,
"protlen":prot_len,
"peptide_start":pept_positions,
"prot_seqrec":prot_fasta,
"aa_before":aa_before,
"aa_after":aa_after,
"prot_name":prot_name,
"uniq_pept_count":uniq_pept_count,
"pept_probab":pept_probab
}
##############################################################################################################################
pep_df = pd.DataFrame(dict_df)
# within-cell separators are ';' now ...
# # let's change within a cell separators to ';' instead of ',', which is used for column separation ...
# for col,dtype in pep_df.dtypes.iteritems():
# if dtype=='object':
# pep_df[col].str.replace(';',' ').str.replace(',',';')
##############################################################################################################################
pep_df.to_csv(out_fname,index=False)
# print "Beware! Some columns have string values with the comma-characters ',' in them, turned out pandas "
# print "deals with the problem graciously, placing such value in quotes, that makes reading such csv files "
# print "an easy task. At least both pandas read_csv and Apple Numbers(like Excel) interpret the situation correctly!"
| mit |
lscsoft/gwdetchar | setup.py | 1 | 3658 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of the GW DetChar python (gwdetchar) package.
#
# gwdetchar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gwdetchar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gwdetchar. If not, see <http://www.gnu.org/licenses/>.
"""Setup the gwdetchar package
"""
from __future__ import print_function
import glob
import os.path
import sys
from setuptools import (setup, find_packages)
# local setup utilities
from _setup_utils import CMDCLASS as cmdclass
# set basic metadata
PACKAGENAME = 'gwdetchar'
DISTNAME = 'gwdetchar'
AUTHOR = 'Duncan Macleod'
AUTHOR_EMAIL = '[email protected]'
LICENSE = 'GPLv3'
# -- versioning ---------------------------------------------------------------
import versioneer
__version__ = versioneer.get_version()
# -- dependencies -------------------------------------------------------------
# build
setup_requires = [
'jsmin',
'libsass',
'setuptools',
]
# run
install_requires = [
'astropy>=1.2',
'gwdatafind',
'gwpy>=0.13.0',
'gwtrigfind',
'MarkupPy>=1.14',
'matplotlib>=2.0.0',
'numpy>=1.10',
'pandas',
'pycondor',
'pytz',
'coloredlogs',
'scikit-learn',
'scipy>=1.2.0',
'setuptools',
'six',
'pathlib2 ; python_version < \'3.6\'',
'pygments',
]
# test
if 'test' in sys.argv:
setup_requires.append('pytest-runner')
tests_require = [
'pytest',
]
# -- run setup ----------------------------------------------------------------
packagenames = find_packages()
scripts = glob.glob(os.path.join('bin', '*'))
# read description
with open('README.rst', 'rb') as f:
longdesc = f.read().decode().strip()
setup(name=DISTNAME,
provides=[PACKAGENAME],
version=__version__,
description="A python package for gravitational-wave detector characterisation",
long_description=longdesc,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url='https://github.com/gwdetchar/gwdetchar',
packages=packagenames,
include_package_data=True,
cmdclass=cmdclass,
scripts=scripts,
setup_requires=setup_requires,
install_requires=install_requires,
use_2to3=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics',
],
)
| gpl-3.0 |
jay-johnson/datanode | bins/ml/demo-ml-classifier-iris.py | 1 | 18742 | #!/usr/bin/env python
# Load common imports and system envs to build the core object
import sys, os
# For running inside the docker container use:
#import matplotlib
#matplotlib.use('Agg')
# Load the Environment:
os.environ["ENV_DEPLOYMENT_TYPE"] = "JustRedis"
from src.common.inits_for_python import *
#####################################################################
#
# Start Arg Processing:
#
action = "ML Classifier"
parser = argparse.ArgumentParser(description="Parser for Action: " + str(action))
parser.add_argument('-f', '--csvfile', help='CSV File', dest='csvfile')
parser.add_argument('-n', '--dsname', help='Dataset Name', dest='ds_name')
parser.add_argument('-b', '--s3bucket', help='S3 Bucket (Optional)', dest='s_bucket')
parser.add_argument('-k', '--s3key', help='S3 Key (Optional)', dest='s_key')
parser.add_argument('-u', '--usedate', help='Use Date', dest='usedate')
parser.add_argument("-d", "--debug", help="Debug Flag", dest='debug', action='store_true')
args = parser.parse_args()
if args.debug:
debug = True
core.enable_debug()
ds_name = "iris_classifier"
if args.ds_name:
ds_name = str(args.ds_name).strip().lstrip()
now = datetime.datetime.now()
cur_date = now
cur_date_str = now.strftime("%Y-%m-%d")
if args.usedate:
cur_date_str = str(args.usedate)
send_email = "1" # by default send email
s3_bucket = "demodatasets"
s3_key = "dataset_" + str(str(ds_name).upper().strip().lstrip()) + "_" + str(cur_date_str) + ".csv"
analysis_version = 2
if args.s_bucket:
s3_bucket = str(args.s_bucket)
if args.s_key:
s3_key = str(args.s_key)
dataset_filename = "iris.csv"
ml_csv = str(os.getenv("ENV_DATA_SRC_DIR", "/opt/work/data/src")) + "/" + dataset_filename
if args.csvfile:
ml_csv = str(args.csvfile)
#
# End Arg Processing
#
#####################################################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
if os.path.exists(ml_csv) == False:
if os.path.exists("/opt/work/data/examples/iris.csv"):
org_path = "/opt/work/data/examples/iris.csv"
os.system("cp " + str(org_path) + " " + ml_csv)
elif os.path.exists(os.getenv("ENV_DATANODE_REPO", "/opt/work") + "/data/examples/iris.csv"):
org_path = os.getenv("ENV_DATANODE_REPO", "/opt/work") + "/data/examples/iris.csv"
os.system("cp " + str(org_path) + " " + ml_csv)
else:
lg("Recreating iris dataset: /opt/work/bins/ml/downloaders/download_iris.py", 6)
os.system("/opt/work/bins/ml/downloaders/download_iris.py")
if os.path.exists(ml_csv) == False:
lg("Failed to recreate iris dataset with: /opt/work/bins/ml/downloaders/download_iris.py", 0)
lg("Stopping", 6)
sys.exit(1)
# end of checking if the csv file is available
lg("Processing ML Predictions for CSV(" + str(ml_csv) + ")", 6)
max_features_to_display = 10
num_estimators = 200
show_importance_plot = True
show_confusion_plot = True
random_state = 0
# For forecasting:
units_ahead_set = []
units_ahead = 0
now = datetime.datetime.now()
title_prefix = ds_name
confusion_plot_title = ds_name + " - Random Forest Confusion Matrix\nThe darker the square on the diagonal the better the predictions\n\n"
featimp_plot_title = ds_name + " - Feature Importance with Estimators(" + str(num_estimators) + ")"
row_names = [ "Actual" ] # CM - Y Axis
col_names = [ "Predictions" ] # CM - X Axis
num_jobs = 8
ranked_features = []
org_ranked_features = []
ml_type = "Predict with Filter"
ml_algo_name = "xgb-regressor"
ml_algo_name = "xgb-classifier"
price_min = 0.10
train_test_ratio = 0.1
# What column has the labeled targets as integers? (added-manually to the dataset)
target_column_name = "ResultTargetValue"
# possible values in the Target Column
target_column_values = [ "Iris-setosa", "Iris-versicolor", "Iris-virginica" ]
# What columns can the algorithms use for training and learning?
feature_column_names = [ "SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "ResultTargetValue" ]
label_column_name = "ResultLabel"
ignore_features = [ # Prune non-int/float columns as needed:
label_column_name
]
algo_nodes = []
forcast_df = None
ml_request = {
"MLType" : ml_type,
"MLAlgo" : {
"Name" : ml_algo_name,
"Version" : 1,
"Meta" : {
"UnitsAhead" : units_ahead,
"DatasetName" : ds_name,
"FilterMask" : None,
"Source" : {
"CSVFile" : ml_csv,
"S3File" : "", # <Bucket Name>:<Key>
"RedisKey" : "" # <App Name>:<Key>
},
},
"Steps" : {
"Train" :{
"LearningRate" : 0.1,
"NumEstimators" : 1000,
"Objective" : "reg:linear",
"MaxDepth" : 6,
"MaxDeltaStep" : 0,
"MinChildWeight" : 1,
"Gamma" : 0,
"SubSample" : 0.8,
"ColSampleByTree" : 0.8,
"ColSampleByLevel" : 1.0,
"RegAlpha" : 0,
"RegLambda" : 1,
"BaseScore" : 0.5,
"NumThreads" : -1, # infinite = -1
"ScaledPositionWeight" : 1,
"Seed" : 27,
"Debug" : True
}
}
},
"FeatureColumnNames": feature_column_names,
"TargetColumnName" : target_column_name,
"TargetColumnValues": target_column_values,
"IgnoreFeatures" : ignore_features,
"UnitsAheadSet" : units_ahead_set,
"UnitsAheadType" : "",
"PredictionType" : "Predict",
"MaxFeatures" : 10,
"Version" : 1,
"TrackingType" : "UseTargetColAndUnits",
"TrackingName" : core.to_upper(ds_name),
"TrackingID" : "ML_" + ds_name + "_" + str(core.build_unique_key()),
"Debug" : False
}
# Load dataset to build
csv_res = core.ml_load_csv_dataset(ml_request, core.get_rds(), core.get_dbs(), debug)
if csv_res["Status"] != "SUCCESS":
lg("ERROR: Failed to Load CSV(" + str(ml_request["MLAlgo"]["Meta"]["Source"]["CSVFile"]) + ")", 0)
sys.exit(1)
ds_df = csv_res["Record"]["SourceDF"]
# Build a Filter for pruning bad records out before creating the train/test sets
samples_filter_mask = (ds_df["SepalLength"] > 0.0) \
& (ds_df["PetalWidth"] > 0.0)
# For patching on the fly you can use the encoder method to replace labels with target dictionary values:
#ready_df = core.ml_encode_target_column(ds_df, "ResultLabel", "Target")
show_pair_plot = False
if show_pair_plot:
lg("Samples(" + str(len(ds_df.index)) + ") in CSV(" + str(ml_request["MLAlgo"]["Meta"]["Source"]["CSVFile"]) + ")", 6)
lg("")
print ds_df.describe()
lg("")
num_per_class = ds_df.groupby("ResultLabel").size()
print num_per_class
lg("")
pair_plot_req = {
"Title" : "Iris Dataset PairPlot",
"SourceDF" : ds_df[samples_filter_mask],
"Style" : "default",
"DiagKind" : "hist", # "kde" or "hist"
"HueColumnName" : ml_request["TargetColumnName"],
"XLabel" : "",
"YLabel" : "",
"CompareColumns": ml_request["FeatureColumnNames"],
"Size" : 3.0,
"ImgFile" : str(os.getenv("ENV_DATA_SRC_DIR", "/opt/work/data/src")) + "/" + "validate_jupyter_iris_classification_pairplot.png",
"ShowPlot" : True
}
lg("Plotting Validation Pair Plot - Please wait a moment...", 6)
core.sb_pairplot(pair_plot_req)
if os.path.exists(pair_plot_req["ImgFile"]):
lg("Done Plotting Valiation Pair Plot - Saved(" + str(pair_plot_req["ImgFile"]) + ")", 5)
else:
lg("Failed to save Validation Pair Plot(" + str(pair_plot_req["ImgFile"]) + "). Please check the ENV_DATA_SRC_DIR is writeable by this user and exposed to the docker container correctly.", 0)
# end of showing a pairplot for validation
# Pass the sample filter mask to the training request
ml_request["MLAlgo"]["Meta"]["SamplesFilterMask"] = samples_filter_mask
# Enable verbose logging before training
core.enable_debug()
ml_images = []
train_results = core.ml_train_models_for_predictions(ml_request, core.get_rds(), core.get_dbs(), debug)
if train_results["Status"] != "SUCCESS":
lg("ERROR: Failed to Train Models for Predictions with Error(" + str(train_results["Error"]) + ") StoppedEarly(" + str(train_results["Record"]["StoppedEarly"]) + ")", 0)
sys.exit(1)
algo_nodes = train_results["Record"]["AlgoNodes"]
predict_row = {
"SepalLength" : 5.4,
"SepalWidth" : 3.4,
"PetalLength" : 1.7,
"PetalWidth" : 0.2,
"ResultTargetValue" : 0
}
predict_row_df = pd.DataFrame(predict_row, index=[0])
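# Note (interpretation of the request above): the prediction row mirrors the
# columns listed in feature_column_names; ResultTargetValue appears to be a
# placeholder here rather than a known answer.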
predict_req = {
"AlgoNodes" : algo_nodes,
"PredictionMask": samples_filter_mask,
"PredictionRow" : predict_row_df
}
predict_results = core.ml_compile_predictions_from_models(predict_req, core.get_rds(), core.get_dbs(), debug)
if predict_results["Status"] != "SUCCESS":
lg("ERROR: Failed to Compile Predictions from Models with Error(" + str(predict_results["Error"]) + ")", 0)
sys.exit(1)
lg("Done with Predictions", 6)
if predict_results["Status"] == "SUCCESS":
al_req = train_results["Record"]
al_req["DSName"] = ml_request["TrackingName"]
al_req["Version"] = 1
al_req["FeatureColumnNames"]= ml_request["FeatureColumnNames"]
al_req["TargetColumnName"] = ml_request["TargetColumnName"]
al_req["TargetColumnValues"]= ml_request["TargetColumnValues"]
al_req["IgnoreFeatures"] = ml_request["IgnoreFeatures"]
al_req["PredictionType"] = ml_request["PredictionType"]
al_req["ConfMatrices"] = predict_results["Record"]["ConfMatrices"]
al_req["PredictionMarkers"] = predict_results["Record"]["PredictionMarkers"]
analysis_dataset = core.ml_compile_analysis_dataset(al_req, core.get_rds(), core.get_dbs(), debug)
lg("Analyzed Models(" + str(len(analysis_dataset["Models"])) + ")", 6)
lg("-----------------------------------------------------", 6)
lg("Caching Models", 6)
cache_req = {
"Name" : "CACHE",
"Key" : "_MODELS_" + str(al_req["Tracking"]["TrackingName"]) + "_LATEST",
"TrackingID": "_MD_" + str(al_req["Tracking"]["TrackingName"]),
"Analysis" : analysis_dataset
}
cache_results = core.ml_cache_analysis_and_models(cache_req, core.get_rds(), core.get_dbs(), debug)
lg("Done Caching Models", 6)
lg("-----------------------------------------------------", 6)
lg("Creating Analysis Visualizations", 6)
# Turn this False to prevent displaying images
analysis_dataset["ShowPlot"] = True
analysis_dataset["SourceDF"] = al_req["SourceDF"]
lg("Plotting Feature Importance", 6)
for midx,model_node in enumerate(analysis_dataset["Models"]):
predict_col = model_node["Target"]
if predict_col == "ResultTargetValue":
plot_req = {
"ImgFile" : analysis_dataset["FeatImpImgFile"],
"Model" : model_node["Model"],
"XLabel" : str(predict_col),
"YLabel" : "Importance Amount",
"Title" : str(predict_col) + " Importance Analysis",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_model_feature_importance(plot_req, debug)
for img in image_list:
ml_images.append(img)
# end of for all models
lg("Plotting PairPlot", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Pair Plot",
"ImgFile" : str(analysis_dataset["PairPlotImgFile"]),
"SourceDF" : al_req["SourceDF"],
"HueColumnName" : target_column_name,
"CompareColumns": feature_column_names,
"Markers" : ["o", "s", "D"],
"Width" : 15.0,
"Height" : 15.0,
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_pairplot(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting Confusion Matrices", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Confusion Matrix",
"ImgFile" : str(analysis_dataset["CMatrixImgFile"]),
"SourceDF" : al_req["SourceDF"],
"ConfMatrices" : al_req["ConfMatrices"],
"Width" : 15.0,
"Height" : 15.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_confusion_matrix(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting Scatters", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Scatter Plot",
"ImgFile" : str(analysis_dataset["ScatterImgFile"]),
"SourceDF" : analysis_dataset["SourceDF"],
"UnitsAheadType" : analysis_dataset["UnitsAheadType"],
"FeatureColumnNames": analysis_dataset["FeatureColumnNames"],
"Hue" : label_column_name,
"Width" : 7.0,
"Height" : 7.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_all_scatterplots(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting JointPlots", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Joint Plot",
"ImgFile" : str(analysis_dataset["JointPlotImgFile"]),
"SourceDF" : analysis_dataset["SourceDF"],
"UnitsAheadType" : analysis_dataset["UnitsAheadType"],
"FeatureColumnNames": analysis_dataset["FeatureColumnNames"],
"Hue" : label_column_name,
"Width" : 15.0,
"Height" : 15.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_all_jointplots(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Done Creating Analysis Visualizations", 6)
lg("-----------------------------------------------------", 6)
else:
lg("", 6)
lg("ERROR: Failed Processing Predictions for Dataset(" + str(ds_name) + ") with Error:", 6)
lg(ml_results["Error"], 6)
lg("", 6)
sys.exit(2)
# end of if success
lg("", 6)
lg("Analysis Complete Saved Images(" + str(len(ml_images)) + ")", 5)
lg("", 6)
sys.exit(0)
| apache-2.0 |
yonglehou/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
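    # For intuition: a sample with mean intra-cluster distance a = 1.0 and mean
    # nearest-cluster distance b = 3.0 scores (b - a) / max(a, b) = 2.0 / 3.0,
    # roughly 0.67, which would count as a well-separated sample.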
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
matbra/bokeh | examples/plotting/file/boxplot.py | 43 | 2269 | import numpy as np
import pandas as pd
from bokeh.plotting import figure, show, output_file
# Generate some synthetic time series for six different categories
cats = list("abcdef")
yy = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
yy[g == l] += i // 2
df = pd.DataFrame(dict(score=yy, group=g))
# Find the quartiles and IQR for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
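# For intuition: with q1 = 0.5 and q3 = 2.5 the IQR is 2.0, so the limits
# computed above sit at lower = 0.5 - 3.0 = -2.5 and upper = 2.5 + 3.0 = 5.5;
# points outside that band are treated as outliers below.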
# find the outliers for each category
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting; we need a coordinate for every outlier.
outx = []
outy = []
for cat in cats:
# only add outliers if they exist
if not out.loc[cat].empty:
for value in out[cat]:
outx.append(cat)
outy.append(value)
output_file("boxplot.html")
p = figure(tools="save", background_fill="#EFE8E2", title="", x_range=cats)
# If no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.iloc[:,0]),lower.score) ]
# stems
p.segment(cats, upper.score, cats, q3.score, line_width=2, line_color="black")
p.segment(cats, lower.score, cats, q1.score, line_width=2, line_color="black")
# boxes
p.rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
p.rect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score,
fill_color="#3B8686", line_width=2, line_color="black")
# whiskers (almost-0 height rects simpler than segments)
p.rect(cats, lower.score, 0.2, 0.01, line_color="black")
p.rect(cats, upper.score, 0.2, 0.01, line_color="black")
# outliers
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.grid.grid_line_width = 2
p.xaxis.major_label_text_font_size="12pt"
show(p)
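# Illustrative aside, not part of the original example: the stems/outliers above use
# the usual 1.5*IQR rule. A minimal self-contained sketch of the same rule on a plain
# pandas Series (the Series values below are made up for illustration):
def _iqr_outlier_sketch():
    s = pd.Series([1.0, 2.0, 2.5, 3.0, 3.5, 4.0, 15.0])  # 15.0 is a clear outlier
    q1, q3 = s.quantile(0.25), s.quantile(0.75)
    iqr = q3 - q1
    lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
    return s[(s < lower) | (s > upper)]  # returns only the 15.0 entry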
| bsd-3-clause |
f3r/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
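# Illustrative usage sketch, not part of the original module: on well-separated blobs
# the mean silhouette_score is high, and it equals the mean of the per-sample values
# returned by silhouette_samples. Assumes scikit-learn's KMeans and make_blobs.
def _silhouette_usage_sketch():
    from sklearn.cluster import KMeans
    from sklearn.datasets import make_blobs
    X, _ = make_blobs(n_samples=60, centers=3, cluster_std=0.5, random_state=0)
    labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
    avg = silhouette_score(X, labels)           # one float: mean over all samples
    per_sample = silhouette_samples(X, labels)  # one coefficient per sample
    assert np.allclose(per_sample.mean(), avg)
    return avg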
| bsd-3-clause |
timtroendle/urban-occupants-paper | scripts/simulationinput.py | 1 | 13175 | from datetime import datetime, timedelta
from itertools import count, chain
import math
from multiprocessing import Pool, cpu_count
import os
from pathlib import Path
import random
import click
import pandas as pd
import yaml
from tqdm import tqdm
import requests_cache
import sqlalchemy
import urbanoccupants as uo
NUMBER_HOUSEHOLDS_HARINGEY = 101955
NUMBER_USUAL_RESIDENTS_HARINGEY = 254926
RANDOM_SEED = 'haringey-case-study'
ROOT_FOLDER = Path(os.path.abspath(__file__)).parent.parent
CACHE_PATH = ROOT_FOLDER / 'build' / 'web-cache'
MIDAS_DATABASE_PATH = ROOT_FOLDER / 'data' / 'Londhour.csv'
requests_cache.install_cache((CACHE_PATH).as_posix())
@click.command()
@click.argument('path_to_seed')
@click.argument('path_to_markov_ts')
@click.argument('path_to_config')
@click.argument('path_to_result')
def simulation_input(path_to_seed, path_to_markov_ts, path_to_config, path_to_result):
random.seed(RANDOM_SEED)
_check_paths(path_to_seed, path_to_markov_ts, path_to_config, path_to_result)
seed = pd.read_pickle(path_to_seed)
markov_ts = pd.read_pickle(path_to_markov_ts)
config = uo.read_simulation_config(path_to_config)
features = config['people-features'] + config['household-features']
seed, markov_ts = uo.tus.filter_features(
seed,
markov_ts,
set(features + [uo.PeopleFeature.AGE])
)
markov_chains = _create_markov_chains(
seed,
markov_ts,
features,
config
)
seed = _amend_seed_by_markov_model(seed, markov_chains, features, config['start-time'])
seed = _amend_seed_by_metabolic_rate(seed, config)
census_data_ppl = {feature: feature.read_census_data(config['spatial-resolution'])
for feature in config['people-features']}
for data in census_data_ppl.values():
assert data.sum().sum() == NUMBER_USUAL_RESIDENTS_HARINGEY
census_data_hh = {feature: feature.read_census_data(config['spatial-resolution'])
for feature in config['household-features']}
for data in census_data_hh.values():
assert data.sum().sum() == NUMBER_HOUSEHOLDS_HARINGEY
seed = _prepare_seed_index(seed)
households, citizens = _create_synthetic_population(
seed,
census_data_hh,
census_data_ppl,
config
)
_write_dwellings_table(households, config, path_to_result)
_write_citizens_table(citizens, path_to_result)
_write_markov_chains(markov_chains, path_to_result)
_write_temperature_table(config, path_to_result)
_write_simulation_parameter_table(config, path_to_result)
def _check_paths(path_to_seed, path_to_markov_ts, path_to_config, path_to_result):
if not Path(path_to_seed).exists():
raise ValueError("Seed is missing: {}.".format(path_to_seed))
if not Path(path_to_markov_ts).exists():
raise ValueError("Markov timeseries is missing: {}.".format(path_to_markov_ts))
if not Path(path_to_config).exists():
raise ValueError("Config file is missing: {}.".format(path_to_config))
path_to_result = Path(path_to_result)
if path_to_result.exists():
path_to_result.unlink()
if not MIDAS_DATABASE_PATH.exists():
raise ValueError('MIDAS weather data file is missing: {}.'.format(MIDAS_DATABASE_PATH))
def _create_markov_chains(seed, markov_ts, features, config):
seed_groups = seed.groupby([str(feature) for feature in features])
print("Dividing the seed into {} cluster.".format(len(seed_groups.groups.keys())))
print("Cluster statistics:")
print(seed_groups.size().describe())
with Pool(config['number-processes']) as pool:
feature_combinations = seed_groups.groups.keys()
all_parameters = ( # imap_unordered allows only one parameter, hence the tuple
(markov_ts,
seed_groups.get_group(features),
features,
config['time-step-size'])
for features in feature_combinations
)
markov_chains = dict(pool.imap_unordered(uo.tus.markov_chain_for_cluster,
tqdm(all_parameters,
total=len(feature_combinations),
desc='Calculating markov chains')))
return markov_chains
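# Illustrative sketch, not part of the original script, of the pattern used above:
# imap_unordered passes a single argument to the worker, so multiple inputs are packed
# into one tuple and unpacked inside the worker. The names below are made up.
def _sketch_worker(args):
    key, values = args  # unpack the single tuple argument
    return key, sum(values)
def _imap_unordered_tuple_sketch():
    tasks = ((k, list(range(k))) for k in (3, 5, 7))
    with Pool(2) as pool:
        return dict(pool.imap_unordered(_sketch_worker, tasks))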
def _amend_seed_by_markov_model(seed, markov_chains, features, simulation_start_time):
seed_groups = seed.groupby([str(feature) for feature in features])
for feature_combination, index in seed_groups.groups.items():
seed.loc[index, 'markov_id'] = uo.feature_id(feature_combination)
seed.loc[index, 'initial_activity'] = markov_chains[feature_combination]\
.valid_states(simulation_start_time)[0]
return seed
def _amend_seed_by_metabolic_rate(seed, config):
below18 = seed[str(uo.PeopleFeature.AGE)] < uo.types.AgeStructure.AGE_18_TO_19
above18 = seed[str(uo.PeopleFeature.AGE)] >= uo.types.AgeStructure.AGE_18_TO_19
metabolic_heat_gain_active = config['metabolic-heat-gain-active']
metabolic_heat_gain_passive = config['metabolic-heat-gain-passive']
metabolic_ratio_child = config['metabolic-ratio-child']
seed.loc[below18, 'metabolic_heat_gain_active'] = (metabolic_heat_gain_active *
metabolic_ratio_child)
seed.loc[below18, 'metabolic_heat_gain_passive'] = (metabolic_heat_gain_passive *
metabolic_ratio_child)
seed.loc[above18, 'metabolic_heat_gain_active'] = metabolic_heat_gain_active
seed.loc[above18, 'metabolic_heat_gain_passive'] = metabolic_heat_gain_passive
return seed
def _prepare_seed_index(seed):
sn1_plus_sn2 = seed.index.droplevel(2)
seed = seed.copy()
seed['household_id'] = list(sn1_plus_sn2)
seed.reset_index(inplace=True)
seed.rename(columns={'SN3': 'person_id'}, inplace=True)
seed.set_index(['household_id', 'person_id'], inplace=True)
seed.drop(['SN1', 'SN2'], axis=1, inplace=True)
return seed
def _create_synthetic_population(seed, census_data_hh, census_data_ppl, config):
random_hh_feature = list(census_data_hh.values())[0]
regions = list(random_hh_feature.index)
controls_hh = {region: {str(feature): census_data_hh[feature].ix[region, :]
for feature in config['household-features']}
for region in regions}
controls_ppl = {region: {str(feature): census_data_ppl[feature].ix[region, :]
for feature in config['people-features']}
for region in regions}
number_households = {region: random_hh_feature.ix[region, :].sum() for region in regions}
household_counter = count(start=1, step=1)
household_ids = {region: [household_counter.__next__()
for _ in range(number_households[region])]
for region in regions}
random_numbers = {region: [random.uniform(0, 1) for _ in range(number_households[region])]
for region in regions}
hh_chunk_size = int(NUMBER_HOUSEHOLDS_HARINGEY / config['number-processes'] / 4)
with Pool(config['number-processes']) as pool:
hipf_params = ((seed, controls_hh[region], controls_ppl[region], region)
for region in regions)
household_weights = dict(tqdm(
pool.imap_unordered(uo.synthpop.run_hipf, hipf_params),
total=len(regions),
desc='Hierarchical IPF '
))
household_params = ((region, seed, household_weights[region],
random_numbers[region], household_ids[region])
for region in regions)
households = list(chain(*tqdm(
pool.imap_unordered(uo.synthpop.sample_households, household_params),
total=len(regions),
desc='Sampling households '
)))
household_chunks = [households[i:i + hh_chunk_size]
for i in range(0, len(households), hh_chunk_size)]
citizens = list(chain(*tqdm(
pool.imap_unordered(
uo.synthpop.sample_citizen,
((households, seed) for households in household_chunks)
),
total=math.ceil(NUMBER_HOUSEHOLDS_HARINGEY / hh_chunk_size),
desc='Sampling individuals '
)))
assert len(households) == NUMBER_HOUSEHOLDS_HARINGEY
assert abs(len(citizens) - NUMBER_USUAL_RESIDENTS_HARINGEY) < 2000
return households, citizens
def _df_to_input_db(df, table_name, path_to_db):
disk_engine = sqlalchemy.create_engine('sqlite:///{}'.format(path_to_db))
df.to_sql(name=table_name, con=disk_engine)
def _write_dwellings_table(households, config, path_to_db):
df = pd.DataFrame(
index=[household.id for household in households],
data={
'thermalMassCapacity': config['dwelling']['thermal-mass-capacity'],
'thermalMassArea': config['dwelling']['thermal-mass-area'],
'floorArea': config['dwelling']['floor-area'],
'roomHeight': config['dwelling']['room-height'],
'windowToWallRatio': config['dwelling']['window-to-wall-ratio'],
'uWall': config['dwelling']['u-value-wall'],
'uRoof': config['dwelling']['u-value-roof'],
'uFloor': config['dwelling']['u-value-floor'],
'uWindow': config['dwelling']['u-value-window'],
'transmissionAdjustmentGround': config['dwelling']['transmission-adjustment-ground'],
'naturalVentilationRate': config['dwelling']['natural-ventilation-rate'],
'maxHeatingPower': config['dwelling']['max-heating-power'],
'initialTemperature': config['dwelling']['initial-temperature'],
'heatingControlStrategy': config['dwelling']['heating-control-strategy'],
'region': [household.region for household in households]
}
)
_df_to_input_db(df, uo.DWELLINGS_TABLE_NAME, path_to_db)
def _write_citizens_table(citizens, path_to_db):
df = pd.DataFrame(
index=list(range(len(citizens))),
data={
'markovChainId': [citizen.markovId for citizen in citizens],
'dwellingId': [citizen.householdId for citizen in citizens],
'initialActivity': [str(citizen.initialActivity) for citizen in citizens],
'activeMetabolicRate': [citizen.activeMetabolicRate for citizen in citizens],
'passiveMetabolicRate': [citizen.passiveMetabolicRate for citizen in citizens],
'randomSeed': [citizen.randomSeed for citizen in citizens]
}
)
_df_to_input_db(df, uo.PEOPLE_TABLE_NAME, path_to_db)
def _write_markov_chains(markov_chains, path_to_db):
markov_index = pd.Series(
{
feature_id: "markov{}".format(feature_id)
for feature_id in [uo.feature_id(key) for key in markov_chains.keys()]
},
name='tablename'
)
_df_to_input_db(markov_index, uo.MARKOV_CHAIN_INDEX_TABLE_NAME, path_to_db)
for feature_combination, markov_chain in markov_chains.items():
df = markov_chain.to_dataframe()
df.fromActivity = [str(x) for x in df.fromActivity]
df.toActivity = [str(x) for x in df.toActivity]
_df_to_input_db(df, markov_index[uo.feature_id(feature_combination)], path_to_db)
def _write_temperature_table(config, path_to_db):
def date_parser(date, time):
month, day, year = [int(x) for x in date.split('/')]
hour, minute = [int(x) for x in time.split(':')]
return datetime(year, month, day, hour - 1, minute)
temperature = pd.read_csv(
MIDAS_DATABASE_PATH,
skiprows=[0],
header=0,
parse_dates=[['Date (MM/DD/YYYY)', 'Time (HH:MM)']],
date_parser=date_parser,
index_col=[0]
)
temperature.rename(columns={'Dry-bulb (C)': 'temperature'}, inplace=True)
temperature.index.name = 'index'
df = temperature['temperature'].resample(config['time-step-size']).ffill()
_df_to_input_db(df, uo.ENVIRONMENT_TABLE_NAME, path_to_db)
def _write_simulation_parameter_table(config, path_to_db):
_df_to_input_db(
table_name=uo.PARAMETERS_TABLE_NAME,
df=pd.DataFrame(
index=[1],
data={
'initialDatetime': config['start-time'],
'timeStepSize_in_min': config['time-step-size-minutes'],
'numberTimeSteps': config['number-time-steps'],
'setPointWhileHome': config['set-point-while-home'],
'setPointWhileAsleep': config['set-point-while-asleep'],
'wakeUpTime': config['wake-up-time'],
'leaveHomeTime': config['leave-home-time'],
'comeHomeTime': config['come-home-time'],
'bedTime': config['bed-time'],
'logTemperature': config['log-temperature'],
'logThermalPower': config['log-thermal-power'],
'logActivity': config['log-activity']
}
),
path_to_db=path_to_db
)
if __name__ == '__main__':
simulation_input()
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/utils/tests/test_class_weight.py | 50 | 13151 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
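# Illustrative aside, not a test: as documented, the "balanced" heuristic weights each
# class by n_samples / (n_classes * count(class)), so rarer classes get larger weights
# while the total effect of the samples is preserved. Re-derived for the y above:
def _balanced_weight_sketch():
    y = np.asarray([2, 2, 2, 3, 3, 4])
    classes = np.unique(y)
    counts = np.array([np.sum(y == c) for c in classes])      # [3, 2, 1]
    weights = len(y) / (len(classes) * counts.astype(float))  # [0.667, 1.0, 2.0]
    assert np.isclose(np.dot(weights, counts), len(y))        # total effect preserved
    return weights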
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
    # When the user specifies class weights, compute_class_weight should just
    # return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
    # Test that results with class_weight="balanced" are invariant w.r.t.
    # class imbalance if the number of samples is identical.
    # The test uses a balanced two-class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| mit |
ajrichards/notebook | python/kernel-density.py | 4 | 2213 | #!/usr/bin/env python
"""
https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
"""
import numpy as np
from scipy.stats.distributions import norm
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
from scipy.stats import gaussian_kde
from statsmodels.nonparametric.kde import KDEUnivariate
from statsmodels.nonparametric.kernel_density import KDEMultivariate
def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):
"""Kernel Density Estimation with Scipy"""
# Note that scipy weights its bandwidth by the covariance of the
# input data. To make the results comparable to the other methods,
# we divide the bandwidth by the sample standard deviation here.
kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
return kde.evaluate(x_grid)
def kde_statsmodels_u(x, x_grid, bandwidth=0.2, **kwargs):
"""Univariate Kernel Density Estimation with Statsmodels"""
kde = KDEUnivariate(x)
kde.fit(bw=bandwidth, **kwargs)
return kde.evaluate(x_grid)
def kde_statsmodels_m(x, x_grid, bandwidth=0.2, **kwargs):
"""Multivariate Kernel Density Estimation with Statsmodels"""
kde = KDEMultivariate(x, bw=bandwidth * np.ones_like(x),
var_type='c', **kwargs)
return kde.pdf(x_grid)
def kde_sklearn(x, x_grid, bandwidth=0.2, **kwargs):
"""Kernel Density Estimation with Scikit-learn"""
kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)
kde_skl.fit(x[:, np.newaxis])
# score_samples() returns the log-likelihood of the samples
log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])
return np.exp(log_pdf)
## plot
fig, ax = plt.subplots()
x_grid = np.linspace(-4.5, 3.5, 1000)
np.random.seed(0)
x = np.concatenate([norm(-1, 1.).rvs(400),
norm(1, 0.3).rvs(100)])
for bandwidth in [0.1, 0.3, 1.0]:
ax.plot(x_grid, kde_sklearn(x, x_grid, bandwidth=bandwidth),
label='bw={0}'.format(bandwidth), linewidth=3, alpha=0.5)
ax.hist(x, 30, fc='gray', histtype='stepfilled', alpha=0.3, normed=True)
ax.set_xlim(-4.5, 3.5)
ax.legend(loc='upper left')
plt.show()
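# Illustrative aside, not part of the original post: with the std rescaling used in
# kde_scipy above, the scipy and scikit-learn estimates should be computed with the
# same effective kernel width, so the sketch below simply reports their largest
# pointwise discrepancy on the grid already defined above.
_bw = 0.3
_scipy_vs_sklearn_gap = np.max(np.abs(kde_scipy(x, x_grid, bandwidth=_bw) -
                                      kde_sklearn(x, x_grid, bandwidth=_bw)))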
| bsd-3-clause |
zuku1985/scikit-learn | examples/gaussian_process/plot_gpc.py | 103 | 3927 | """
====================================================================
Probabilistic predictions with Gaussian process classification (GPC)
====================================================================
This example illustrates the predicted probability of GPC for an RBF kernel
with different choices of the hyperparameters. The first figure shows the
predicted probability of GPC with arbitrarily chosen hyperparameters and with
the hyperparameters corresponding to the maximum log-marginal-likelihood (LML).
While the hyperparameters chosen by optimizing LML have a considerably larger
LML, they perform slightly worse according to the log-loss on test data. The
figure shows that this is because they exhibit a steep change of the class
probabilities at the class boundaries (which is good) but have predicted
probabilities close to 0.5 far away from the class boundaries (which is bad).
This undesirable effect is caused by the Laplace approximation used
internally by GPC.
The second figure shows the log-marginal-likelihood for different choices of
the kernel's hyperparameters, highlighting the two choices of the
hyperparameters used in the first figure by black dots.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics.classification import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# Generate data
train_size = 50
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 100)[:, np.newaxis]
y = np.array(X[:, 0] > 2.5, dtype=int)
# Specify Gaussian Processes with fixed and optimized hyperparameters
gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
optimizer=None)
gp_fix.fit(X[:train_size], y[:train_size])
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gp_opt.fit(X[:train_size], y[:train_size])
print("Log Marginal Likelihood (initial): %.3f"
% gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta))
print("Log Marginal Likelihood (optimized): %.3f"
% gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
print("Accuracy: %.3f (initial) %.3f (optimized)"
% (accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
accuracy_score(y[:train_size], gp_opt.predict(X[:train_size]))))
print("Log-loss: %.3f (initial) %.3f (optimized)"
% (log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1])))
# Plot posteriors
plt.figure(0)
plt.scatter(X[:train_size, 0], y[:train_size], c='k', label="Train data")
plt.scatter(X[train_size:, 0], y[train_size:], c='g', label="Test data")
X_ = np.linspace(0, 5, 100)
plt.plot(X_, gp_fix.predict_proba(X_[:, np.newaxis])[:, 1], 'r',
label="Initial kernel: %s" % gp_fix.kernel_)
plt.plot(X_, gp_opt.predict_proba(X_[:, np.newaxis])[:, 1], 'b',
label="Optimized kernel: %s" % gp_opt.kernel_)
plt.xlabel("Feature")
plt.ylabel("Class 1 probability")
plt.xlim(0, 5)
plt.ylim(-0.25, 1.5)
plt.legend(loc="best")
# Plot LML landscape
plt.figure(1)
theta0 = np.logspace(0, 8, 30)
theta1 = np.logspace(-1, 1, 29)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
plt.plot(np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1],
'ko', zorder=10)
plt.plot(np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1],
'ko', zorder=10)
plt.pcolor(Theta0, Theta1, LML)
plt.xscale("log")
plt.yscale("log")
plt.colorbar()
plt.xlabel("Magnitude")
plt.ylabel("Length-scale")
plt.title("Log-marginal-likelihood")
plt.show()
| bsd-3-clause |
MCGallaspy/pymc3 | pymc3/examples/lasso_block_update.py | 14 | 1676 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# Sometimes, it is very useful to update a set of parameters together. For example, variables that are highly correlated are often good to update together. In PyMC3, block updating is simple, as this example will demonstrate.
#
# Here we have a LASSO regression model where the two coefficients are strongly correlated. Normally, we would define the coefficient parameters as a single random variable, but here we define them separately to show how to do block updates.
#
# First we generate some fake data.
# <codecell>
from matplotlib.pylab import *
from pymc3 import *
import numpy as np
d = np.random.normal(size=(3, 30))
d1 = d[0] + 4
d2 = d[1] + 4
yd = .2 * d1 + .3 * d2 + d[2]
# <markdowncell>
# Then define the random variables.
# <codecell>
with Model() as model:
s = Exponential('s', 1)
m1 = Laplace('m1', 0, 100)
m2 = Laplace('m2', 0, 100)
p = d1 * m1 + d2 * m2
y = Normal('y', p, s ** -2, observed=yd)
# <markdowncell>
# For most samplers, including Metropolis and HamiltonianMC, simply pass a
# list of variables to sample as a block. This works with both scalar and
# array parameters.
# <codecell>
with model:
step1 = Metropolis([m1, m2], blocked=True)
step2 = Metropolis([s], proposal_dist=LaplaceProposal)
def run(n=5000):
if n == "short":
n = 300
with model:
start = find_MAP()
trace = sample(n, [step1, step2], start)
dh = fn(hessian_diag(model.logpt))
# <codecell>
traceplot(trace)
# <codecell>
hexbin(trace[m1], trace[m2], gridsize=50)
# <codecell>
if __name__ == '__main__':
run()
| apache-2.0 |
nhuntwalker/astroML | book_figures/chapter3/fig_fisher_f_distribution.py | 3 | 2353 | """
Example of Fisher's F distribution
------------------------------------
Figure 3.16.
This shows an example of Fisher's F distribution with various parameters.
We'll generate the distribution using::
dist = scipy.stats.f(...)
Where ... should be filled in with the desired distribution parameters
Once we have defined the distribution parameters in this way, these
distribution objects have many useful methods; for example:
* ``dist.pmf(x)`` computes the Probability Mass Function at values ``x``
in the case of discrete distributions
* ``dist.pdf(x)`` computes the Probability Density Function at values ``x``
in the case of continuous distributions
* ``dist.rvs(N)`` computes ``N`` random variables distributed according
to the given distribution
Many further options exist; refer to the documentation of ``scipy.stats``
for more details.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import f as fisher_f
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Define the distribution parameters to be plotted
mu = 0
d1_values = [1, 5, 2, 10]
d2_values = [1, 2, 5, 50]
linestyles = ['-', '--', ':', '-.']
x = np.linspace(0, 5, 1001)[1:]
fig, ax = plt.subplots(figsize=(5, 3.75))
for (d1, d2, ls) in zip(d1_values, d2_values, linestyles):
dist = fisher_f(d1, d2, mu)
plt.plot(x, dist.pdf(x), ls=ls, c='black',
label=r'$d_1=%i,\ d_2=%i$' % (d1, d2))
plt.xlim(0, 4)
plt.ylim(0.0, 1.0)
plt.xlabel('$x$')
plt.ylabel(r'$p(x|d_1, d_2)$')
plt.title("Fisher's Distribution")
plt.legend()
plt.show()
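# Illustrative aside, not part of the original figure script: the frozen distribution
# object also exposes the other methods listed in the docstring, e.g. rvs() for random
# variates and cdf() for the cumulative distribution. The underscore names are made up.
_demo_dist = fisher_f(d1_values[0], d2_values[0], mu)  # F(1, 1), loc = 0
_demo_samples = _demo_dist.rvs(5)        # five random variates
_demo_tail = 1.0 - _demo_dist.cdf(2.0)   # P[X > 2] under the same distribution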
| bsd-2-clause |
fivejjs/pyhsmm | pyhsmm/basic/abstractions.py | 3 | 4361 | from __future__ import division
import abc
import numpy as np
from matplotlib import pyplot as plt
from pybasicbayes.abstractions import *
import pyhsmm
from pyhsmm.util.stats import flattendata, sample_discrete, sample_discrete_from_log, combinedata
from pyhsmm.util.general import rcumsum
class DurationDistribution(Distribution):
__metaclass__ = abc.ABCMeta
# in addition to the methods required by Distribution, we also require a
# log_sf implementation
@abc.abstractmethod
def log_sf(self,x):
'''
        log survival function, defined by log_sf(x) = log(P[X > x]) =
        log(1 - cdf(x)), where cdf(x) = P[X <= x]
'''
pass
def log_pmf(self,x):
return self.log_likelihood(x)
def expected_log_pmf(self,x):
return self.expected_log_likelihood(x)
# default implementations below
def pmf(self,x):
return np.exp(self.log_pmf(x))
def rvs_given_greater_than(self,x):
tail = self.log_sf(x)
# if numerical underflow, return anything sensible
if np.isinf(tail):
return x+1
# if big tail, rejection sample
elif np.exp(tail) > 0.1:
y = self.rvs(25)
while not np.any(y > x):
y = self.rvs(25)
return y[y > x][0]
# otherwise, sample directly using the pmf and sf
else:
u = np.random.rand()
y = x
while u > 0:
u -= np.exp(self.log_pmf(y) - tail)
y += 1
return y
def rvs_given_less_than(self,x,num):
pmf = self.pmf(np.arange(1,x))
return sample_discrete(pmf,num)+1
def expected_log_sf(self,x):
x = np.atleast_1d(x).astype('int32')
assert x.ndim == 1
inf = max(2*x.max(),2*1000) # approximately infinity, we hope
return rcumsum(self.expected_log_pmf(np.arange(1,inf)),strict=True)[x]
def resample_with_censoring(self,data=[],censored_data=[]):
'''
        censored_data is full of observations that were censored, meaning a
        value of x really could have been anything >= x, so this method
        resamples each censored value to be at least that large
'''
filled_in = self._uncensor_data(censored_data)
return self.resample(data=combinedata((data,filled_in)))
def _uncensor_data(self,censored_data):
# TODO numpy-vectorize this!
if len(censored_data) > 0:
if not isinstance(censored_data,list):
filled_in = np.asarray([self.rvs_given_greater_than(x-1)
for x in censored_data])
else:
filled_in = np.asarray([self.rvs_given_greater_than(x-1)
for xx in censored_data for x in xx])
else:
filled_in = []
return filled_in
def resample_with_censoring_and_truncation(self,data=[],censored_data=[],left_truncation_level=None):
filled_in = self._uncensor_data(censored_data)
if left_truncation_level is not None and left_truncation_level > 1:
norm = self.pmf(np.arange(1,left_truncation_level)).sum()
num_rejected = np.random.geometric(1-norm)-1
rejected_observations = self.rvs_given_less_than(left_truncation_level,num_rejected) \
if num_rejected > 0 else []
else:
rejected_observations = []
self.resample(data=combinedata((data,filled_in,rejected_observations)))
@property
def mean(self):
# TODO this is dumb, why is this here?
trunc = 500
while self.log_sf(trunc) > -20:
trunc *= 1.5
return np.arange(1,trunc+1).dot(self.pmf(np.arange(1,trunc+1)))
def plot(self,data=None,color='b',**kwargs):
data = flattendata(data) if data is not None else None
try:
tmax = np.where(np.exp(self.log_sf(np.arange(1,1000))) < 1e-3)[0][0]
except IndexError:
tmax = 2*self.rvs(1000).mean()
tmax = max(tmax,data.max()) if data is not None else tmax
t = np.arange(1,tmax+1)
plt.plot(t,self.pmf(t),color=color)
if data is not None:
if len(data) > 1:
plt.hist(data,bins=t-0.5,color=color,normed=len(set(data)) > 1)
else:
plt.hist(data,bins=t-0.5,color=color)
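# Illustrative aside, not part of the original module: the survival function that
# log_sf must implement satisfies sf(x) = P[X > x] = sum of pmf(y) over y > x, which
# is exactly what the strict reverse cumulative sum in expected_log_sf exploits.
# Self-contained numerical check for a toy geometric duration on {1, 2, ...}:
def _sf_pmf_relation_sketch(p=0.3, support=200):
    x = np.arange(1, support + 1)
    pmf = (1 - p) ** (x - 1) * p                  # geometric pmf on 1, 2, ...
    sf_from_pmf = pmf[::-1].cumsum()[::-1] - pmf  # strict reverse cumsum: sum over y > x
    sf_closed_form = (1 - p) ** x                 # P[X > x] for the geometric
    return np.allclose(sf_from_pmf, sf_closed_form)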
| mit |
q1ang/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
gotomypc/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
FrankTsui/robust_rescaled_svm | robust_rescaled_svm.py | 1 | 6561 | '''
this algorithm is presented in:
Guibiao Xu, Zheng Cao, Bao-Gang Hu and Jose Principe, Robust support vector machines based on the
rescaled hinge loss, Pattern Recognition, 2017.
'''
import numpy as np
from sklearn.svm import SVC
from collections import OrderedDict
from config import config
class rsvm:
def __init__(self, config):
'''
config: parameter settings
'''
self.config = config
def _create_svm_object(self):
'''
create an svm object according to kernel type
'''
if self.config['kernel'] == 'linear':
return SVC(C = self.config['C'], kernel = self.config['kernel'], \
shrinking = self.config['shrinking'], probability = self.config['probability'], \
tol = self.config['tol'], cache_size = self.config['cache_size'], \
class_weight = self.config['class_weight'], verbose = self.config['verbose'], \
max_iter = self.config['max_iter'], decision_function_shape = self.config['decision_function_shape'], \
random_state = self.config['random_state'])
elif self.config['kernel'] == 'poly':
return SVC(C = self.config['C'], kernel = self.config['kernel'], \
degree = self.config['degree'], gamma = self.config['gamma'], coef0 = self.config['coef0'], \
shrinking = self.config['shrinking'], probability = self.config['probability'], \
tol = self.config['tol'], cache_size = self.config['cache_size'], \
class_weight = self.config['class_weight'], verbose = self.config['verbose'], \
max_iter = self.config['max_iter'], decision_function_shape = self.config['decision_function_shape'], \
random_state = self.config['random_state'])
elif self.config['kernel'] == 'rbf':
return SVC(C = self.config['C'], kernel = self.config['kernel'], gamma = self.config['gamma'], \
shrinking = self.config['shrinking'], probability = self.config['probability'], \
tol = self.config['tol'], cache_size = self.config['cache_size'], \
class_weight = self.config['class_weight'], verbose = self.config['verbose'], \
max_iter = self.config['max_iter'], decision_function_shape = self.config['decision_function_shape'], \
random_state = self.config['random_state'])
elif self.config['kernel'] == 'sigmoid':
return SVC(C = self.config['C'], kernel = self.config['kernel'], \
gamma = self.config['gamma'], coef0 = self.config['coef0'], \
shrinking = self.config['shrinking'], probability = self.config['probability'], \
tol = self.config['tol'], cache_size = self.config['cache_size'], \
class_weight = self.config['class_weight'], verbose = self.config['verbose'], \
max_iter = self.config['max_iter'], decision_function_shape = self.config['decision_function_shape'], \
random_state = self.config['random_state'])
def fit(self, train_fea, train_gnd):
'''
training method
train_fea: array like, shape = (smp_num, fea_num)
train_gnd: array like, shape = (smp_num,), -1 and +1
'''
# check elements in train_gnd, the element should be -1 or +1
assert set(train_gnd) == set([-1, 1])
train_num = train_fea.shape[0]
# save sample weights across iterations
self.smp_weights_mat = np.zeros(shape = (self.config['rsvm_iter_num'], train_num))
# save svm models across iterations
self.svmmodel_dict = OrderedDict()
# save support vector ratios across iterations
self.sv_ratio_vec = np.zeros(shape = (self.config['rsvm_iter_num'],))
self.smp_weights_mat[0] = self.config['rsvm_v0']
for iter_i in range(self.config['rsvm_iter_num']):
self.svmmodel_dict[iter_i] = self._create_svm_object()
self.svmmodel_dict[iter_i].fit(train_fea, train_gnd, sample_weight = self.smp_weights_mat[iter_i])
self.sv_ratio_vec[iter_i] = np.float64(self.svmmodel_dict[iter_i].n_support_.sum()) / train_num * 100
# update weights of samples
if iter_i == (self.config['rsvm_iter_num'] - 1):
break
else:
tmp_outputs = self.svmmodel_dict[iter_i].decision_function(train_fea)
tmp_hinge_loss = np.maximum(0.0, 1.0 - tmp_outputs * train_gnd)
# weights update function
self.smp_weights_mat[iter_i + 1] = np.exp(-self.config['rsvm_eta'] * tmp_hinge_loss)
self.smp_weights_mat = self.smp_weights_mat.transpose()
def predict(self, test_fea, last_model_flag = True):
'''
prediction function
test_fea: array like, shape = (smp_num, fea_num)
last_model_flag: whether only use the last svm model or not
return
pred: array like, shape = (smp_num, iter_num)
'''
if last_model_flag:
return self.svmmodel_dict[self.config['rsvm_iter_num'] - 1].predict(test_fea)
else:
test_num = test_fea.shape[0]
pred = np.zeros(shape = (test_num, self.config['rsvm_iter_num']), dtype = np.int32)
for iter_i in range(self.config['rsvm_iter_num']):
pred[:, iter_i] = self.svmmodel_dict[iter_i].predict(test_fea)
return pred
def score(self, test_fea, test_gnd, last_model_flag = True):
'''
return accuracy on the given test_fea and test_gnd
test_fea: array like, shape = (smp_num, fea_num)
test_gnd: array like, shape = (smp_num,), -1 and +1
last_model_flag: whether only use the last svm model or not
return
accu_vec: a vector
'''
if last_model_flag:
return self.svmmodel_dict[self.config['rsvm_iter_num'] - 1].score(test_fea, test_gnd) * 100
else:
accu_vec = np.zeros(shape = (self.config['rsvm_iter_num'],))
for iter_i in range(self.config['rsvm_iter_num']):
accu_vec[iter_i] = self.svmmodel_dict[iter_i].score(test_fea, test_gnd) * 100
return accu_vec
def decision_function(self, test_fea, last_model_flag = True):
'''
svm outputs
test_fea: array like, shape = (smp_num, fea_num)
last_model_flag: whether only use the last svm model or not
return
distance: array like, shape = (smp_num, iter_num)
'''
if last_model_flag:
return self.svmmodel_dict[self.config['rsvm_iter_num'] - 1].decision_function(test_fea)
else:
test_num = test_fea.shape[0]
distance = np.zeros(shape = (test_num, self.config['rsvm_iter_num']), dtype = np.float64)
for iter_i in range(self.config['rsvm_iter_num']):
distance[:, iter_i] = self.svmmodel_dict[iter_i].decision_function(test_fea)
return distance
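# A minimal, self-contained sketch (not part of the rsvm class above) of the
# sample-weight update that fit() applies between iterations: weights decay
# exponentially with the hinge loss, so badly misclassified (possibly noisy)
# samples are down-weighted. The margin values passed in are arbitrary
# illustrations, not outputs of a trained model.
def _weight_update_sketch(margins, eta = 0.5):
    # margins = y * f(x); hinge loss is max(0, 1 - margin)
    hinge_loss = np.maximum(0.0, 1.0 - np.asarray(margins, dtype = np.float64))
    # rescaled-hinge weighting: w = exp(-eta * hinge_loss)
    return np.exp(-eta * hinge_loss)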
if __name__ == '__main__':
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [-1] * 10 + [1] * 10
train_num = 20
config['rsvm_v0'] = np.ones(shape = (20, ), dtype = np.float64)
config['rsvm_eta'] = 0.5
rsvm_obj = rsvm(config)
rsvm_obj.fit(X, y)
    print('#### sv ratio vector ####')
    print(rsvm_obj.sv_ratio_vec)
    print('#### smp_weights_mat ####')
    print(rsvm_obj.smp_weights_mat)
| apache-2.0 |
CallaJun/hackprince | indico/matplotlib/tests/test_transforms.py | 9 | 19984 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import unittest
from nose.tools import assert_equal, assert_raises
import numpy.testing as np_test
from numpy.testing import assert_almost_equal
from matplotlib.transforms import Affine2D, BlendedGenericTransform
from matplotlib.path import Path
from matplotlib.scale import LogScale
from matplotlib.testing.decorators import cleanup, image_comparison
import numpy as np
import matplotlib.transforms as mtrans
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.patches as mpatches
@cleanup
def test_non_affine_caching():
class AssertingNonAffineTransform(mtrans.Transform):
"""
This transform raises an assertion error when called when it
shouldn't be and self.raise_on_transform is True.
"""
input_dims = output_dims = 2
is_affine = False
def __init__(self, *args, **kwargs):
mtrans.Transform.__init__(self, *args, **kwargs)
self.raise_on_transform = False
self.underlying_transform = mtrans.Affine2D().scale(10, 10)
def transform_path_non_affine(self, path):
if self.raise_on_transform:
assert False, ('Invalidated affine part of transform '
'unnecessarily.')
return self.underlying_transform.transform_path(path)
transform_path = transform_path_non_affine
def transform_non_affine(self, path):
if self.raise_on_transform:
assert False, ('Invalidated affine part of transform '
'unnecessarily.')
return self.underlying_transform.transform(path)
transform = transform_non_affine
my_trans = AssertingNonAffineTransform()
ax = plt.axes()
plt.plot(list(xrange(10)), transform=my_trans + ax.transData)
plt.draw()
# enable the transform to raise an exception if it's non-affine transform
# method is triggered again.
my_trans.raise_on_transform = True
ax.transAxes.invalidate()
plt.draw()
@cleanup
def test_external_transform_api():
class ScaledBy(object):
def __init__(self, scale_factor):
self._scale_factor = scale_factor
def _as_mpl_transform(self, axes):
return mtrans.Affine2D().scale(self._scale_factor) + axes.transData
ax = plt.axes()
line, = plt.plot(list(xrange(10)), transform=ScaledBy(10))
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
# assert that the top transform of the line is the scale transform.
np.testing.assert_allclose(line.get_transform()._a.get_matrix(),
mtrans.Affine2D().scale(10).get_matrix())
@image_comparison(baseline_images=['pre_transform_data'])
def test_pre_transform_plotting():
    # a catch-all for as many plot layouts as possible which handle pre-transformed data
# NOTE: The axis range is important in this plot. It should be x10 what the data suggests it should be
ax = plt.axes()
times10 = mtrans.Affine2D().scale(10)
ax.contourf(np.arange(48).reshape(6, 8), transform=times10 + ax.transData)
ax.pcolormesh(np.linspace(0, 4, 7),
np.linspace(5.5, 8, 9),
np.arange(48).reshape(8, 6),
transform=times10 + ax.transData)
ax.scatter(np.linspace(0, 10), np.linspace(10, 0),
transform=times10 + ax.transData)
x = np.linspace(8, 10, 20)
y = np.linspace(1, 5, 20)
u = 2*np.sin(x) + np.cos(y[:, np.newaxis])
v = np.sin(x) - np.cos(y[:, np.newaxis])
df = 25. / 30. # Compatibility factor for old test image
ax.streamplot(x, y, u, v, transform=times10 + ax.transData,
density=(df, df), linewidth=u**2 + v**2)
# reduce the vector data down a bit for barb and quiver plotting
x, y = x[::3], y[::3]
u, v = u[::3, ::3], v[::3, ::3]
ax.quiver(x, y + 5, u, v, transform=times10 + ax.transData)
ax.barbs(x - 3, y + 5, u**2, v**2, transform=times10 + ax.transData)
@cleanup
def test_contour_pre_transform_limits():
ax = plt.axes()
xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
ax.contourf(xs, ys, np.log(xs * ys), transform=mtrans.Affine2D().scale(0.1) + ax.transData)
expected = np.array([[ 1.5 , 1.24],
[ 2. , 1.25]])
assert_almost_equal(expected, ax.dataLim.get_points())
@cleanup
def test_pcolor_pre_transform_limits():
# Based on test_contour_pre_transform_limits()
ax = plt.axes()
xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
ax.pcolor(xs, ys, np.log(xs * ys), transform=mtrans.Affine2D().scale(0.1) + ax.transData)
expected = np.array([[ 1.5 , 1.24],
[ 2. , 1.25]])
assert_almost_equal(expected, ax.dataLim.get_points())
@cleanup
def test_pcolormesh_pre_transform_limits():
# Based on test_contour_pre_transform_limits()
ax = plt.axes()
xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
ax.pcolormesh(xs, ys, np.log(xs * ys), transform=mtrans.Affine2D().scale(0.1) + ax.transData)
expected = np.array([[ 1.5 , 1.24],
[ 2. , 1.25]])
assert_almost_equal(expected, ax.dataLim.get_points())
def test_Affine2D_from_values():
points = np.array([ [0,0],
[10,20],
[-1,0],
])
t = mtrans.Affine2D.from_values(1,0,0,0,0,0)
actual = t.transform(points)
expected = np.array( [[0,0],[10,0],[-1,0]] )
assert_almost_equal(actual,expected)
t = mtrans.Affine2D.from_values(0,2,0,0,0,0)
actual = t.transform(points)
expected = np.array( [[0,0],[0,20],[0,-2]] )
assert_almost_equal(actual,expected)
t = mtrans.Affine2D.from_values(0,0,3,0,0,0)
actual = t.transform(points)
expected = np.array( [[0,0],[60,0],[0,0]] )
assert_almost_equal(actual,expected)
t = mtrans.Affine2D.from_values(0,0,0,4,0,0)
actual = t.transform(points)
expected = np.array( [[0,0],[0,80],[0,0]] )
assert_almost_equal(actual,expected)
t = mtrans.Affine2D.from_values(0,0,0,0,5,0)
actual = t.transform(points)
expected = np.array( [[5,0],[5,0],[5,0]] )
assert_almost_equal(actual,expected)
t = mtrans.Affine2D.from_values(0,0,0,0,0,6)
actual = t.transform(points)
expected = np.array( [[0,6],[0,6],[0,6]] )
assert_almost_equal(actual,expected)
def test_clipping_of_log():
# issue 804
M,L,C = Path.MOVETO, Path.LINETO, Path.CLOSEPOLY
points = [ (0.2, -99), (0.4, -99), (0.4, 20), (0.2, 20), (0.2, -99) ]
codes = [ M, L, L, L, C ]
path = Path(points, codes)
# something like this happens in plotting logarithmic histograms
trans = BlendedGenericTransform(Affine2D(),
LogScale.Log10Transform('clip'))
tpath = trans.transform_path_non_affine(path)
result = tpath.iter_segments(trans.get_affine(),
clip=(0, 0, 100, 100),
simplify=False)
tpoints, tcodes = list(zip(*result))
# Because y coordinate -99 is outside the clip zone, the first
# line segment is effectively removed. That means that the closepoly
# operation must be replaced by a move to the first point.
assert np.allclose(tcodes, [ M, M, L, L, L ])
class NonAffineForTest(mtrans.Transform):
"""
A class which looks like a non affine transform, but does whatever
the given transform does (even if it is affine). This is very useful
for testing NonAffine behaviour with a simple Affine transform.
"""
is_affine = False
output_dims = 2
input_dims = 2
def __init__(self, real_trans, *args, **kwargs):
self.real_trans = real_trans
        mtrans.Transform.__init__(self, *args, **kwargs)
def transform_non_affine(self, values):
return self.real_trans.transform(values)
def transform_path_non_affine(self, path):
return self.real_trans.transform_path(path)
class BasicTransformTests(unittest.TestCase):
def setUp(self):
self.ta1 = mtrans.Affine2D(shorthand_name='ta1').rotate(np.pi / 2)
self.ta2 = mtrans.Affine2D(shorthand_name='ta2').translate(10, 0)
self.ta3 = mtrans.Affine2D(shorthand_name='ta3').scale(1, 2)
self.tn1 = NonAffineForTest(mtrans.Affine2D().translate(1, 2), shorthand_name='tn1')
self.tn2 = NonAffineForTest(mtrans.Affine2D().translate(1, 2), shorthand_name='tn2')
self.tn3 = NonAffineForTest(mtrans.Affine2D().translate(1, 2), shorthand_name='tn3')
# creates a transform stack which looks like ((A, (N, A)), A)
self.stack1 = (self.ta1 + (self.tn1 + self.ta2)) + self.ta3
# creates a transform stack which looks like (((A, N), A), A)
self.stack2 = self.ta1 + self.tn1 + self.ta2 + self.ta3
# creates a transform stack which is a subset of stack2
self.stack2_subset = self.tn1 + self.ta2 + self.ta3
# when in debug, the transform stacks can produce dot images:
# self.stack1.write_graphviz(file('stack1.dot', 'w'))
# self.stack2.write_graphviz(file('stack2.dot', 'w'))
# self.stack2_subset.write_graphviz(file('stack2_subset.dot', 'w'))
def test_transform_depth(self):
assert_equal(self.stack1.depth, 4)
assert_equal(self.stack2.depth, 4)
assert_equal(self.stack2_subset.depth, 3)
def test_left_to_right_iteration(self):
stack3 = (self.ta1 + (self.tn1 + (self.ta2 + self.tn2))) + self.ta3
# stack3.write_graphviz(file('stack3.dot', 'w'))
target_transforms = [stack3,
(self.tn1 + (self.ta2 + self.tn2)) + self.ta3,
(self.ta2 + self.tn2) + self.ta3,
self.tn2 + self.ta3,
self.ta3,
]
r = [rh for _, rh in stack3._iter_break_from_left_to_right()]
self.assertEqual(len(r), len(target_transforms))
for target_stack, stack in zip(target_transforms, r):
self.assertEqual(target_stack, stack)
def test_transform_shortcuts(self):
self.assertEqual(self.stack1 - self.stack2_subset, self.ta1)
self.assertEqual(self.stack2 - self.stack2_subset, self.ta1)
assert_equal((self.stack2_subset - self.stack2),
self.ta1.inverted(),
)
assert_equal((self.stack2_subset - self.stack2).depth, 1)
assert_raises(ValueError, self.stack1.__sub__, self.stack2)
aff1 = self.ta1 + (self.ta2 + self.ta3)
aff2 = self.ta2 + self.ta3
self.assertEqual(aff1 - aff2, self.ta1)
self.assertEqual(aff1 - self.ta2, aff1 + self.ta2.inverted())
self.assertEqual(self.stack1 - self.ta3, self.ta1 + (self.tn1 + self.ta2))
self.assertEqual(self.stack2 - self.ta3, self.ta1 + self.tn1 + self.ta2)
self.assertEqual((self.ta2 + self.ta3) - self.ta3 + self.ta3, self.ta2 + self.ta3)
def test_contains_branch(self):
r1 = (self.ta2 + self.ta1)
r2 = (self.ta2 + self.ta1)
self.assertEqual(r1, r2)
self.assertNotEqual(r1, self.ta1)
self.assertTrue(r1.contains_branch(r2))
self.assertTrue(r1.contains_branch(self.ta1))
self.assertFalse(r1.contains_branch(self.ta2))
self.assertFalse(r1.contains_branch((self.ta2 + self.ta2)))
self.assertEqual(r1, r2)
self.assertTrue(self.stack1.contains_branch(self.ta3))
self.assertTrue(self.stack2.contains_branch(self.ta3))
self.assertTrue(self.stack1.contains_branch(self.stack2_subset))
self.assertTrue(self.stack2.contains_branch(self.stack2_subset))
self.assertFalse(self.stack2_subset.contains_branch(self.stack1))
self.assertFalse(self.stack2_subset.contains_branch(self.stack2))
self.assertTrue(self.stack1.contains_branch((self.ta2 + self.ta3)))
self.assertTrue(self.stack2.contains_branch((self.ta2 + self.ta3)))
self.assertFalse(self.stack1.contains_branch((self.tn1 + self.ta2)))
def test_affine_simplification(self):
        # tests that a transform stack only calls as much of the "non-affine"
        # part as is absolutely necessary, allowing the best possible
        # optimization with complex transformation stacks.
points = np.array([[0, 0], [10, 20], [np.nan, 1], [-1, 0]], dtype=np.float64)
na_pts = self.stack1.transform_non_affine(points)
all_pts = self.stack1.transform(points)
na_expected = np.array([[1., 2.], [-19., 12.],
[np.nan, np.nan], [1., 1.]], dtype=np.float64)
all_expected = np.array([[11., 4.], [-9., 24.],
[np.nan, np.nan], [11., 2.]], dtype=np.float64)
# check we have the expected results from doing the affine part only
np_test.assert_array_almost_equal(na_pts, na_expected)
# check we have the expected results from a full transformation
np_test.assert_array_almost_equal(all_pts, all_expected)
# check we have the expected results from doing the transformation in two steps
np_test.assert_array_almost_equal(self.stack1.transform_affine(na_pts), all_expected)
# check that getting the affine transformation first, then fully transforming using that
# yields the same result as before.
np_test.assert_array_almost_equal(self.stack1.get_affine().transform(na_pts), all_expected)
# check that the affine part of stack1 & stack2 are equivalent (i.e. the optimization
# is working)
expected_result = (self.ta2 + self.ta3).get_matrix()
result = self.stack1.get_affine().get_matrix()
np_test.assert_array_equal(expected_result, result)
result = self.stack2.get_affine().get_matrix()
np_test.assert_array_equal(expected_result, result)
class TestTransformPlotInterface(unittest.TestCase):
def tearDown(self):
plt.close()
def test_line_extent_axes_coords(self):
# a simple line in axes coordinates
ax = plt.axes()
ax.plot([0.1, 1.2, 0.8], [0.9, 0.5, 0.8], transform=ax.transAxes)
np.testing.assert_array_equal(ax.dataLim.get_points(), np.array([[np.inf, np.inf], [-np.inf, -np.inf]]))
def test_line_extent_data_coords(self):
# a simple line in data coordinates
ax = plt.axes()
ax.plot([0.1, 1.2, 0.8], [0.9, 0.5, 0.8], transform=ax.transData)
np.testing.assert_array_equal(ax.dataLim.get_points(), np.array([[ 0.1, 0.5], [ 1.2, 0.9]]))
def test_line_extent_compound_coords1(self):
# a simple line in data coordinates in the y component, and in axes coordinates in the x
ax = plt.axes()
trans = mtrans.blended_transform_factory(ax.transAxes, ax.transData)
ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
np.testing.assert_array_equal(ax.dataLim.get_points(), np.array([[ np.inf, -5.], [ -np.inf, 35.]]))
plt.close()
def test_line_extent_predata_transform_coords(self):
# a simple line in (offset + data) coordinates
ax = plt.axes()
trans = mtrans.Affine2D().scale(10) + ax.transData
ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
np.testing.assert_array_equal(ax.dataLim.get_points(), np.array([[1., -50.], [12., 350.]]))
plt.close()
def test_line_extent_compound_coords2(self):
# a simple line in (offset + data) coordinates in the y component, and in axes coordinates in the x
ax = plt.axes()
trans = mtrans.blended_transform_factory(ax.transAxes, mtrans.Affine2D().scale(10) + ax.transData)
ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
np.testing.assert_array_equal(ax.dataLim.get_points(), np.array([[ np.inf, -50.], [ -np.inf, 350.]]))
plt.close()
def test_line_extents_affine(self):
ax = plt.axes()
offset = mtrans.Affine2D().translate(10, 10)
plt.plot(list(xrange(10)), transform=offset + ax.transData)
        expected_data_lim = np.array([[0., 0.], [9., 9.]]) + 10
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)
def test_line_extents_non_affine(self):
ax = plt.axes()
offset = mtrans.Affine2D().translate(10, 10)
na_offset = NonAffineForTest(mtrans.Affine2D().translate(10, 10))
plt.plot(list(xrange(10)), transform=offset + na_offset + ax.transData)
        expected_data_lim = np.array([[0., 0.], [9., 9.]]) + 20
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)
def test_pathc_extents_non_affine(self):
ax = plt.axes()
offset = mtrans.Affine2D().translate(10, 10)
na_offset = NonAffineForTest(mtrans.Affine2D().translate(10, 10))
pth = mpath.Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
patch = mpatches.PathPatch(pth, transform=offset + na_offset + ax.transData)
ax.add_patch(patch)
        expected_data_lim = np.array([[0., 0.], [10., 10.]]) + 20
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)
def test_pathc_extents_affine(self):
ax = plt.axes()
offset = mtrans.Affine2D().translate(10, 10)
pth = mpath.Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
patch = mpatches.PathPatch(pth, transform=offset + ax.transData)
ax.add_patch(patch)
        expected_data_lim = np.array([[0., 0.], [10., 10.]]) + 10
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)
def test_line_extents_for_non_affine_transData(self):
ax = plt.axes(projection='polar')
# add 10 to the radius of the data
offset = mtrans.Affine2D().translate(0, 10)
plt.plot(list(xrange(10)), transform=offset + ax.transData)
# the data lim of a polar plot is stored in coordinates
# before a transData transformation, hence the data limits
# are not what is being shown on the actual plot.
        expected_data_lim = np.array([[0., 0.], [9., 9.]]) + [0, 10]
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)
def test_bbox_intersection():
bbox_from_ext = mtrans.Bbox.from_extents
inter = mtrans.Bbox.intersection
from numpy.testing import assert_array_equal as assert_a_equal
def assert_bbox_eq(bbox1, bbox2):
assert_a_equal(bbox1.bounds, bbox2.bounds)
r1 = bbox_from_ext(0, 0, 1, 1)
r2 = bbox_from_ext(0.5, 0.5, 1.5, 1.5)
r3 = bbox_from_ext(0.5, 0, 0.75, 0.75)
r4 = bbox_from_ext(0.5, 1.5, 1, 2.5)
r5 = bbox_from_ext(1, 1, 2, 2)
# self intersection -> no change
assert_bbox_eq(inter(r1, r1), r1)
# simple intersection
assert_bbox_eq(inter(r1, r2), bbox_from_ext(0.5, 0.5, 1, 1))
    # r1 contains r3
assert_bbox_eq(inter(r1, r3), r3)
# no intersection
assert_equal(inter(r1, r4), None)
# single point
assert_bbox_eq(inter(r1, r5), bbox_from_ext(1, 1, 1, 1))
@cleanup
def test_log_transform():
# Tests that the last line runs without exception (previously the
# transform would fail if one of the axes was logarithmic).
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.transData.transform((1,1))
if __name__=='__main__':
import nose
nose.runmodule(argv=['-s','--with-doctest'], exit=False)
| lgpl-3.0 |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/testing/__init__.py | 7 | 3942 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from contextlib import contextmanager
from matplotlib.cbook import is_string_like, iterable
from matplotlib import rcParams, rcdefaults, use
def _is_list_like(obj):
"""Returns whether the obj is iterable and not a string"""
return not is_string_like(obj) and iterable(obj)
# stolen from pandas
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None):
"""
Context manager for running code that expects to raise (or not raise)
warnings. Checks that code raises the expected warning and only the
expected warning. Pass ``False`` or ``None`` to check that it does *not*
raise a warning. Defaults to ``exception.Warning``, baseclass of all
Warnings. (basically a wrapper around ``warnings.catch_warnings``).
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
            # make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not _is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
assert saw_warning, ("Did not see expected warning of class %r."
% expected_warning.__name__)
assert not extra_warnings, ("Caused unexpected warning(s): %r."
% extra_warnings)
def setup():
# The baseline images are created in this locale, so we should use
# it during all of the tests.
import locale
import warnings
from matplotlib.backends import backend_agg, backend_pdf, backend_svg
try:
locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
except locale.Error:
try:
locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
except locale.Error:
warnings.warn(
"Could not set locale to English/United States. "
"Some date-related tests may fail")
use('Agg', warn=False) # use Agg backend for these tests
# These settings *must* be hardcoded for running the comparison
# tests and are not necessarily the default values as specified in
# rcsetup.py
rcdefaults() # Start with all defaults
rcParams['font.family'] = 'Bitstream Vera Sans'
rcParams['text.hinting'] = False
rcParams['text.hinting_factor'] = 8
# Clear the font caches. Otherwise, the hinting mode can travel
# from one test to another.
backend_agg.RendererAgg._fontd.clear()
backend_pdf.RendererPdf.truetype_font_cache.clear()
backend_svg.RendererSVG.fontd.clear()
| mit |
cwaluga/singularities_dolfin | src/correction.py | 1 | 5217 | #! /usr/bin/env python
"""
Evaluation of fits and correction tools
"""
__author__ = "Christian Waluga ([email protected])"
__copyright__ = "Copyright (c) 2013 %s" % __author__
from dolfin import *
from singular import *
from meshtools import *
import numpy as np
# evaluate fit which was obtained for p=1 and a symmetric mesh with isosceles
# cf. Ruede, Waluga, Wohlmuth 2013 for an explanation
def evaluate_fit(n, angle, dirichlet = True):
fitfunc = lambda c, x: c[0]*(np.exp(-2.0*(x - pi)) - 1.0) + c[1]*(x - pi)
if dirichlet:
lookup = {\
3: [0.0998183980437, 0.189615542703],
4: [0.0555624819392, 0.128041557699],
5: [0.0415019850858, 0.1072128902],
6: [0.0363481781425, 0.0979881012415],
7: [0.0328888599638, 0.0925971779024],
8: [0.0313092655216, 0.0894450110842],
9: [0.0304135897967, 0.0874557266743],
10: [0.0289942470411, 0.0857622163158],
11: [0.027901067039, 0.084485325609],
12: [0.0279439846719, 0.0838604929991],
}
else:
lookup = {\
3: [0.165216485785, 0.212197384825],
4: [0.0857378209084, 0.137467156594],
5: [0.0560465904453, 0.111396584279],
6: [0.044618021932, 0.10018816402],
7: [0.038669805111, 0.0941769068565],
8: [0.0352699798574, 0.0905576657623],
9: [0.0326235916411, 0.0879857131768],
10: [0.0311767624978, 0.0863397219782],
11: [0.0292296008797, 0.0848041056068],
12: [0.0282471423786, 0.0838033295999],
}
return fitfunc(lookup[n], angle)
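# A minimal usage sketch of evaluate_fit (assumption: only the tabulated range
# n = 3..12 is valid; other values raise a KeyError). Wrapped in a function so
# nothing runs on import.
def _example_evaluate_fit():
    from math import pi
    # correction factor gamma_inf for a Dirichlet problem on a mesh with
    # n = 6 subdivisions and a reentrant corner of angle 1.5*pi
    return evaluate_fit(6, 1.5*pi, dirichlet = True)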
# create a measure that allows us to assemble for the corner domains only
# this can be used to compute energies etc.
def corner_measure(mesh, corners):
# init mesh connectivity
mesh.init(1)
# create mesh function with marked indices
ncorners = len(corners)
domain = CellFunction('size_t', mesh, ncorners)
for vertex in vertices(mesh):
for i in range(ncorners):
if all(vertex.x(i) == x for i, x in enumerate(corners[i])):
domain.array()[vertex.entities(2)] = i
break
return Measure('dx')[domain]
# defines the correction for the corner domains (probably slower than using varying coefficients)
def correction(u, v, gammas, dx_correction):
corr = 0.0*dot(u,v)*dx
for i in range(0,len(gammas)):
corr -= Constant(gammas[i])*inner(grad(u), grad(v))*dx_correction(i)
return corr
# correct the material parameter at the reentrant corners
def correct_corner_stiffness(function, mesh, corners, gammas):
# init mesh connectivity
mesh.init(1)
# create mesh function with marked indices
for vertex in vertices(mesh):
for i in range(len(corners)):
if all(vertex.x(i) == x for i, x in enumerate(corners[i])):
function.vector()[vertex.entities(2)] *= (1.0-gammas[i])
break
return function
def solve_problem(mesh, corners, gammas, angles, u_exact = None):
# right hand side
f = Constant(1.0)
# correct coefficient
V0 = FunctionSpace(mesh, 'DG', 0)
a = interpolate(Constant(1.0), V0)
a = correct_corner_stiffness(a, mesh, corners, gammas)
# variational form
V = FunctionSpace(mesh, 'Lagrange', 1)
u = TrialFunction(V)
v = TestFunction(V)
a = a*inner(grad(u), grad(v))*dx
L = f*v*dx
# boundary conditions
bcs = [DirichletBC(V, 0.0, 'on_boundary')]
# solve the variational problem
uh = Function(V)
solve(a == L, uh, bcs, \
solver_parameters = { \
"linear_solver": "bicgstab", \
"preconditioner": "ml_amg" \
})
if u_exact is None:
return uh
V_ho = FunctionSpace(mesh, 'Lagrange', 3)
u_ho = interpolate(u_exact, V_ho)
weight = WeightingFunction(corners, angles)
return uh, sqrt(assemble(weight*(u_ho-uh)**2*dx))
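# A minimal usage sketch of the corrected solve (assumptions: FEniCS/DOLFIN is
# installed, and the mesh file name, corner location and corner angle below are
# placeholders for illustration only). Wrapped in a function so nothing runs on
# import.
def _example_solve_corrected():
    mesh = Mesh('lshape.xml')             # hypothetical mesh of an L-shaped domain
    corners = [(0.0, 0.0)]                # location of the reentrant corner
    angles = [1.5*pi]                     # interior angle at that corner
    gammas = [evaluate_fit(6, angles[0])] # fitted correction factor for n = 6
    return solve_problem(mesh, corners, gammas, angles)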
# unit test
if __name__ == '__main__':
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
import pylab
pylab.rc("font", family = "serif")
pylab.rc("font", size = 12)
# generate first plot
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.linspace(3, 12, 10)
Y = np.linspace(1.0, 2.0-1e-3, 10)
X, Y = np.meshgrid(X, Y)
Z = np.vectorize(lambda X, Y: evaluate_fit(X, Y*pi, dirichlet = True))(X, Y)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.binary, # choose cm.Spectral for color
linewidth=.5, antialiased=True)
ax.set_zlim(0.0, 0.5)
ax.set_xlabel(r'$n$')
ax.set_ylabel(r'$\theta/\pi$')
ax.set_zlabel(r'$\gamma_{\infty}$')
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
fig.savefig('gammafit3d.pdf')
plt.show()
# generate second plot
X = np.linspace(3, 7, 5)
Y = np.linspace(1.0, 2.0-1e-3, 20)
fig = plt.figure()
plots, labels = [], []
for i,n in enumerate(X):
p, = plt.plot(Y, evaluate_fit(n, Y*pi), ['kv-', 'g^-', 'rD-', 'bs-', 'y*-'][i])
plots.append(p)
labels.append('n={0}'.format(n))
plt.legend(plots,labels,loc=0)
plt.xlabel(r'$\theta/\pi$')
plt.ylabel(r'$\gamma_\infty$')
fig.savefig('gamma-plot-angle-n=3-7.pdf')
plt.show()
| mit |
tectronics/pygranule | pygranule/orbital_layer.py | 2 | 13944 | from datetime import datetime, timedelta
import numpy as np
from pyproj import Proj
from shapely import geometry
from abc import ABCMeta, abstractmethod
class OrbitalLayerError(Exception):
def __init__(self,value):
self.value = value
def __str__(self):
return repr(self.value)
class OrbitalLayer(object):
"""
Defines a common interface for accessing orbital
information from an orbital package, such as
pyorbital. This forms a convenient layer (model) between
getsat and the orbital module.
"""
__metaclass__ = ABCMeta
instrument_info_db = {'AVHRR':{'scan_steps':2048},
'MODIS':{},
'VIIRS':{}}
earth_radius = 6.37e6
proj_out_of_bounds_value = 1.0e30
def __init__(self, aoi, sat, instrument="AVHRR"):
self.aoi = np.array([ [x for (x,y) in aoi],[y for (x,y) in aoi] ])
self.sat = sat
self.instrument = instrument
self.instrument_info = self.instrument_info_db[instrument]
# init working projection for aoi/observation polygons
clon,clat = self.aoi_center()
self.working_projection = {'proj':'ortho', 'lon_0':clon, 'lat_0':clat}
self.proj = Proj(**self.working_projection)
@abstractmethod
def next_transit(self, start=datetime.now(), resolution=100):
"""
Next transit time relative to center of aoi.
Search resolution accuracy defined by subdivision of orbital period.
"""
pass
def next_sampling(self, start=datetime.now(), resolution=100, search_limit=timedelta(hours=30)):
"""
Returns next sampling of aoi, where swath intersects the aoi.
Returns transit time relative to center of aoi and fraction of aoi area
sampled by the instrument swath.
"""
# next transit
quarter_period = timedelta(minutes=self.orbital_period()/4.0)
t = self.next_transit(start,resolution)
Dt = t - start
f = self.intersect_fraction(t-quarter_period, 2.0*quarter_period.total_seconds()/60.0)
if f > 0.0:
return t, f
else:
while Dt < search_limit:
t = self.next_transit(t,resolution)
Dt = t - start
f = self.intersect_fraction(t-quarter_period, 2.0*quarter_period.total_seconds()/60.0)
if f > 0.0:
return t, f
raise Exception('AOI Sampling exceeded search limit')
@abstractmethod
def orbital_period(self):
"""
Orbital period in floating point minutes.
"""
pass
def swath_lonlats(self, start, period=None):
"""
Returns outline of the instrument swath in lonlat coordinates.
        This is useful for forming a polygon shape to test intersection with
the area of interest.
"""
# default period is 1 minute long granule
if period is None:
period = 1.0
# 100th step of orbit should be sufficient resolution for resulting polygon
        # equivalent to a 1 min AVHRR granule
t_step = self.orbital_period()/100.0
t_steps = np.arange(0.0, period+t_step, t_step)
scans_lon = []
scans_lat = []
for dt in t_steps:
t = start + timedelta(minutes=dt)
pos_time = self.scan_line_lonlats(t)
scans_lon.append( pos_time[0] )
scans_lat.append( pos_time[1] )
scans_lon = np.array(scans_lon)
scans_lat = np.array(scans_lat)
# pick out perimeter of swaths
lons = scans_lon[0,:].tolist() + (scans_lon[1:-1,-1].tolist()) \
+ (scans_lon[-1,:])[::-1].tolist() + (scans_lon[1:-1,0])[::-1].tolist()
lats = scans_lat[0,:].tolist() + scans_lat[1:-1,-1].tolist() \
+ (scans_lat[-1,:])[::-1].tolist() + (scans_lat[1:-1,0])[::-1].tolist()
return np.array((lons,lats))
@abstractmethod
def scan_line_lonlats(self, t):
"""
Returns a single instrument scan line starting at datetime t
"""
pass
def scan_line_working_projection(self, t):
"""
        Returns a single instrument scan line starting at datetime t
in working projection coordinates.
"""
return np.array(self.proj(*self.scan_line_lonlats(t)))
def swath_working_projection(self, start, period=None):
"""
Returns the coordinate outline of the instrument swath in
        working projection coordinates. Coordinates that fall outside the
        map projection bounds are handled by splitting the outline into a
        list of segments.
"""
# default period is 1 minute long granule
if period is None:
period = 1.0
# 100th step of orbit should be sufficient resolution for resulting polygon
# equivalent ot 1m AVHRR granule
t_step = self.orbital_period()/1000.0
t_steps = np.arange(0.0, period+t_step, t_step)
# fetch all scan lines and index segments
def has_valid_xy(A):
return ((A[0] != self.proj_out_of_bounds_value) & (A[1] != self.proj_out_of_bounds_value)).any()
def valid_xys(A):
sel = (A[0] != self.proj_out_of_bounds_value) & (A[1] != self.proj_out_of_bounds_value)
return A[:,sel]
scans = []
segment_idx = []
count = 0
idx = -1
for dt in t_steps:
t = start + timedelta(minutes=dt)
pos = self.scan_line_working_projection(t)
if has_valid_xy(pos):
scans.append( valid_xys(pos) )
if idx < 0: #begin segment
idx = count
count += 1
elif idx > -1: #end segment
segment_idx.append((idx,count-1))
idx = -1
if (pos < self.proj_out_of_bounds_value).any():
segment_idx.append((idx,count-1))
# pick out perimeters of segments (this is fast / above is bottleneck)
segments = []
for s in segment_idx:
a,b = s
xs = scans[a][0].tolist()
ys = scans[a][1].tolist()
for i in range(a+1,b):
xs = scans[i][0,0:1].tolist() + xs + scans[i][0,-1:].tolist()
ys = scans[i][1,0:1].tolist() + ys + scans[i][1,-1:].tolist()
xs = xs + scans[b][0,::-1].tolist()
ys = ys + scans[b][1,::-1].tolist()
segments.append( np.array((xs,ys)) )
return segments
def intersect_polygon(self, start, period=None):
"""
Returns a shapely polygon representing the intersection
with the area of interest and the instrument swath sampled
at datetime start for period number of minutes.
"""
swath = self.swath_polygon(start, period)
aoi = self.aoi_polygon()
return aoi.intersection(swath)
def intersect_fraction(self, start, period=None):
"""
Returns the fractional area of the area of interest, sampled by the
instrument swath starting at datetime 'start' for a 'period' number of minutes.
"""
swath = self.swath_polygon(start, period)
aoi = self.aoi_polygon()
intersect = aoi.intersection(swath)
return intersect.area/aoi.area
def does_swath_sample_aoi(self, start, period=None):
"""
Check if swath starting at time 'start' samples (overlaps)
the area of interest.
"""
swath = self.swath_polygon(start, period)
aoi = self.aoi_polygon()
return swath.intersects(aoi)
#return aoi.intersection(swath).area > 0.0
# using area intersect because overlaps fails
# for small poly or if swath line does not cross (need to read pyshapely manual)
#return swath.overlaps(aoi)
def swath_polygon(self, start, period=None):
segments = self.swath_working_projection(start,period=period)
try:
if len(segments) == 0:
# empty polygon
return geometry.Polygon()
elif len(segments) == 1:
return geometry.Polygon(segments[0].transpose().tolist())
else:
P = geometry.Polygon(segments[0].transpose().tolist())
for i in range(1,len(segments)):
P = P.union( geometry.Polygon(
segments[i].transpose().tolist()) )
return P
except ValueError:
# empty polygon if invalid polygon created
return geometry.Polygon()
def aoi_polygon(self):
xys = np.array(self.proj(*self.aoi))
coords = xys.transpose().tolist()
if len(coords) > 2:
return geometry.Polygon(coords)
elif len(coords) == 2:
return geometry.LineString(coords)
else:
return geometry.Point(coords[0])
def show_swath(self, start, period=None):
"""
A helper method that displays the orbital swath
starting at datetime start, for a period number of minutes.
        If start is iterable, then the method assumes it is an iterable
of datetimes, plotting a number of swaths at those times.
"""
# test if start is iterable, EAFP style:
try:
for e in start:
pass
except TypeError:
start = [start]
import matplotlib.pyplot as plt
# fetch the coordinates
(aoix,aoiy) = self.proj(*self.aoi)
# plot AOI
plt.axis('equal')
plt.plot(aoix,aoiy,'r-')
plt.plot((aoix[-1],aoix[0]),(aoiy[-1],aoiy[0]),'r-')
# plot Earth
Re = self.earth_radius
circle=plt.Circle((0,0),Re,color='g',fill=False)
fig = plt.gcf()
fig.gca().add_artist(circle)
plt.xlim((-1.5*Re,1.5*Re))
plt.ylim((-1.5*Re,1.5*Re))
# Plot granules
for t in start:
# fetch the coordinates
xys_segs = self.swath_working_projection(t, period)
for xys in xys_segs:
plt.plot(xys[0],xys[1],'b-')
plt.plot((xys[0][-1],xys[0][0]),(xys[1][-1],xys[1][0]),'b-')
plt.show()
#from mpl_toolkits.basemap import Basemap
#m = Basemap(projection='ortho', lon_0=self.working_projection['lon_0'],
# lat_0=self.working_projection['lat_0'],resolution='l')
# convert and plot the predicted pixels in red
#p1 = m.plot(x,y, marker='+', color='red', markerfacecolor='red',
# markeredgecolor='red', markersize=1, markevery=1, zorder=4, linewidth=1.0)
#m.fillcontinents(color='0.85', lake_color=None, zorder=3)
#m.drawparallels(np.arange(-90.,90.,5.), labels=[1,0,1,0],fontsize=10, dashes=[1, 0], color=[0.8,0.8,0.8], zorder=1)
#m.drawmeridians(np.arange(-180.,180.,5.), labels=[0,1,0,1],fontsize=10, dashes=[1, 0], color=[0.8,0.8,0.8], zorder=2)
#plt.show()
def show_swath_pycoast(self, start, period=None):
"""
A helper method that displays the orbital swath
starting at datetime start, for a period number of minutes.
        If start is iterable, then the method assumes it is an iterable
of datetimes, plotting a number of swaths at those times.
"""
# test if start is iterable, EAFP style:
try:
for e in start:
pass
except TypeError:
start = [start]
start.sort()
from PIL import Image
from pycoast import ContourWriterAGG
from pydecorate import DecoratorAGG
img = Image.new('RGB', (650, 650))
proj4_string = ""
for x in self.working_projection:
proj4_string += "+%s=%s "%(x,self.working_projection[x])
area_extent = (-6700000.0, -6700000.0, 6700000.0, 6700000.0)
area_def = (proj4_string, area_extent)
cw = ContourWriterAGG()
cw.add_grid(img, area_def, (10.0,10.0),(2.0,2.0), fill='blue',
outline='gray', outline_opacity=130, minor_outline=None, write_text=False)
# Plot granules
for t in start:
# fetch the coordinates
xys_segs = self.swath_working_projection(t, period)
for xys in xys_segs:
lls = self.proj(xys[0],xys[1],inverse=True)
cw.add_polygon(img, area_def, zip(lls[0], lls[1]), outline="blue", fill="blue", fill_opacity=70, width=1)
cw.add_coastlines(img, area_def, resolution='l')
aoi_coords = zip(*self.aoi)
## TODO: Handle single point case properly
if len(aoi_coords) == 1:
x, y = aoi_coords[0]
d = 0.5
line_coords = [(x-d,y),(x+d,y)]
cw.add_line(img, area_def, line_coords,
outline="red", fill="red", fill_opacity=100, width=2)
elif len(aoi_coords) == 2:
cw.add_line(img, area_def, aoi_coords, outline="red", fill="red", fill_opacity=100, width=10)
else:
cw.add_polygon(img, area_def, aoi_coords, outline="red", fill="red", fill_opacity=100, width=2)
# Decorate
dc = DecoratorAGG(img)
text = "Granules from time: %s + %.2f min."%(start[0].strftime('%Y.%m.%d %H:%M:%S'),
(start[-1]-start[0]).total_seconds()/60.0)
dc.align_bottom()
dc.add_text(text,height=0)
img.show()
def aoi_center(self):
## eventually do this in projection coordinate space instead.
## swath and aoi intersection will be evaluated in projection (e.g. orthographic).
return sum(self.aoi[0])/len(self.aoi[0]), sum(self.aoi[1])/len(self.aoi[1])
@abstractmethod
def set_tle(self, line1, line2):
"""
Use to apply a particular two line element.
"""
pass
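# A minimal, shapely-only sketch (illustration, not part of the OrbitalLayer
# interface) of the area test behind intersect_fraction(): the fraction of the
# AOI polygon covered by a swath polygon, both given in the same projected
# coordinates. The two rectangles are made-up example geometries.
def _example_intersect_fraction():
    aoi = geometry.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
    swath = geometry.Polygon([(2, -1), (6, -1), (6, 5), (2, 5)])
    return aoi.intersection(swath).area / aoi.area  # 0.5 for these rectangles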
| gpl-3.0 |
modelblocks/modelblocks-release | resource-rt/scripts/filter_cols.py | 1 | 1065 | import sys, re, argparse, pandas as pd
argparser = argparse.ArgumentParser('''
Reads a space-delimited data table and outputs only user-specified columns.
''')
argparser.add_argument('-c', '--cols', dest='c', nargs='+', action='store', help='column names to output')
argparser.add_argument('-x', '--exclude', dest='x', nargs='+', action='store', help='column names to drop')
argparser.add_argument('-d', '--dedup', dest='d', action='store_true', help='drop duplicate column names (keep values from the first one)')
args, unknown = argparser.parse_known_args()
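# Example invocations (hypothetical file and column names, for illustration
# only); the space-delimited table is read from stdin and written to stdout:
#   cat data.itemmeasures | python filter_cols.py -c word sentid fdur > subset.txt
#   cat data.itemmeasures | python filter_cols.py -x fdur -d > cleaned.txt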
def main():
data = pd.read_csv(sys.stdin, sep=' ', skipinitialspace=True, quoting=3)
if args.d:
        dups = [col for col in data.columns.values if re.search(r'\.[0-9]+$', col)]
for dup in dups:
data.drop(dup, axis=1, inplace=True)
if args.c != None:
data = data.filter(items=args.c)
if args.x != None:
for x in args.x:
data.drop(x, axis=1, inplace=True)
data.to_csv(sys.stdout, ' ', na_rep='NaN', index=False, quoting=3)
if __name__ == '__main__':
    main()
| gpl-3.0 |
rohanp/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
mjgrav2001/scikit-learn | sklearn/datasets/lfw.py | 50 | 19048 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representaion
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with at least
    # `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
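# A minimal usage sketch (not executed on import; assumes the LFW archive can
# be downloaded or is already cached under the scikit-learn data home). The
# parameter values are illustrative only.
def _example_fetch_lfw_people():
    people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
    # people.images is (n_samples, height, width); people.data is the
    # flattened per-image view consumed by estimators
    return people.images.shape, len(people.target_names)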
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit-learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlations from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to the same or to different persons
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (2200,)
Labels associated with each pair of images: 0 corresponds to a pair of
pictures of two different persons, 1 to two pictures of the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
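Examples
--------
A minimal usage sketch; skipped under doctest because it downloads the LFW
archive on first use (the shapes quoted above correspond to the default
'train' subset):

>>> from sklearn.datasets import fetch_lfw_pairs
>>> lfw_pairs = fetch_lfw_pairs(subset='train')  # doctest: +SKIP
>>> lfw_pairs.pairs.shape[0] == lfw_pairs.target.shape[0]  # doctest: +SKIP
True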
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
Clyde-fare/scikit-learn | sklearn/tests/test_cross_validation.py | 29 | 46740 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
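# Shared fixtures used by the tests below: X has 10 samples with 2 features,
# X_sparse / W_sparse / P_sparse are sparse counterparts used as data and as
# extra fit parameters, and y = [0, 0, 1, 1, ..., 4, 4] gives 5 classes with
# 2 samples each.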
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non-
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test that StratifiedShuffleSplit draws indices with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1./3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X and allow_nd
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (classes are balanced so f1_score should be equal to zero/one
# score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
sonnyhu/scikit-learn | sklearn/metrics/setup.py | 24 | 1059 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
gbouvignies/chemex | setup.py | 1 | 1232 | from setuptools import find_packages
from setuptools import setup
with open("README.md", "rb") as f:
long_description = f.read().decode("utf-8")
setup(
name="chemex",
use_scm_version=True,
description="ChemEx is an analysis program for chemical exchange detected by NMR.",
long_description=long_description,
author="Guillaume Bouvignies",
author_email="[email protected]",
url="https://github.com/gbouvignies/chemex",
license="3-Clause BSD",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords="nmr protein dynamics chemical exchange cpmg cest relaxation data fitting",
packages=find_packages(exclude=["tests"]),
setup_requires=["setuptools_scm"],
install_requires=[
"numpy>=1.0",
"scipy>=1.0",
"matplotlib>=2.0",
"lmfit>=0.9.11",
"asteval>=0.9.11",
],
python_requires=">=3.6",
entry_points={"console_scripts": ["chemex = chemex.chemex:main"]},
)
| bsd-3-clause |
Agent007/deepchem | examples/binding_pockets/binding_pocket_rf.py | 6 | 1357 | """
Script that trains Sklearn RF models on the PDBbind pockets dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import os
import deepchem as dc
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from binding_pocket_datasets import load_pdbbind_pockets
# For stable runs
np.random.seed(123)
split = "random"
subset = "full"
pdbbind_tasks, pdbbind_datasets, transformers = load_pdbbind_pockets(
split=split, subset=subset)
train_dataset, valid_dataset, test_dataset = pdbbind_datasets
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
current_dir = os.path.dirname(os.path.realpath(__file__))
model_dir = os.path.join(current_dir, "pocket_%s_%s_RF" % (split, subset))
sklearn_model = RandomForestClassifier(n_estimators=500)
model = dc.models.SklearnModel(sklearn_model, model_dir=model_dir)
# Fit model on train dataset
print("Fitting model on train dataset")
model.fit(train_dataset)
model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| mit |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/axes_grid/demo_axes_hbox_divider.py | 7 | 1547 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import HBoxDivider
import mpl_toolkits.axes_grid1.axes_size as Size
def make_heights_equal(fig, rect, ax1, ax2, pad):
# pad in inches
h1, v1 = Size.AxesX(ax1), Size.AxesY(ax1)
h2, v2 = Size.AxesX(ax2), Size.AxesY(ax2)
pad_v = Size.Scaled(1)
pad_h = Size.Fixed(pad)
my_divider = HBoxDivider(fig, rect,
horizontal=[h1, pad_h, h2],
vertical=[v1, pad_v, v2])
ax1.set_axes_locator(my_divider.new_locator(0))
ax2.set_axes_locator(my_divider.new_locator(2))
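# Note: because both axes feed the same vertical stack of the divider, their
# heights are forced to match while each keeps its own aspect ratio; the
# horizontal stack [h1, pad, h2] lays them out side by side inside rect.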
if __name__ == "__main__":
arr1 = np.arange(20).reshape((4,5))
arr2 = np.arange(20).reshape((5,4))
fig, (ax1, ax2) = plt.subplots(1,2)
ax1.imshow(arr1, interpolation="nearest")
ax2.imshow(arr2, interpolation="nearest")
rect = 111 # subplot param for combined axes
make_heights_equal(fig, rect, ax1, ax2, pad=0.5) # pad in inches
for ax in [ax1, ax2]:
ax.locator_params(nbins=4)
# annotate
ax3 = plt.axes([0.5, 0.5, 0.001, 0.001], frameon=False)
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
ax3.annotate("Location of two axes are adjusted\n"
"so that they have equal heights\n"
"while maintaining their aspect ratios", (0.5, 0.5),
xycoords="axes fraction", va="center", ha="center",
bbox=dict(boxstyle="round, pad=1", fc="w"))
plt.show()
| mit |
zasdfgbnm/tensorflow | tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py | 2 | 13049 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Eager Execution Example: RNN Colorbot.
This example builds, trains, and evaluates a multi-layer RNN that can be
run with eager execution enabled. The RNN is trained to map color names to
their RGB values: it takes as input a one-hot encoded character sequence and
outputs a three-tuple (R, G, B) (scaled by 1/255).
For example, say we'd like the RNN Colorbot to generate the RGB values for the
color white. To represent our query in a form that the Colorbot could
understand, we would create a sequence of five 256-long vectors encoding the
ASCII values of the characters in "white". The first vector in our sequence
would be 0 everywhere except for the ord("w")-th position, where it would be
1, the second vector would be 0 everywhere except for the
ord("h")-th position, where it would be 1, and similarly for the remaining three
vectors. We refer to such indicator vectors as "one-hot encodings" of
characters. After consuming these vectors, a well-trained Colorbot would output
the three tuple (1, 1, 1), since the RGB values for white are (255, 255, 255).
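As an illustration only (this mirrors what the `parse` function below does
with tf.decode_raw and tf.one_hot; it is not part of the training pipeline):

  chars = tf.one_hot(tf.decode_raw(tf.constant("white"), tf.uint8), depth=256)
  # chars has shape (5, 256); row i is zero everywhere except at the ASCII
  # code of the i-th character of "white".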
We are of course free to ask the colorbot to generate colors for any string we'd
like, such as "steel gray," "tensorflow orange," or "green apple," though
your mileage may vary as your queries increase in creativity.
This example shows how to:
1. read, process, (one-hot) encode, and pad text data via the
Datasets API;
2. build a trainable model;
3. implement a multi-layer RNN using Python control flow
constructs (e.g., a for loop);
4. train a model using an iterative gradient-based method.
The data used in this example is licensed under the Creative Commons
Attribution-ShareAlike License and is available at
https://en.wikipedia.org/wiki/List_of_colors:_A-F
https://en.wikipedia.org/wiki/List_of_colors:_G-M
https://en.wikipedia.org/wiki/List_of_colors:_N-Z
This example was adapted from
https://github.com/random-forests/tensorflow-workshop/tree/master/extras/colorbot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
import six
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
def parse(line):
"""Parse a line from the colors dataset."""
# Each line of the dataset is comma-separated and formatted as
# color_name, r, g, b
# so `items` is a list [color_name, r, g, b].
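# For example, the line "white,255,255,255" would yield rgb == [1., 1., 1.],
# a (5, 256) one-hot matrix for the characters of "white", and length == 5.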
items = tf.string_split([line], ",").values
rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.
# Represent the color name as a one-hot encoded character sequence.
color_name = items[0]
chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
# The sequence length is needed by our RNN.
length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
return rgb, chars, length
def load_dataset(data_dir, url, batch_size):
"""Loads the colors data at path into a PaddedDataset."""
# Downloads data at url into data_dir/basename(url). The dataset has a header
# row (color_name, r, g, b) followed by comma-separated lines.
path = tf.contrib.learn.datasets.base.maybe_download(
os.path.basename(url), data_dir, url)
# This chain of commands loads our data by:
# 1. skipping the header; (.skip(1))
# 2. parsing the subsequent lines; (.map(parse))
# 3. shuffling the data; (.shuffle(...))
# 4. grouping the data into padded batches (.padded_batch(...)).
dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle(
buffer_size=10000).padded_batch(
batch_size, padded_shapes=([None], [None, None], []))
return dataset
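# Each padded batch yielded by this dataset is a (rgb, chars, length) tuple
# with shapes ([batch, 3], [batch, max_name_length, 256], [batch]), matching
# the padded_shapes argument above.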
# pylint: disable=not-callable
class RNNColorbot(tfe.Network):
"""Multi-layer (LSTM) RNN that regresses on real-valued vector labels.
"""
def __init__(self, rnn_cell_sizes, label_dimension, keep_prob):
"""Constructs an RNNColorbot.
Args:
rnn_cell_sizes: list of integers denoting the size of each LSTM cell in
the RNN; rnn_cell_sizes[i] is the size of the i-th layer cell
label_dimension: the length of the labels on which to regress
keep_prob: (1 - dropout probability); dropout is applied to the outputs of
each LSTM layer
"""
super(RNNColorbot, self).__init__(name="")
self.label_dimension = label_dimension
self.keep_prob = keep_prob
# Note the calls to `track_layer` below; these calls register the layers as
# network components that house trainable variables.
self.cells = [
self.track_layer(tf.nn.rnn_cell.BasicLSTMCell(size))
for size in rnn_cell_sizes
]
self.relu = self.track_layer(
tf.layers.Dense(label_dimension, activation=tf.nn.relu, name="relu"))
def call(self, chars, sequence_length, training=False):
"""Implements the RNN logic and prediction generation.
Args:
chars: a Tensor of dimension [batch_size, time_steps, 256] holding a
batch of one-hot encoded color names
sequence_length: a Tensor of dimension [batch_size] holding the length
of each character sequence (i.e., color name)
training: whether the invocation is happening during training
Returns:
A tensor of dimension [batch_size, label_dimension] that is produced by
passing chars through a multi-layer RNN and applying a ReLU to the final
hidden state.
"""
# Transpose the first and second dimensions so that chars is of shape
# [time_steps, batch_size, dimension].
chars = tf.transpose(chars, [1, 0, 2])
# The outer loop cycles through the layers of the RNN; the inner loop
# executes the time steps for a particular layer.
batch_size = int(chars.shape[1])
for l in range(len(self.cells)):
cell = self.cells[l]
outputs = []
state = cell.zero_state(batch_size, tf.float32)
# Unstack the inputs to obtain a list of batches, one for each time step.
chars = tf.unstack(chars, axis=0)
for ch in chars:
output, state = cell(ch, state)
outputs.append(output)
# The outputs of this layer are the inputs of the subsequent layer.
chars = tf.stack(outputs, axis=0)
if training:
chars = tf.nn.dropout(chars, self.keep_prob)
# Extract the correct output (i.e., hidden state) for each example. All the
# character sequences in this batch were padded to the same fixed length so
# that they could be easily fed through the above RNN loop. The
# `sequence_length` vector tells us the true lengths of the character
# sequences, letting us obtain for each sequence the hidden state that was
# generated by its non-padding characters.
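    # For instance, with batch_size 2 and sequence_length [4, 6], `indices`
    # below is [[3, 0], [5, 1]], so tf.gather_nd picks chars[3, 0, :] and
    # chars[5, 1, :], i.e. the output at each sequence's final real character.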
batch_range = [i for i in range(batch_size)]
indices = tf.stack([sequence_length - 1, batch_range], axis=1)
hidden_states = tf.gather_nd(chars, indices)
return self.relu(hidden_states)
def loss(labels, predictions):
"""Computes mean squared loss."""
return tf.reduce_mean(tf.square(predictions - labels))
def test(model, eval_data):
"""Computes the average loss on eval_data, which should be a Dataset."""
avg_loss = tfe.metrics.Mean("loss")
for (labels, chars, sequence_length) in tfe.Iterator(eval_data):
predictions = model(chars, sequence_length, training=False)
avg_loss(loss(labels, predictions))
print("eval/loss: %.6f\n" % avg_loss.result())
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", avg_loss.result())
def train_one_epoch(model, optimizer, train_data, log_interval=10):
"""Trains model on train_data using optimizer."""
tf.train.get_or_create_global_step()
def model_loss(labels, chars, sequence_length):
predictions = model(chars, sequence_length, training=True)
loss_value = loss(labels, predictions)
tf.contrib.summary.scalar("loss", loss_value)
return loss_value
for (batch, (labels, chars, sequence_length)) in enumerate(
tfe.Iterator(train_data)):
with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
batch_model_loss = functools.partial(model_loss, labels, chars,
sequence_length)
optimizer.minimize(
batch_model_loss, global_step=tf.train.get_global_step())
if log_interval and batch % log_interval == 0:
print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/train.csv"
SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/test.csv"
def main(_):
data_dir = os.path.join(FLAGS.dir, "data")
train_data = load_dataset(
data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
eval_data = load_dataset(
data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)
model = RNNColorbot(
rnn_cell_sizes=FLAGS.rnn_cell_sizes,
label_dimension=3,
keep_prob=FLAGS.keep_probability)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
if FLAGS.no_gpu or tfe.num_gpus() <= 0:
print(tfe.num_gpus())
device = "/cpu:0"
else:
device = "/gpu:0"
print("Using device %s." % device)
log_dir = os.path.join(FLAGS.dir, "summaries")
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
start = time.time()
with train_summary_writer.as_default():
train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)
end = time.time()
print("train/time for epoch #%d: %.2f" % (epoch, end - start))
with test_summary_writer.as_default():
test(model, eval_data)
print("Colorbot is ready to generate colors!")
while True:
try:
color_name = six.moves.input(
"Give me a color name (or press enter to exit): ")
except EOFError:
return
if not color_name:
return
_, chars, length = parse(color_name)
with tf.device(device):
(chars, length) = (tf.identity(chars), tf.identity(length))
chars = tf.expand_dims(chars, 0)
length = tf.expand_dims(length, 0)
preds = tf.unstack(model(chars, length, training=False)[0])
# Predictions cannot be negative, as they are generated by a ReLU layer;
# they may, however, be greater than 1.
clipped_preds = tuple(min(float(p), 1.0) for p in preds)
rgb = tuple(int(p * 255) for p in clipped_preds)
print("rgb:", rgb)
data = [[clipped_preds]]
if HAS_MATPLOTLIB:
plt.imshow(data)
plt.title(color_name)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dir",
type=str,
default="/tmp/rnn_colorbot/",
help="Directory to download data files and save logs.")
parser.add_argument(
"--log_interval",
type=int,
default=10,
metavar="N",
help="Log training loss every log_interval batches.")
parser.add_argument(
"--num_epochs", type=int, default=20, help="Number of epochs to train.")
parser.add_argument(
"--rnn_cell_sizes",
type=int,
nargs="+",
default=[256, 128],
help="List of sizes for each layer of the RNN.")
parser.add_argument(
"--batch_size",
type=int,
default=64,
help="Batch size for training and eval.")
parser.add_argument(
"--keep_probability",
type=float,
default=0.5,
help="Keep probability for dropout between layers.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.01,
help="Learning rate to be used during training.")
parser.add_argument(
"--no_gpu",
action="store_true",
default=False,
help="Disables GPU usage even if a GPU is available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
ipashchenko/ml4vs | ml4vs/logistic.py | 1 | 7944 | # -*- coding: utf-8 -*-
import os
import numpy as np
import hyperopt
from sklearn import decomposition
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import confusion_matrix, precision_recall_curve, f1_score
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
from sklearn.cross_validation import StratifiedKFold, cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer, StandardScaler, FunctionTransformer,\
RobustScaler
from sklearn.linear_model import LogisticRegression
from data_load import load_data, load_data_tgt
from sklearn.cluster import FeatureAgglomeration
data_dir = '/home/ilya/code/ml4vs/data/LMC_SC20__corrected_list_of_variables/raw_index_values'
file_1 = 'vast_lightcurve_statistics_variables_only.log'
file_0 = 'vast_lightcurve_statistics_constant_only.log'
file_0 = os.path.join(data_dir, file_0)
file_1 = os.path.join(data_dir, file_1)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
names_to_delete = ['meaningless_1', 'meaningless_2', 'star_ID',
'Npts', 'CSSD', 'clipped_sigma', 'lag1', 'L', 'Lclp', 'Jclp',
'MAD', 'Ltim']
# names_to_delete = ['meaningless_1', 'meaningless_2', 'star_ID',
# 'Npts', 'CSSD']
X, y, df, features_names, delta = load_data([file_0, file_1], names,
names_to_delete)
target = 'variable'
predictors = list(df)
predictors.remove(target)
dtrain = df
kfold = StratifiedKFold(y, n_folds=4, shuffle=True, random_state=1)
def log_axis(X_, names=None):
X = X_.copy()
tr_names = ['clipped_sigma', 'weighted_sigma', 'RoMS', 'rCh2', 'Vp2p',
'Ex', 'inv_eta', 'S_B']
for name in tr_names:
try:
# print "Log-Transforming {}".format(name)
i = names.index(name)
X[:, i] = np.log(X[:, i])
except ValueError:
print "No {} in predictors".format(name)
pass
return X
def objective(space):
print "C, n_pca, cw : {}, {}, {}".format(space['C'], space['n_pca'],
space['cw'])
clf = LogisticRegression(C=space['C'], class_weight={0: 1, 1: space['cw']},
random_state=1, max_iter=300, n_jobs=1,
tol=10.**(-5))
pca = decomposition.PCA(n_components=space['n_pca'], random_state=1)
# cal_clf = CalibratedClassifierCV(clf, method='sigmoid')
# ward = FeatureAgglomeration(n_clusters=space['n_clusters'])
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
# estimators.append(('func', FunctionTransformer(log_axis, kw_args={'names':
# predictors})))
estimators.append(('scaler', StandardScaler()))
estimators.append(('pca', pca))
# estimators.append(('ward', ward))
estimators.append(('clf', clf))
pipeline = Pipeline(estimators)
# auc = np.mean(cross_val_score(pipeline, X, y, cv=kfold, scoring='roc_auc',
# verbose=1, n_jobs=2))
y_preds = cross_val_predict(pipeline, X, y, cv=kfold, n_jobs=4)
CMs = list()
for train_idx, test_idx in kfold:
CMs.append(confusion_matrix(y[test_idx], y_preds[test_idx]))
CM = np.sum(CMs, axis=0)
FN = CM[1][0]
TP = CM[1][1]
FP = CM[0][1]
print "TP = {}".format(TP)
print "FP = {}".format(FP)
print "FN = {}".format(FN)
f1 = 2. * TP / (2. * TP + FP + FN)
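    # e.g. TP=60, FP=20, FN=40 would give f1 = 120/180 ~ 0.67 (loss = 0.33)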
# print "AUC: {}".format(auc)
print "F1: {}".format(f1)
return{'loss': 1-f1, 'status': STATUS_OK}
space = {'C': hp.loguniform('C', -5., 4.),
'cw': hp.choice('cw', np.arange(1, 300, 1, dtype=int)),
# 'n_clusters': hp.choice('n_clusters', np.arange(3, 22, 1, dtype=int))}
'n_pca': hp.choice('n_pca', np.arange(5, 18, 1, dtype=int))}
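# Note: hp.loguniform('C', -5., 4.) draws C so that log(C) is uniform on
# [-5, 4], i.e. C spans roughly exp(-5) ~ 0.007 to exp(4) ~ 55.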
trials = Trials()
best = fmin(fn=objective,
space=space,
algo=tpe.suggest,
max_evals=500,
trials=trials)
print hyperopt.space_eval(space, best)
best_pars = hyperopt.space_eval(space, best)
# Load blind test data
file_tgt = 'LMC_SC19_PSF_Pgood98__vast_lightcurve_statistics.log'
file_tgt = os.path.join(data_dir, file_tgt)
X_tgt, feature_names, df, df_orig = load_data_tgt(file_tgt, names, names_to_delete,
delta)
# Fit model on all training data
clf = LogisticRegression(C=best_pars['C'], class_weight={0: 1,
1: best_pars['cw']},
random_state=1, max_iter=300, n_jobs=1, tol=10.**(-5))
# cal_clf = CalibratedClassifierCV(clf, method='sigmoid')
pca = decomposition.PCA(n_components=best_pars['n_pca'], random_state=1)
# ward = FeatureAgglomeration(n_clusters=best_pars['n_clusters'])
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
# estimators.append(('func', FunctionTransformer(log_axis, kw_args={'names':
# predictors})))
estimators.append(('scaler', StandardScaler()))
# estimators.append(('ward', ward))
estimators.append(('pca', pca))
estimators.append(('clf', clf))
pipeline = Pipeline(estimators)
pipeline.fit(X, y)
# y_probs = pipeline.predict(X_tgt)
from scipy.optimize import fmin
from sklearn.metrics import f1_score
# thresh = fmin(lambda x: 1-f1_score(y, y_probs > x), 0.75, xtol=10**(-6),
# ftol=10**(-6), maxiter=10**5)
# thresh = 1. - np.array(np.count_nonzero(y_probs), dtype=float) / np.count_nonzero(1. - y_probs)
thresh = 0.5
# Predict classes on new data
y_probs = pipeline.predict_proba(X_tgt)[:, 1]
idx = y_probs > thresh
idx_ = y_probs < thresh
gb_no = list(df_orig['star_ID'][idx_])
print("Found {} variables".format(np.count_nonzero(idx)))
with open('lr_results.txt', 'w') as fo:
for line in list(df_orig['star_ID'][idx]):
fo.write(line + '\n')
# Check F1
with open('clean_list_of_new_variables.txt', 'r') as fo:
news = fo.readlines()
news = [line.strip().split(' ')[1] for line in news]
news = set(news)
with open('lr_results.txt', 'r') as fo:
gb = fo.readlines()
gb = [line.strip().split('_')[4].split('.')[0] for line in gb]
gb = set(gb)
print "Among new vars found {}".format(len(news.intersection(gb)))
with open('candidates_50perc_threshold.txt', 'r') as fo:
c50 = fo.readlines()
c50 = [line.strip("\", ', \", \n, }, {") for line in c50]
with open('variables_not_in_catalogs.txt', 'r') as fo:
not_in_cat = fo.readlines()
nic = [line.strip().split(' ')[1] for line in not_in_cat]
# Catalogue variables
cat_vars = set(c50).difference(set(nic))
# Non-catalogue variable
noncat_vars = set([line.strip().split(' ')[1] for line in not_in_cat if 'CST'
not in line])
# All variables
all_vars = news.union(cat_vars).union(noncat_vars)
gb_no = set([line.strip().split('_')[4].split('.')[0] for line in gb_no])
found_bad = '181193' in gb
print "Found known variable : ", found_bad
FN = len(gb_no.intersection(all_vars))
TP = len(all_vars.intersection(gb))
TN = len(gb_no) - FN
FP = len(gb) - TP
recall = float(TP) / (TP + FN)
precision = float(TP) / (TP + FP)
F1 = 2 * precision * recall / (precision + recall)
print "precision: {}".format(precision)
print "recall: {}".format(recall)
print "F1: {}".format(F1)
print "TN={}, FP={}".format(TN, FP)
print "FN={}, TP={}".format(FN, TP)
| mit |
massmutual/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
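    # e.g. comb2(4) == 6, the number of unordered pairs among 4 samples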
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
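    Examples
    --------
    A small illustrative case, two true classes against three predicted
    clusters::
    >>> from sklearn.metrics.cluster import contingency_matrix
    >>> contingency_matrix([0, 0, 1, 1], [0, 0, 1, 2])
    array([[2, 0, 0],
           [0, 1, 1]])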
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1, even after permuting the labels::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
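    Examples
    --------
    Identical labelings carry mutual information equal to the entropy of the
    labels, here ``log(2)``::
    >>> from sklearn.metrics.cluster import mutual_info_score
    >>> mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])  # doctest: +ELLIPSIS
    0.69...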
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper limited by 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (i.e. perfectly matched). Random partitions (independent labellings)
        have an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
glemaitre/UnbalancedDataset | imblearn/under_sampling/prototype_selection/tests/test_condensed_nearest_neighbour.py | 2 | 3904 | """Test the module condensed nearest neighbour."""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
from __future__ import print_function
import numpy as np
from sklearn.utils.testing import assert_array_equal
from pytest import raises
from sklearn.neighbors import KNeighborsClassifier
from imblearn.under_sampling import CondensedNearestNeighbour
RND_SEED = 0
X = np.array([[2.59928271, 0.93323465], [0.25738379, 0.95564169],
[1.42772181, 0.526027], [1.92365863, 0.82718767],
[-0.10903849, -0.12085181], [-0.284881, -0.62730973],
[0.57062627, 1.19528323], [0.03394306, 0.03986753],
[0.78318102, 2.59153329], [0.35831463, 1.33483198],
[-0.14313184, -1.0412815], [0.01936241, 0.17799828],
[-1.25020462, -0.40402054], [-0.09816301, -0.74662486],
[-0.01252787, 0.34102657], [0.52726792, -0.38735648],
[0.2821046, -0.07862747], [0.05230552, 0.09043907],
[0.15198585, 0.12512646], [0.70524765, 0.39816382]])
Y = np.array([1, 2, 1, 1, 0, 2, 2, 2, 2, 2, 2, 0, 1, 2, 2, 2, 2, 1, 2, 1])
def test_cnn_init():
cnn = CondensedNearestNeighbour(random_state=RND_SEED)
assert cnn.n_seeds_S == 1
assert cnn.n_jobs == 1
def test_cnn_fit_sample():
cnn = CondensedNearestNeighbour(random_state=RND_SEED)
X_resampled, y_resampled = cnn.fit_sample(X, Y)
X_gt = np.array([[-0.10903849, -0.12085181], [0.01936241, 0.17799828],
[0.05230552, 0.09043907], [-1.25020462, -0.40402054],
[0.70524765, 0.39816382], [0.35831463, 1.33483198],
[-0.284881, -0.62730973], [0.03394306, 0.03986753],
[-0.01252787, 0.34102657], [0.15198585, 0.12512646]])
y_gt = np.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 2])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_cnn_fit_sample_with_indices():
cnn = CondensedNearestNeighbour(return_indices=True, random_state=RND_SEED)
X_resampled, y_resampled, idx_under = cnn.fit_sample(X, Y)
X_gt = np.array([[-0.10903849, -0.12085181], [0.01936241, 0.17799828],
[0.05230552, 0.09043907], [-1.25020462, -0.40402054],
[0.70524765, 0.39816382], [0.35831463, 1.33483198],
[-0.284881, -0.62730973], [0.03394306, 0.03986753],
[-0.01252787, 0.34102657], [0.15198585, 0.12512646]])
y_gt = np.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 2])
idx_gt = np.array([4, 11, 17, 12, 19, 9, 5, 7, 14, 18])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert_array_equal(idx_under, idx_gt)
def test_cnn_fit_sample_with_object():
knn = KNeighborsClassifier(n_neighbors=1)
cnn = CondensedNearestNeighbour(random_state=RND_SEED, n_neighbors=knn)
X_resampled, y_resampled = cnn.fit_sample(X, Y)
X_gt = np.array([[-0.10903849, -0.12085181], [0.01936241, 0.17799828],
[0.05230552, 0.09043907], [-1.25020462, -0.40402054],
[0.70524765, 0.39816382], [0.35831463, 1.33483198],
[-0.284881, -0.62730973], [0.03394306, 0.03986753],
[-0.01252787, 0.34102657], [0.15198585, 0.12512646]])
y_gt = np.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 2])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
cnn = CondensedNearestNeighbour(random_state=RND_SEED, n_neighbors=1)
X_resampled, y_resampled = cnn.fit_sample(X, Y)
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_cnn_fit_sample_with_wrong_object():
knn = 'rnd'
cnn = CondensedNearestNeighbour(random_state=RND_SEED, n_neighbors=knn)
with raises(ValueError, match="has to be a int or an "):
cnn.fit_sample(X, Y)
| mit |
gpospelov/BornAgain | Examples/varia/PolarizedSpinAsymmetry.py | 1 | 9308 | """
This simulation example demonstrates how to replicate the
fitting example "Magnetically Dead Layers in Spinel Films"
given on the NIST website:
https://www.nist.gov/ncnr/magnetically-dead-layers-spinel-films
For simplicity, here we only reproduce the first part of that
demonstration without the magnetically dead layer.
"""
# import bornagain
import bornagain as ba
from bornagain import deg, angstrom, nm
import numpy
import matplotlib.pyplot as plt
# import more libs needed for data processing
from re import match, DOTALL
from sys import argv
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
from os.path import isfile
# q-range on which the simulation and fitting are to be performed
qmin = 0.05997
qmax = 1.96
# number of points on which the computed result is plotted
scan_size = 1500
# The SLD of the substrate is kept constant
sldMao = (5.377e-06, 0)
# constant to convert between magnetization and magnetic SLD
RhoMconst = 2.910429812376859e-12
####################################################################
# Create Sample and Simulation #
####################################################################
def get_sample(params):
"""
construct the sample with the given parameters
"""
magnetizationMagnitude = params["rhoM_Mafo"]*1e-6/RhoMconst
angle = 0
magnetizationVector = ba.kvector_t(
magnetizationMagnitude*numpy.sin(angle*deg),
magnetizationMagnitude*numpy.cos(angle*deg), 0)
mat_vacuum = ba.MaterialBySLD("Vacuum", 0.0, 0.0)
mat_layer = ba.MaterialBySLD("(Mg,Al,Fe)3O4", params["rho_Mafo"]*1e-6, 0,
magnetizationVector)
mat_substrate = ba.MaterialBySLD("MgAl2O4", *sldMao)
ambient_layer = ba.Layer(mat_vacuum)
layer = ba.Layer(mat_layer, params["t_Mafo"]*angstrom)
substrate_layer = ba.Layer(mat_substrate)
r_Mafo = ba.LayerRoughness()
r_Mafo.setSigma(params["r_Mafo"]*angstrom)
r_substrate = ba.LayerRoughness()
r_substrate.setSigma(params["r_Mao"]*angstrom)
multi_layer = ba.MultiLayer()
multi_layer.addLayer(ambient_layer)
multi_layer.addLayerWithTopRoughness(layer, r_Mafo)
multi_layer.addLayerWithTopRoughness(substrate_layer, r_substrate)
return multi_layer
def get_simulation(q_axis, parameters, polarization, analyzer):
"""
Returns a simulation object.
Polarization, analyzer and resolution are set
from given parameters
"""
simulation = ba.SpecularSimulation()
q_axis = q_axis + parameters["q_offset"]
scan = ba.QSpecScan(q_axis)
dq = parameters["q_res"]*q_axis
n_sig = 4.0
n_samples = 25
distr = ba.RangedDistributionGaussian(n_samples, n_sig)
scan.setAbsoluteQResolution(distr, parameters["q_res"])
simulation.beam().setPolarization(polarization)
simulation.setAnalyzerProperties(analyzer, 1.0, 0.5)
simulation.setScan(scan)
return simulation
def run_simulation(q_axis, fitParams, *, polarization, analyzer):
"""
Run a simulation on the given q-axis, where the sample is
constructed with the given parameters.
Vectors for polarization and analyzer need to be provided
"""
parameters = dict(fitParams, **fixedParams)
sample = get_sample(parameters)
simulation = get_simulation(q_axis, parameters, polarization, analyzer)
simulation.setSample(sample)
simulation.runSimulation()
return simulation
def qr(result):
"""
Returns two arrays that hold the q-values as well as the
reflectivity from a given simulation result
"""
q = numpy.array(result.result().axis(ba.Axes.QSPACE))
r = numpy.array(result.result().array(ba.Axes.QSPACE))
return q, r
####################################################################
# Plot Handling #
####################################################################
def plot(qs, rs, exps, labels, filename):
"""
Plot the simulated result together with the experimental data
"""
fig = plt.figure()
ax = fig.add_subplot(111)
for q, r, exp, l in zip(qs, rs, exps, labels):
ax.errorbar(exp[0],
exp[1],
xerr=exp[3],
yerr=exp[2],
fmt='.',
markersize=0.75,
linewidth=0.5)
ax.plot(q, r, label=l)
ax.set_yscale('log')
plt.legend()
plt.xlabel("Q [nm${}^{-1}$]")
plt.ylabel("R")
plt.tight_layout()
plt.savefig(filename)
def plotSpinAsymmetry(data_pp, data_mm, q, r_pp, r_mm, filename):
"""
Plot the simulated spin asymmetry as well its
experimental counterpart with errorbars
"""
# compute the errorbars of the spin asymmetry
delta = numpy.sqrt(4 * (data_pp[1]**2 * data_mm[2]**2 + \
data_mm[1]**2 * data_pp[2]**2 ) /
( data_pp[1] + data_mm[1] )**4 )
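    # delta is the Gaussian error propagation of the spin asymmetry
    # S = (R++ - R--) / (R++ + R--):
    # sigma_S = 2*sqrt(R--^2*sigma_++^2 + R++^2*sigma_--^2) / (R++ + R--)^2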
fig = plt.figure()
ax = fig.add_subplot(111)
ax.errorbar(data_pp[0], (data_pp[1] - data_mm[1])/(data_pp[1] + data_mm[1]),
xerr=data_pp[3],
yerr=delta,
fmt='.',
markersize=0.75,
linewidth=0.5)
ax.plot(q, (r_pp - r_mm)/(r_pp + r_mm))
plt.gca().set_ylim((-0.3, 0.5))
plt.xlabel("Q [nm${}^{-1}$]")
plt.ylabel("Spin asymmetry")
plt.tight_layout()
plt.savefig(filename)
####################################################################
# Data Handling #
####################################################################
def normalizeData(data):
"""
Removes duplicate q values from the input data,
normalizes it such that the maximum of the reflectivity is
unity and rescales the q-axis to inverse nm
"""
r0 = numpy.where(data[0] - numpy.roll(data[0], 1) == 0)
data = numpy.delete(data, r0, 1)
data[0] = data[0]/angstrom
data[3] = data[3]/angstrom
norm = numpy.max(data[1])
data[1] = data[1]/norm
data[2] = data[2]/norm
so = numpy.argsort(data[0])
data = data[:, so]
return data
def filterData(data, qmin, qmax):
minIndex = numpy.argmin(numpy.abs(data[0] - qmin))
maxIndex = numpy.argmin(numpy.abs(data[0] - qmax))
return data[:, minIndex:maxIndex + 1]
def get_Experimental_data(qmin, qmax):
if hasattr(get_Experimental_data, "raw_data"):
        data_pp = get_Experimental_data.data_pp
        data_mm = get_Experimental_data.data_mm
else:
input_Data = downloadAndExtractData()
data_pp = normalizeData(input_Data[0])
data_mm = normalizeData(input_Data[1])
get_Experimental_data.data_pp = data_pp
get_Experimental_data.data_mm = data_mm
get_Experimental_data.raw_data = True
return (filterData(data_pp, qmin, qmax), filterData(data_mm, qmin, qmax))
def downloadAndExtractData():
url = "https://www.nist.gov/document/spinelfilmzip"
if not isfile("spinelfilm.zip"):
downloadfile = urlopen(url)
with open("spinelfilm.zip", 'wb') as outfile:
outfile.write(downloadfile.read())
zipfile = ZipFile("spinelfilm.zip")
rawdata = zipfile.open("MAFO_Saturated.refl").\
read().decode("utf-8")
table_pp = match(
r'.*# "polarization": "\+\+"\n#.*?\n# "units".*?\n(.*?)#.*', rawdata,
DOTALL).group(1)
table_mm = match(
r'.*# "polarization": "\-\-"\n#.*?\n# "units".*?\n(.*?)#.*', rawdata,
DOTALL).group(1)
data_pp = numpy.genfromtxt(BytesIO(table_pp.encode()), unpack=True)
data_mm = numpy.genfromtxt(BytesIO(table_mm.encode()), unpack=True)
return (data_pp, data_mm)
####################################################################
# Main Function #
####################################################################
if __name__ == '__main__':
fixedParams = {
# parameters from our own fit run
'q_res': 0.010542945012551425,
'q_offset': 7.971243487467318e-05,
'rho_Mafo': 6.370140108715461,
'rhoM_Mafo': 0.27399566816062926,
't_Mafo': 137.46913056084736,
'r_Mao': 8.60487712674644,
'r_Mafo': 3.7844265311293483
}
def run_Simulation_pp(qzs, params):
return run_simulation(qzs,
params,
polarization=ba.kvector_t(0, 1, 0),
analyzer=ba.kvector_t(0, 1, 0))
def run_Simulation_mm(qzs, params):
return run_simulation(qzs,
params,
polarization=ba.kvector_t(0, -1, 0),
analyzer=ba.kvector_t(0, -1, 0))
qzs = numpy.linspace(qmin, qmax, scan_size)
q_pp, r_pp = qr(run_Simulation_pp(qzs, fixedParams))
q_mm, r_mm = qr(run_Simulation_mm(qzs, fixedParams))
data_pp, data_mm = get_Experimental_data(qmin, qmax)
plot([q_pp, q_mm], [r_pp, r_mm], [data_pp, data_mm], ["$++$", "$--$"],
f'MAFO_Saturated.pdf')
plotSpinAsymmetry(data_pp, data_mm, qzs, r_pp, r_mm,
"MAFO_Saturated_spin_asymmetry.pdf")
| gpl-3.0 |
davidbuzz/ardupilot | Tools/mavproxy_modules/lib/magcal_graph_ui.py | 108 | 8248 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pymavlink.mavutil import mavlink
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
import geodesic_grid as grid
class MagcalPanel(wx.Panel):
_status_markup_strings = {
mavlink.MAG_CAL_NOT_STARTED: 'Not started',
mavlink.MAG_CAL_WAITING_TO_START: 'Waiting to start',
mavlink.MAG_CAL_RUNNING_STEP_ONE: 'Step one',
mavlink.MAG_CAL_RUNNING_STEP_TWO: 'Step two',
mavlink.MAG_CAL_SUCCESS: '<span color="blue">Success</span>',
mavlink.MAG_CAL_FAILED: '<span color="red">Failed</span>',
}
_empty_color = '#7ea6ce'
_filled_color = '#4680b9'
def __init__(self, *k, **kw):
super(MagcalPanel, self).__init__(*k, **kw)
facecolor = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
fig = plt.figure(facecolor=facecolor, figsize=(1,1))
self._canvas = FigureCanvas(self, wx.ID_ANY, fig)
self._canvas.SetMinSize((300,300))
self._id_text = wx.StaticText(self, wx.ID_ANY)
self._status_text = wx.StaticText(self, wx.ID_ANY)
self._completion_pct_text = wx.StaticText(self, wx.ID_ANY)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._id_text)
sizer.Add(self._status_text)
sizer.Add(self._completion_pct_text)
sizer.Add(self._canvas, proportion=1, flag=wx.EXPAND)
self.SetSizer(sizer)
ax = fig.add_subplot(111, axis_bgcolor=facecolor, projection='3d')
self.configure_plot(ax)
def configure_plot(self, ax):
extra = .5
lim = grid.radius + extra
ax.set_xlim3d(-lim, lim)
ax.set_ylim3d(-lim, lim)
ax.set_zlim3d(-lim, lim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
self._polygons_collection = Poly3DCollection(
grid.sections_triangles,
edgecolors='#386694',
)
ax.add_collection3d(self._polygons_collection)
def update_status_from_mavlink(self, m):
status_string = self._status_markup_strings.get(m.cal_status, '???')
self._status_text.SetLabelMarkup(
'<b>Status:</b> %s' % status_string,
)
def mavlink_magcal_report(self, m):
self.update_status_from_mavlink(m)
self._completion_pct_text.SetLabel('')
def mavlink_magcal_progress(self, m):
facecolors = []
for i, mask in enumerate(m.completion_mask):
for j in range(8):
section = i * 8 + j
if mask & 1 << j:
facecolor = self._filled_color
else:
facecolor = self._empty_color
facecolors.append(facecolor)
self._polygons_collection.set_facecolors(facecolors)
self._canvas.draw()
self._id_text.SetLabelMarkup(
'<b>Compass id:</b> %d' % m.compass_id
)
self._completion_pct_text.SetLabelMarkup(
'<b>Completion:</b> %d%%' % m.completion_pct
)
self.update_status_from_mavlink(m)
_legend_panel = None
@staticmethod
def legend_panel(*k, **kw):
if MagcalPanel._legend_panel:
return MagcalPanel._legend_panel
p = MagcalPanel._legend_panel = wx.Panel(*k, **kw)
sizer = wx.BoxSizer(wx.HORIZONTAL)
p.SetSizer(sizer)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._empty_color)
sizer.Add(marker, flag=wx.ALIGN_CENTER)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections not hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._filled_color)
sizer.Add(marker, border=10, flag=wx.ALIGN_CENTER | wx.LEFT)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
return p
class MagcalFrame(wx.Frame):
def __init__(self, conn):
super(MagcalFrame, self).__init__(
None,
wx.ID_ANY,
title='Magcal Graph',
)
self.SetMinSize((300, 300))
self._conn = conn
self._main_panel = wx.ScrolledWindow(self, wx.ID_ANY)
self._main_panel.SetScrollbars(1, 1, 1, 1)
self._magcal_panels = {}
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._main_panel.SetSizer(self._sizer)
idle_text = wx.StaticText(self._main_panel, wx.ID_ANY)
idle_text.SetLabelMarkup('<i>No calibration messages received yet...</i>')
idle_text.SetForegroundColour('#444444')
self._sizer.AddStretchSpacer()
self._sizer.Add(
idle_text,
proportion=0,
flag=wx.ALIGN_CENTER | wx.ALL,
border=10,
)
self._sizer.AddStretchSpacer()
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.timer_callback, self._timer)
self._timer.Start(200)
def add_compass(self, id):
if not self._magcal_panels:
self._sizer.Clear(deleteWindows=True)
self._magcal_panels_sizer = wx.BoxSizer(wx.HORIZONTAL)
self._sizer.Add(
self._magcal_panels_sizer,
proportion=1,
flag=wx.EXPAND,
)
legend = MagcalPanel.legend_panel(self._main_panel, wx.ID_ANY)
self._sizer.Add(
legend,
proportion=0,
flag=wx.ALIGN_CENTER,
)
self._magcal_panels[id] = MagcalPanel(self._main_panel, wx.ID_ANY)
self._magcal_panels_sizer.Add(
self._magcal_panels[id],
proportion=1,
border=10,
flag=wx.EXPAND | wx.ALL,
)
def timer_callback(self, evt):
close_requested = False
mavlink_msgs = {}
while self._conn.poll():
m = self._conn.recv()
if isinstance(m, str) and m == 'close':
close_requested = True
continue
if m.compass_id not in mavlink_msgs:
# Keep the last two messages so that we get the last progress
# if the last message is the calibration report.
mavlink_msgs[m.compass_id] = [None, m]
else:
l = mavlink_msgs[m.compass_id]
l[0] = l[1]
l[1] = m
if close_requested:
self._timer.Stop()
self.Destroy()
return
if not mavlink_msgs:
return
needs_fit = False
for k in mavlink_msgs:
if k not in self._magcal_panels:
self.add_compass(k)
needs_fit = True
if needs_fit:
self._sizer.Fit(self)
for k, l in mavlink_msgs.items():
for m in l:
if not m:
continue
panel = self._magcal_panels[k]
if m.get_type() == 'MAG_CAL_PROGRESS':
panel.mavlink_magcal_progress(m)
elif m.get_type() == 'MAG_CAL_REPORT':
panel.mavlink_magcal_report(m)
| gpl-3.0 |
bhargav/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
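    # logistic sigmoid: sigma(x) = 1 / (1 + exp(-x)), squashing any real x into (0, 1)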
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
mariosky/evo-drawings | venv/lib/python2.7/site-packages/numpy/fft/fftpack.py | 35 | 42179 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermitian transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
    except IndexError:
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
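# Illustrative sketch (not part of the original module): the plan-cache
# pattern used by _raw_fft above, shown in isolation.  `make_plan` and `work`
# are hypothetical callables; the point is that list.pop() atomically hands
# one work buffer to the calling thread, and it is only appended back once
# the computation has finished with it.
def _demo_cached_plan(n, cache, make_plan, work):
    try:
        plan = cache.setdefault(n, []).pop()  # atomic retrieve-and-remove
    except IndexError:
        plan = make_plan(n)                   # cache miss: build a new plan
    result = work(plan)                       # use the plan exclusively
    cache[n].append(plan)                     # hand it back only when done
    return result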
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
    # Normalize the (s, axes) arguments shared by the n-dimensional
    # transforms: fill in defaults, validate matching lengths and, for
    # inverse real transforms, infer the full length of the last axis.
    if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
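# Illustrative sketch (not part of the original module): what _cook_nd_args
# returns for a few typical argument combinations, assuming a 3-D input
# array ``a`` of shape (4, 5, 6).  Shape sequences are shown as plain lists
# for brevity.
#
#   _cook_nd_args(a)               -> ([4, 5, 6],  [-3, -2, -1])
#   _cook_nd_args(a, axes=(0, 2))  -> ([4, 6],     (0, 2))
#   _cook_nd_args(a, s=(8, 8))     -> ([8, 8],     [-2, -1])
#   _cook_nd_args(a, invreal=1)    -> ([4, 5, 10], [-3, -2, -1])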
def _raw_fftnd(a, s=None, axes=None, function=fft):
    # Apply the 1-D transform `function` along each requested axis in
    # reverse order; the composition gives the n-dimensional transform.
    a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
return a
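# Illustrative sketch (not part of the original module): for a 2-D array the
# loop in _raw_fftnd amounts to two nested 1-D transforms.  The hypothetical
# check below uses the public numpy.fft API only for comparison.
def _demo_fftn_as_nested_fft(a):
    import numpy as np
    nested = np.fft.fft(np.fft.fft(a, axis=-1), axis=-2)
    return np.allclose(np.fft.fftn(a), nested)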
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` mean that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` mean that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft)
def ifft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
def rfft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` mean that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
def irfft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes)
| agpl-3.0 |
heroxbd/SHTOOLS | examples/python/GlobalSpectralAnalysis/SHRealSpectralAnalysis.py | 1 | 4386 | #!/usr/bin/env python
"""
This script tests the real spherical harmonic spectral analysis routines on
the Mars topography data set
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
from pyshtools import shtools
# set shtools plot style:
sys.path.append(os.path.join(os.path.dirname(__file__), "../Common"))
from FigStyle import style_shtools
mpl.rcParams.update(style_shtools)
# ==== MAIN FUNCTION ====
def main():
test_RealSpectralAnalysis()
example()
def test_RealSpectralAnalysis():
# ---- input parameters ----
lmax = 5
ls = np.arange(lmax + 1)
mask = np.zeros((2, lmax + 1, lmax + 1), dtype=np.bool)
for l in np.arange(lmax + 1):
mask[:, l, :l + 1] = True
mask[1, :, 0] = False
print('\n---- testing SHPower/DensityL, SHPowerSpectrum/Density ----')
print('generating normal distributed coefficients with variance 1...')
coeffs1 = np.random.normal(size=(2, lmax + 1, lmax + 1))
coeffs1[np.invert(mask)] = 0.
spec1 = np.array([shtools.SHPowerL(coeffs1, l) for l in ls])
spec2 = shtools.SHPowerSpectrum(coeffs1)
print('tot power computed with SHPowerL={:2.2f}'.format(np.sum(spec1)))
print('tot power computed with SHPowerSpectrum={:2.2f}'.format(
np.sum(spec2)))
spec1 = np.array([shtools.SHPowerDensityL(coeffs1, l) for l in ls])
spec2 = shtools.SHPowerSpectrumDensity(coeffs1)
print('tot power computed with SHPowerDensityL={:2.2f}'.format(
np.sum(spec1 * (2 * ls + 1))))
print('tot power computed with SHPowerSpectrumDensity={:2.2f}'.format(
np.sum(spec2 * (2 * ls + 1))))
    print('\n---- testing SHCrossPower/DensityL, ' +
          'SHCrossPowerSpectrum/Density ----')
print('generating two sets of normal distributed coefficients ' +
'with variance 1...')
coeffs2 = np.random.normal(size=(2, lmax + 1, lmax + 1))
coeffs2[np.invert(mask)] = 0.
spec1 = np.array([shtools.SHCrossPowerL(coeffs1, coeffs2, l) for l in ls])
spec2 = shtools.SHCrossPowerSpectrum(coeffs1, coeffs2)
print('tot cpower computed with SHCrossPowerL={:2.2f}'.format(
np.sum(spec1)))
print('tot cpower computed with SHCrossPowerSpectrum={:2.2f}'.format(
np.sum(spec2)))
spec1 = np.array([shtools.SHCrossPowerDensityL(coeffs1, coeffs2, l)
for l in ls])
spec2 = shtools.SHCrossPowerSpectrumDensity(coeffs1, coeffs2)
print('tot cpower computed with SHCrossPowerDensityL={:2.2f}'.format(
np.sum(spec1 * (2 * ls + 1))))
print('tot cpower computed with SHCrossPowerSpectrumDensity={:2.2f}'
.format(np.sum(spec2 * (2 * ls + 1))))
print('\n---- testing SHAdmitCorr and SHConfidence ----')
admit, dadmit, corr = shtools.SHAdmitCorr(coeffs1, coeffs2)
confidence = np.array([shtools.SHConfidence(l, corr[l]) for l in ls])
print('admittance:', admit)
print('admittance error:', dadmit)
print('correlation:', corr)
print('confidence:', confidence)
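# Illustrative note (not part of the original script): each pair of totals
# printed above should agree because summing the per-degree power S(l) over
# l, or weighting the per-degree power density S(l)/(2l+1) back by (2l+1),
# both recover the total power of the coefficient set (a discrete
# Parseval-type identity).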
# ==== PLOT POWER SPECTRA ====
def example():
"""
example that plots the power spectrum of Mars topography data
"""
# --- input data filename ---
infile = os.path.join(os.path.dirname(__file__),
'../../ExampleDataFiles/MarsTopo719.shape')
coeffs, lmax = shtools.SHRead(infile, 719)
lmax = coeffs.shape[1] - 1
# --- plot grid ---
grid = shtools.MakeGridDH(coeffs, csphase=-1)
fig_map = plt.figure()
plt.imshow(grid)
# ---- compute spectrum ----
ls = np.arange(lmax + 1)
pspectrum = shtools.SHPowerSpectrum(coeffs)
pdensity = shtools.SHPowerSpectrumDensity(coeffs)
# ---- plot spectrum ----
fig_spectrum, ax = plt.subplots(1, 1)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('degree l')
ax.grid(True, which='both')
ax.plot(ls[1:], pspectrum[1:], label='power per degree l')
ax.plot(ls[1:], pdensity[1:], label='power per degree l and order m')
ax.legend()
fig_map.savefig('SHRtopography_mars.png')
fig_spectrum.savefig('SHRspectrum_mars.png')
print('mars topography and spectrum saved')
# plt.show()
# ==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
| bsd-3-clause |
hainm/scikit-learn | sklearn/neighbors/unsupervised.py | 106 | 4461 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p: integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
| bsd-3-clause |
AlexRobson/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
leal26/AeroPy | examples/morphing/flight_conditions/morphed/range_constant_aoa.py | 2 | 6228 | import aeropy.xfoil_module as xf
from aeropy.geometry.airfoil import CST, create_x
from aeropy.morphing.camber_2D import *
from aeropy.aero_module import air_properties, Reynolds, LLT_calculator
from scipy.interpolate import griddata, RegularGridInterpolator
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
import math
import scipy
import scipy.optimize
import scipy.integrate
def aircraft_range_varying_V(f_L, f_LD, AOA):
def to_integrate(weight):
# velocity = 0.514444*108 # m/s (113 KTAS)
def calculate_velocity(AOA):
def residual(V):
CL = f_L([V, AOA])[0]
span = 11
chord_root = span/16.2
return abs(V - math.sqrt(weight/(.5*density*(span*chord_root))))
res = scipy.optimize.minimize(residual, 30, bounds = [[20, 65],])#, options={'ftol':1e-9})
return res.x[0]
velocity = calculate_velocity(AOA)
lift_to_drag = f_LD([velocity, AOA])
span = 10.9728
RPM = 1800
        a = 0.3089  # (lb/hr)/BHP (fuel-flow slope per unit brake horsepower)
b = 0.008*RPM+19.607 # lb/hr
lbhr_to_kgs = 0.000125998
BHP_to_watt = 745.7
eta = 0.85
thrust = weight/lift_to_drag
power_SI = thrust*velocity/eta
power_BHP = power_SI/BHP_to_watt
mass_flow = (a*power_BHP + b)
mass_flow_SI = mass_flow*lbhr_to_kgs
SFC = mass_flow_SI/thrust
dR = velocity/g/SFC*lift_to_drag/weight
return dR*0.001 # *0.0005399
AOA_list = []
    g = 9.81  # m/s**2 (gravitational acceleration)
fuel = 56*6.01*0.4535*g
initial_weight = 1111*g
final_weight = initial_weight-fuel
x = np.linspace(final_weight, initial_weight, 100)
y = []
for x_i in x:
y.append(to_integrate(x_i)[0])
range = scipy.integrate.simps(y, x)
return range
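# Illustrative note (not part of the original script): the integrand built in
# to_integrate() is the differential form of the Breguet range equation,
#     dR/dW = (V / (g * SFC)) * (L/D) / W,
# integrated numerically over the weight interval [W_final, W_initial] with
# Simpson's rule (scipy.integrate.simps).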
# ==============================================================================
# Inputs
# ==============================================================================
altitude = 10000 # ft
air_props = air_properties(altitude, unit='feet')
density = air_props['Density']
# data = pandas.read_csv('performance_grid.csv')
# psi_spars = [0.1, 0.3, 0.6, 0.8]
# c_P = 1.0
# ranges = []
# for i in range(len(data.values)):
# AC = data.values[i,0:4]
# velocity = data.values[i,-4]
# AOA = data.values[i,-5]
# cl= data.values[i,-3]
# cd = data.values[i,-2]
# CL, CD = coefficient_LLT(AC, velocity, AOA)
# data.values[i, -3] = CL
# data.values[i, -2] = CD
# data.values[i, -1] = CL/CD
# print(i, CL, CD)
# data = data.drop_duplicates()
import pickle
# f = open('wing.p', 'wb')
# pickle.dump(data, f)
# f.close()
state = 'morphed'
concepts = ['NACA0012', 'NACA4415', 'NACA641212', 'glider']
#
# plt.figure()
# for concept in concepts:
# mat = scipy.io.loadmat(state + '_' + concept)
# aoa = mat['aoa'][0]
# velocity = mat['V'][0]
# cl = mat['CL'].T
# LD_ratio = mat['lift_to_drag']
# # print(aoa)
# # print(velocity)
# # print(cl)
# f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
# f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = [20]
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_L(data_i), label = concept)
# # plt.scatter(aoas, f_L((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.ylabel('cl')
# plt.show()
# plt.figure()
# for concept in concepts:
# mat = scipy.io.loadmat(state + '_' + concept)
# aoa = mat['aoa'][0]
# velocity = mat['V'][0]
# cl = mat['CL'].T
# LD_ratio = mat['lift_to_drag']
# f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
# f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = [20]
# aoas = np.linspace(0,12,100)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_LD(data_i), label = concept)
# # plt.scatter(aoas, f_LD((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.ylabel('Lift-to-drag ratio')
# plt.show()
range_data = {}
plt.figure()
for concept in concepts:
data = np.loadtxt('./'+state + '_' + concept + '.txt')
aoa = np.unique(data[:,0])
velocity = np.unique(data[:,1])
cl = data[:,2].reshape([200,200])
LD_ratio = data[:,3].reshape([200,200])
f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = np.linspace(20, 65, 7)
# plt.figure()
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_L(data_i), label = velocity[i])
# # plt.scatter(aoas, f_L((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.show()
# plt.figure()
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_LD(data_i), label = velocity[i])
# # plt.scatter(aoas, f_LD((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.show()
ranges = []
# velocity = np.linspace(20, 60, 5)
for i in range(len(aoa)):
range_i = aircraft_range_varying_V(f_L, f_LD, aoa[i])
# plt.plot(np.arange(len(AOA_i)), AOA_i, label=velocity[i])
# plt.scatter(np.arange(len(AOA_i)),AOA_i)
print(i, aoa[i], range_i)
ranges.append(range_i)
# print(velocity[36])
range_data[concept] = ranges
plt.plot(aoa, ranges, lw=2, label=concept)
f = open('ranges_aoa.p', 'wb')
pickle.dump(range_data, f)
f.close()
# plt.xlim(min(velocity), max(velocity))
# plt.ylim(min(ranges), max(ranges))
plt.xlabel('Angle of Attack')
plt.ylabel('Range (km)')
plt.legend()
plt.show()
| mit |
timsnyder/bokeh | sphinx/source/docs/user_guide/examples/categorical_heatmap_unemployment.py | 11 | 1638 | import pandas as pd
from bokeh.io import output_file, show
from bokeh.models import BasicTicker, ColorBar, ColumnDataSource, LinearColorMapper, PrintfTickFormatter
from bokeh.plotting import figure
from bokeh.sampledata.unemployment1948 import data
from bokeh.transform import transform
output_file("unemploymemt.html")
data.Year = data.Year.astype(str)
data = data.set_index('Year')
data.drop('Annual', axis=1, inplace=True)
data.columns.name = 'Month'
# reshape to 1D array of rates with a month and year for each row.
df = pd.DataFrame(data.stack(), columns=['rate']).reset_index()
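# Illustrative note (not part of the original example): after the stack the
# frame is in long form, one row per (Year, Month) pair, roughly:
#
#     Year  Month   rate
#     1948  Jan      4.0
#     1948  Feb      4.7
#     ...
#
# (the rate values shown here are placeholders, not actual figures).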
source = ColumnDataSource(df)
# this is the colormap from the original NYTimes plot
colors = ["#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce", "#ddb7b1", "#cc7878", "#933b41", "#550b1d"]
mapper = LinearColorMapper(palette=colors, low=df.rate.min(), high=df.rate.max())
p = figure(plot_width=800, plot_height=300, title="US Unemployment 1948—2016",
x_range=list(data.index), y_range=list(reversed(data.columns)),
toolbar_location=None, tools="", x_axis_location="above")
p.rect(x="Year", y="Month", width=1, height=1, source=source,
line_color=None, fill_color=transform('rate', mapper))
color_bar = ColorBar(color_mapper=mapper, location=(0, 0),
ticker=BasicTicker(desired_num_ticks=len(colors)),
formatter=PrintfTickFormatter(format="%d%%"))
p.add_layout(color_bar, 'right')
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = 1.0
show(p)
| bsd-3-clause |
tody411/InverseToon | inversetoon/core/light_estimation/light_estimation_lumo.py | 1 | 1797 | # -*- coding: utf-8 -*-
## @package inversetoon.core.light_estimation.light_estimation_lumo
#
# inversetoon.core.light_estimation.light_estimation_lumo utility package.
# @author tody
# @date 2015/10/04
import numpy as np
import matplotlib.pyplot as plt
from inversetoon.core.lumo import lumoNormal
from inversetoon.core.light_estimation.light_estimation_common import testToon
from inversetoon.np.norm import normalizeVector
def foreGroundSamples(A_8U, num_xs=30, num_ys=30, alpha=100):
h, w = A_8U.shape[:2]
xs = np.linspace(0, w - 1, num_xs)
ys = np.linspace(0, h - 1, num_ys)
xs = np.int32(xs)
ys = np.int32(ys)
coords = [(x, y) for y in ys for x in xs if A_8U[y, x] > alpha]
coords = np.array(coords)
return coords
def luminanceClusters(Is, num_bins=16):
I_min = np.min(Is)
I_max = np.max(Is)
I_ids = np.int32((Is - I_min) * (num_bins - 1) / (I_max - I_min))
return I_ids
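# Illustrative sketch (not part of the original module): how the binning in
# luminanceClusters behaves on a small luminance array.
# >>> luminanceClusters(np.array([0.0, 0.25, 0.5, 1.0]), num_bins=4)
# array([0, 0, 1, 3], dtype=int32)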
def estimateLightByCluster(N_lumo, I_32F, p_samples):
xs, ys = p_samples.T
Is = I_32F[ys, xs]
Ns = N_lumo[ys, xs, :]
I_ids = luminanceClusters(Is)
I_id_max = np.max(I_ids)
Ns_bright = Ns[I_ids == I_id_max, :]
L = np.sum(Ns_bright, axis=0)
L = normalizeVector(L)
    print(L)
return L
def estimateLightDir(input_data):
N_sil = input_data["N_sil"]
I_sil = input_data["I_sil"]
cvs_sil = input_data["cvs_sil"]
I_32F = input_data["I"]
A_8U = input_data["A"]
p_samples = foreGroundSamples(A_8U)
plt.imshow(A_8U)
plt.scatter(p_samples[:, 0], p_samples[:, 1])
plt.show()
N_lumo = lumoNormal(A_8U)
L = estimateLightByCluster(N_lumo, I_32F, p_samples)
output_data = {"L": L}
return output_data
if __name__ == '__main__':
testToon("Lumo", estimateLightDir) | mit |
IntelLabs/hpat | examples/series_setitem_int.py | 1 | 1794 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
from numba import njit
@njit
def series_setitem():
value = 0
series = pd.Series(np.arange(5, 0, -1)) # Series of 5, 4, 3, 2, 1
series[0] = value
return series # result Series of 0, 4, 3, 2, 1
print(series_setitem())
| bsd-2-clause |
JesseLivezey/pymc3 | pymc3/plots.py | 3 | 15589 | import numpy as np
from scipy.stats import kde
from .stats import *
from numpy.linalg import LinAlgError
__all__ = ['traceplot', 'kdeplot', 'kde2plot', 'forestplot', 'autocorrplot']
def traceplot(trace, vars=None, figsize=None,
lines=None, combined=False, grid=True,
alpha=0.35, ax=None):
"""Plot samples histograms and values
Parameters
----------
trace : result of MCMC run
vars : list of variable names
Variables to be plotted, if None all variable are plotted
figsize : figure size tuple
If None, size is (12, num of variables * 2) inch
lines : dict
Dictionary of variable name / value to be overplotted as vertical
lines to the posteriors and horizontal lines on sample values
e.g. mean of posteriors, true values of a simulation
combined : bool
Flag for combining multiple chains into a single chain. If False
(default), chains will be plotted separately.
grid : bool
Flag for adding gridlines to histogram. Defaults to True.
ax : axes
Matplotlib axes. Defaults to None.
Returns
-------
ax : matplotlib axes
"""
import matplotlib.pyplot as plt
if vars is None:
vars = trace.varnames
n = len(vars)
if figsize is None:
figsize = (12, n*2)
if ax is None:
fig, ax = plt.subplots(n, 2, squeeze=False, figsize=figsize)
elif ax.shape != (n,2):
print('traceplot requires n*2 subplots')
return None
for i, v in enumerate(vars):
for d in trace.get_values(v, combine=combined, squeeze=False):
d = np.squeeze(d)
d = make_2d(d)
if d.dtype.kind == 'i':
histplot_op(ax[i, 0], d, alpha=alpha)
else:
kdeplot_op(ax[i, 0], d)
ax[i, 0].set_title(str(v))
ax[i, 0].grid(grid)
ax[i, 1].set_title(str(v))
ax[i, 1].plot(d, alpha=alpha)
ax[i, 0].set_ylabel("Frequency")
ax[i, 1].set_ylabel("Sample value")
if lines:
try:
ax[i, 0].axvline(x=lines[v], color="r", lw=1.5)
ax[i, 1].axhline(y=lines[v], color="r", lw=1.5, alpha=alpha)
except KeyError:
pass
plt.tight_layout()
return ax
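# Minimal usage sketch (assumes `import pymc3 as pm` and a hypothetical model;
# the variable name 'mu' and the overlaid line value are illustrative only):
#
#   with pm.Model():
#       mu = pm.Normal('mu', mu=0, sd=1)
#       trace = pm.sample(1000)
#   axs = traceplot(trace, vars=['mu'], lines={'mu': 0.0})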
def histplot_op(ax, data, alpha=.35):
for i in range(data.shape[1]):
d = data[:, i]
mind = np.min(d)
maxd = np.max(d)
step = max((maxd-mind)//100, 1)
ax.hist(d, bins=range(mind, maxd + 2, step), alpha=alpha, align='left')
ax.set_xlim(mind - .5, maxd + .5)
def kdeplot_op(ax, data):
errored = []
for i in range(data.shape[1]):
d = data[:, i]
try:
density = kde.gaussian_kde(d)
l = np.min(d)
u = np.max(d)
x = np.linspace(0, 1, 100) * (u - l) + l
ax.plot(x, density(x))
except LinAlgError:
errored.append(i)
if errored:
ax.text(.27,.47, 'WARNING: KDE plot failed for: ' + str(errored), style='italic',
bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})
def make_2d(a):
"""Ravel the dimensions after the first.
"""
a = np.atleast_2d(a.T).T
#flatten out dimensions beyond the first
n = a.shape[0]
newshape = np.product(a.shape[1:]).astype(int)
a = a.reshape((n, newshape), order='F')
return a
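# For example (sketch): trailing dimensions are collapsed into one, so an
# array of shape (100, 3, 2) comes back with shape (100, 6):
#
#   make_2d(np.zeros((100, 3, 2))).shape  # -> (100, 6)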
def kde2plot_op(ax, x, y, grid=200):
    import matplotlib.pyplot as plt  # needed for the colormap used below
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
grid = grid * 1j
X, Y = np.mgrid[xmin:xmax:grid, ymin:ymax:grid]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = kde.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
extent=[xmin, xmax, ymin, ymax])
def kdeplot(data, ax=None):
    # matplotlib is imported lazily, as in the other plotting functions
    import matplotlib.pyplot as plt
    if ax is None:
        f, ax = plt.subplots(1, 1, squeeze=True)
    kdeplot_op(ax, data)
    return ax
def kde2plot(x, y, grid=200, ax=None):
    import matplotlib.pyplot as plt
    if ax is None:
        f, ax = plt.subplots(1, 1, squeeze=True)
    kde2plot_op(ax, x, y, grid)
    return ax
def autocorrplot(trace, vars=None, max_lag=100, burn=0, ax=None):
"""Bar plot of the autocorrelation function for a trace
Parameters
----------
trace : result of MCMC run
vars : list of variable names
        Variables to be plotted; if None, all variables are plotted
max_lag : int
Maximum lag to calculate autocorrelation. Defaults to 100.
burn : int
Number of samples to discard from the beginning of the trace.
Defaults to 0.
ax : axes
Matplotlib axes. Defaults to None.
Returns
-------
ax : matplotlib axes
"""
import matplotlib.pyplot as plt
if vars is None:
vars = trace.varnames
else:
vars = [str(var) for var in vars]
chains = trace.nchains
fig, ax = plt.subplots(len(vars), chains, squeeze=False)
max_lag = min(len(trace) - 1, max_lag)
for i, v in enumerate(vars):
for j in range(chains):
d = np.squeeze(trace.get_values(v, chains=[j], burn=burn,
combine=False))
ax[i, j].acorr(d, detrend=plt.mlab.detrend_mean, maxlags=max_lag)
if not j:
ax[i, j].set_ylabel("correlation")
ax[i, j].set_xlabel("lag")
if chains > 1:
ax[i, j].set_title("chain {0}".format(j+1))
return (fig, ax)
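# Minimal usage sketch (the trace is assumed to come from a pm.sample() run;
# the variable name 'mu' is illustrative only):
#
#   fig, axs = autocorrplot(trace, vars=['mu'], max_lag=50, burn=500)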
def var_str(name, shape):
"""Return a sequence of strings naming the element of the tallyable object.
This is a support function for forestplot.
:Example:
>>> var_str('theta', (4,))
['theta[1]', 'theta[2]', 'theta[3]', 'theta[4]']
"""
size = np.prod(shape)
ind = (np.indices(shape) + 1).reshape(-1, size)
names = ['[' + ','.join(map(str, i)) + ']' for i in zip(*ind)]
# if len(name)>12:
# name = '\n'.join(name.split('_'))
# name += '\n'
names[0] = '%s %s' % (name, names[0])
return names
def forestplot(trace_obj, vars=None, alpha=0.05, quartiles=True, rhat=True,
main=None, xtitle=None, xrange=None, ylabels=None,
chain_spacing=0.05, vline=0, gs=None):
""" Forest plot (model summary plot)
Generates a "forest plot" of 100*(1-alpha)% credible intervals for either
the set of variables in a given model, or a specified set of nodes.
:Arguments:
trace_obj: NpTrace or MultiTrace object
Trace(s) from an MCMC sample.
vars: list
List of variables to plot (defaults to None, which results in all
variables plotted).
alpha (optional): float
Alpha value for (1-alpha)*100% credible intervals (defaults to
0.05).
quartiles (optional): bool
Flag for plotting the interquartile range, in addition to the
(1-alpha)*100% intervals (defaults to True).
rhat (optional): bool
Flag for plotting Gelman-Rubin statistics. Requires 2 or more
chains (defaults to True).
main (optional): string
Title for main plot. Passing False results in titles being
suppressed; passing None (default) results in default titles.
xtitle (optional): string
Label for x-axis. Defaults to no label
xrange (optional): list or tuple
Range for x-axis. Defaults to matplotlib's best guess.
ylabels (optional): list or array
User-defined labels for each variable. If not provided, the node
__name__ attributes are used.
chain_spacing (optional): float
Plot spacing between chains (defaults to 0.05).
vline (optional): numeric
Location of vertical reference line (defaults to 0).
gs : GridSpec
Matplotlib GridSpec object. Defaults to None.
Returns
-------
gs : matplotlib GridSpec
"""
import matplotlib.pyplot as plt
from matplotlib import gridspec
# Quantiles to be calculated
qlist = [100 * alpha / 2, 50, 100 * (1 - alpha / 2)]
if quartiles:
qlist = [100 * alpha / 2, 25, 50, 75, 100 * (1 - alpha / 2)]
# Range for x-axis
plotrange = None
# Number of chains
chains = None
# Subplots
interval_plot = None
rhat_plot = None
nchains = trace_obj.nchains
if nchains > 1:
from .diagnostics import gelman_rubin
R = gelman_rubin(trace_obj)
if vars is not None:
R = {v: R[v] for v in vars}
else:
# Can't calculate Gelman-Rubin with a single trace
rhat = False
if vars is None:
vars = trace_obj.varnames
# Empty list for y-axis labels
labels = []
if gs is None:
# Initialize plot
if rhat and nchains > 1:
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
else:
gs = gridspec.GridSpec(1, 1)
# Subplot for confidence intervals
interval_plot = plt.subplot(gs[0])
trace_quantiles = quantiles(trace_obj, qlist, squeeze=False)
hpd_intervals = hpd(trace_obj, alpha, squeeze=False)
for j, chain in enumerate(trace_obj.chains):
# Counter for current variable
var = 1
for varname in vars:
var_quantiles = trace_quantiles[chain][varname]
quants = [var_quantiles[v] for v in qlist]
var_hpd = hpd_intervals[chain][varname].T
# Substitute HPD interval for quantile
quants[0] = var_hpd[0].T
quants[-1] = var_hpd[1].T
# Ensure x-axis contains range of current interval
if plotrange:
plotrange = [min(
plotrange[0],
np.min(quants)),
max(plotrange[1],
np.max(quants))]
else:
plotrange = [np.min(quants), np.max(quants)]
# Number of elements in current variable
value = trace_obj.get_values(varname, chains=[chain])[0]
k = np.size(value)
# Append variable name(s) to list
if not j:
if k > 1:
names = var_str(varname, np.shape(value))
labels += names
else:
labels.append(varname)
# labels.append('\n'.join(varname.split('_')))
# Add spacing for each chain, if more than one
e = [0] + [(chain_spacing * ((i + 2) / 2)) *
(-1) ** i for i in range(nchains - 1)]
# Deal with multivariate nodes
if k > 1:
for i, q in enumerate(np.transpose(quants).squeeze()):
# Y coordinate with jitter
y = -(var + i) + e[j]
if quartiles:
# Plot median
plt.plot(q[2], y, 'bo', markersize=4)
# Plot quartile interval
plt.errorbar(
x=(q[1],
q[3]),
y=(y,
y),
linewidth=2,
color='b')
else:
# Plot median
plt.plot(q[1], y, 'bo', markersize=4)
# Plot outer interval
plt.errorbar(
x=(q[0],
q[-1]),
y=(y,
y),
linewidth=1,
color='b')
else:
# Y coordinate with jitter
y = -var + e[j]
if quartiles:
# Plot median
plt.plot(quants[2], y, 'bo', markersize=4)
# Plot quartile interval
plt.errorbar(
x=(quants[1],
quants[3]),
y=(y,
y),
linewidth=2,
color='b')
else:
# Plot median
plt.plot(quants[1], y, 'bo', markersize=4)
# Plot outer interval
plt.errorbar(
x=(quants[0],
quants[-1]),
y=(y,
y),
linewidth=1,
color='b')
# Increment index
var += k
labels = ylabels if ylabels is not None else labels
# Update margins
left_margin = np.max([len(x) for x in labels]) * 0.015
gs.update(left=left_margin, right=0.95, top=0.9, bottom=0.05)
# Define range of y-axis
plt.ylim(-var + 0.5, -0.5)
datarange = plotrange[1] - plotrange[0]
plt.xlim(plotrange[0] - 0.05 * datarange, plotrange[1] + 0.05 * datarange)
# Add variable labels
plt.yticks([-(l + 1) for l in range(len(labels))], labels)
# Add title
if main is not False:
plot_title = main or str(int((
1 - alpha) * 100)) + "% Credible Intervals"
plt.title(plot_title)
# Add x-axis label
if xtitle is not None:
plt.xlabel(xtitle)
# Constrain to specified range
if xrange is not None:
plt.xlim(*xrange)
# Remove ticklines on y-axes
for ticks in interval_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in interval_plot.spines.items():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
# Reference line
plt.axvline(vline, color='k', linestyle='--')
    # Generate Gelman-Rubin plot
if rhat and nchains > 1:
# If there are multiple chains, calculate R-hat
rhat_plot = plt.subplot(gs[1])
if main is not False:
plt.title("R-hat")
# Set x range
plt.xlim(0.9, 2.1)
# X axis labels
plt.xticks((1.0, 1.5, 2.0), ("1", "1.5", "2+"))
plt.yticks([-(l + 1) for l in range(len(labels))], "")
i = 1
for varname in vars:
chain = trace_obj.chains[0]
value = trace_obj.get_values(varname, chains=[chain])[0]
k = np.size(value)
if k > 1:
plt.plot([min(r, 2) for r in R[varname]], [-(j + i)
for j in range(k)], 'bo', markersize=4)
else:
plt.plot(min(R[varname], 2), -i, 'bo', markersize=4)
i += k
# Define range of y-axis
plt.ylim(-i + 0.5, -0.5)
# Remove ticklines on y-axes
for ticks in rhat_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in rhat_plot.spines.items():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
return gs
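# Minimal usage sketch (assumes a multi-chain trace from pm.sample(); variable
# names are illustrative only):
#
#   gs = forestplot(trace, vars=['mu', 'sd'], alpha=0.05, rhat=True,
#                   main="Posterior credible intervals")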
| apache-2.0 |
LeeKamentsky/CellProfiler | cellprofiler/modules/tests/test_expandorshrinkobjects.py | 2 | 16125 | '''test_expandorshrink - test the ExpandOrShrink module
CellProfiler is distributed under the GNU General Public License.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
'''
import base64
from matplotlib.image import pil_to_array
import numpy as np
import os
import PIL.Image as PILImage
import scipy.ndimage
from StringIO import StringIO
import unittest
import zlib
from cellprofiler.preferences import set_headless
set_headless()
import cellprofiler.pipeline as cpp
import cellprofiler.cpmodule as cpm
import cellprofiler.cpimage as cpi
import cellprofiler.measurements as cpmeas
import cellprofiler.objects as cpo
import cellprofiler.workspace as cpw
import cellprofiler.cpmath.cpmorphology as morph
from cellprofiler.cpmath.outline import outline
import cellprofiler.modules.expandorshrinkobjects as E
INPUT_NAME = "input"
OUTPUT_NAME = "output"
OUTLINES_NAME = "outlines"
class TestExpandOrShrinkObjects(unittest.TestCase):
def test_01_01_load_matlab(self):
'''Load a matlab pipeline with ExpandOrShrink modules'''
data = ('eJwB+QMG/E1BVExBQiA1LjAgTUFULWZpbGUsIFBsYXRmb3JtOiBQQ1dJTiwg'
'Q3JlYXRlZCBvbjogV2VkIEp1bCAyMiAwODozNDoyNSAyMDA5ICAgICAgICAg'
'ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAAAUlN'
'DwAAAHEDAAB4nOxZ0W7aMBQ1FGi7aahbtWmPaE97aLuw7WGP2dZqQ9qgGlW1'
'PZrEgNtgo8SpYF/Xz9jnzCYJBC+djUkBqUSyrGt8jk+uc31tUwUA/HkFQIXX'
'e7wUQfSUY7uQKsJuI8Yw6QVlUAIv4/ZbXi6hj2HHQ5fQC1EApk/S3iBdejEe'
'Tn/6Tt3QQ004SHfmTzMcdJAftLoJMP75HI+Q18a/EZh/km4/0A0OMCUxPuaX'
'W6fjUiaNW+Xl5+HMDwXJD7u8HKbaRX8bzPqXMvx2kOp/EJcLNGLHZyPosNoA'
'MqcveD4oePYkHmG3fNz7xF2tg69I+MrEz46HcPQeDx1vK/CPJbywP/chIch7'
'e2zVJ+2r0FGV8MJu9/2QXCOS5vmq4Hkm8Qj7bDSExEVuizgo4Urey4Qv0ZXF'
'Z/Keib5F/PVE4hH2Kb7BKRqteVs2/soSXth16+i9pTn+roQX9rmPB9Afb/FA'
'/R08kvDCPqU1QlktDOKEIngsBc/OHM8O+MVziGnc8/jA5Np83YjiwRy/SePb'
'Cnxe81ec4ymCJjWbd76X0cIV5nAFUNfUaYrLS2eyLtkK3H3HVV7jq/LXC4lH'
'2PP5phUyDxOxZcxT10PlydpPtVgQ1r54tAO9KY9unZdu07jcNJ1yPFlH9YV0'
'LuqHd5q40hyuBKwTq74O/6l49iUeYTcIQyTAbLyA3qRWrT9PpfGEjYkrdqoh'
'9Gp8o9ObnlbXod80r5ro/Bgyyg/G2LkHnfJ3ewLWo9NkvJ4Px4EDPZTiMc2r'
'ujrXnbfz0mn6fa5ap+xP8gauZJ519d3lR13+2/3/36+l18Fl1/3JotnzaTjU'
'58nal9DOFXLYjGjLo+ZJapO8l+Kr8RyIhlu+jeWzFXxZ99+zuIzoTNYp03pT'
'9KrmIes+lUYn0H8mYsu3PJ+t4Nt+x9t6E+u840C3fl28ex8n7PR9ven+7RuF'
'biN18NV53+cSj7AbLiIMd8fi/4L0mU1HV1XiE3Z0L9jyF7m/3mSevwAAAP//'
'E2BgYOADYteKgsS8FP+i4IyizLxsiJgDELMBMQcQszBAACuUzwjVxwgVx2eO'
'BZI5bFjMYUcyhwnKFxJmAwGwfgMC7mBkQHYHI4MhA/n2svIwgQCG/wnpZ4Ha'
'GSPjL7MaDiFiFYwIcxgJmANSr8GAWz0MjKonTj0AsKrvYTgV5Hg=')
pipeline = cpp.Pipeline()
def callback(caller,event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO(zlib.decompress(base64.b64decode(data))))
# Module #3: ExpandOrShrink revision - 2
# object_name - Nuclei
# output_object_name - ShrunkenNuclei
# object_choice - Primary
# operation - Shrink
# iterations - Inf
# outlines - Do not use
#
# Module #4: ExpandOrShrink revision - 2
# object_name - Nuclei
# output_object_name - ExpandedOnceNuclei
# object_choice - Primary
# operation - Expand
# iterations - 1
# outlines - ExpandedNucleiOutlines
#
# Module #5: ExpandOrShrink revision - 2
# object_name - Nuclei
# output_object_name - ShrunkenOnceNuclei
# object_choice - Primary
# operation - Shrink
# iterations - 1
# outlines - Do not use
#
# Module #6: ExpandOrShrink revision - 2
# object_name - Nuclei
# output_object_name - ExpandedNuclei
# object_choice - Primary
# operation - Expand
# iterations - Inf
# outlines Do not use
#
# Module #7: ExpandOrShrink revision - 2
# object_name - Nuclei
# output_object_name - DividedNuclei
# object_choice - Primary
# operation - Shrink
# iterations - 0
# outlines Do not use
modules = pipeline.modules()
self.assertEqual(len(modules),7)
for module, output_object_name, operation, has_outline in \
((modules[2], "ShrunkenNuclei", E.O_SHRINK_INF, False),
(modules[3], "ExpandedOnceNuclei", E.O_EXPAND, True),
(modules[4], "ShrunkenOnceNuclei", E.O_SHRINK, False),
(modules[5], "ExpandedNuclei", E.O_EXPAND_INF, False),
(modules[6], "DividedNuclei", E.O_DIVIDE, False)):
self.assertEqual(module.output_object_name, output_object_name)
self.assertEqual(module.operation, operation)
self.assertEqual(module.wants_outlines.value, has_outline)
if has_outline:
self.assertEqual(module.outlines_name,"ExpandedNucleiOutlines")
for module in modules[4:6]:
self.assertEqual(module.iterations, 1)
def test_01_02_load_v1(self):
'''Load ExpandOrShrink modules, v1'''
data = ('eJztW91u2zYUlhMnbVasSHezYr3hZbPFguS2aBMUqb24W73VjlEbDYqiP7RF'
'x2xpUpCoJF5RoJd7lD3GHmeXe4SJshTJnFvJju1YqQQI8jnidw6/Qx6KEs1a'
'ufW0/DO4p2qgVm4Vupgg0CCQd5nV3wWUb4N9C0GODMDoLjh0r785BBSLQNvZ'
'1e/s6hooatqOMt2Rq9auu5c/7yvKunu96p4r/q01X85FTiE3EeeYHtlrSl65'
'6ev/ds/n0MKwTdBzSBxkhy4CfZV2WWtgnt2qMcMhqA770cLuUXf6bWTZB90A'
'6N9u4FNEmvgPJFEIij1Dx9jGjPp4376sPfPLuORXxOHqD2EcclIc8u55K6IX'
'5Z8oYfn8mLjdiJTf9GVMDXyMDQcSgPvw6KwWwp4WY291xN6qUqmXPdyDGNy6'
'VI91L84dgvDQbykGf13CC7nZsxz6HtFJ7GxKdsTZQqe88PgUdjjoQ97ppYnP'
't5IdIVdE6yIjMJOoXXMjdnLKHR8XF4c1yb+QdW37rubj38bgVQkv5MenJqTu'
'YNN+hzrcBu0BgMA2UQd3sTv2UC+PAOsCU+SjnczPj5IfIZcNA5jQ4thNBC8j'
'3EEFEEyR6xPxE4RoUIchLkl7fCP5EXKFAco4cGwU2pk0z164WXqedpwUpyfE'
'rYzgVpQ6Sxcubvz8XhltTyFXUBc6hIOqGDxBBVtuF2HWYK7tOs94rUu44Ahw'
'G/41Sf+/JsVLyAfcdsCvhLUhSWwnaR7Nys682k3G6ap2rvjPAjcJv2nmF5qq'
'ece27v+Ycf1nyXvavMqP4PKCs34R/Eox9RyXj/s9SCkixYKmJ7azIdkRcpVy'
'RG3MBzPgsah+HcxXLyvfWT3HdW05+cl5V2cUTROX+zOs5yS4TzH1/F0Z7XdC'
'fn37UeOheCFHe+pPW2+EdIgIecZO9l6WC41XW4FmnxGnT/deaoWdVx/07eLH'
'YeEmdpGecitxnJM87xYRr15MPR9I8RKy4PwCQcsPxN2PWwWhqjHKe76u6Osq'
'cBBq5jWfWHTeLGJekPHL+C0jP20J5teLft6lmd+s30+Xjd/Xl3/3Lrye835/'
'ap0w0CHQtv0v52ni+ySG77j3+UOEj3pi+edYLHTQDorYSwvvUgzvcfO6X5iF'
'jizmUOPi6v3XzRCXk3Dj1oMWGR9v8UgEyExuZ1w+DT+yh4ZmaWeReRLxDzA1'
'kJkie2nJ4wyX4TLc+XFx48d3yuj4IWTmcLE6+r8BZB720hLHDJfhLiOuFMEl'
'/R9NOB8cZnOa+Ga4DHcZcdlzOcNluAy3bPOHpN+T0sI3w2W4DJfhvnbcP7kQ'
'J69XCDm6ni3Kv434Sfq//Q4ixLSY2KdkqX1vM42tEgaN4W4W9an7sxrZ2CL8'
'mDF+SpKf0uf8YANRjrsD03K9OZz1IccdteprG662HGiF316M36Lkt/g5v8jb'
'FMEsu2dh+l4d7pE4sJqeGPJctL9oP9kY4y/a3iuutHnrypUv9S9FGe1XYX/7'
'99E0/lZXczmBi+5juRaDyyuj/dzr18pk/fr2F8oHHJe1/H/d4QwI')
pipeline = cpp.Pipeline()
def callback(caller,event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO(zlib.decompress(base64.b64decode(data))))
self.assertEqual(len(pipeline.modules()), 4)
module = pipeline.modules()[2]
self.assertTrue(isinstance(module,E.ExpandOrShrink))
self.assertEqual(module.object_name, "Nuclei")
self.assertEqual(module.output_object_name, "ShrunkenNuclei")
self.assertEqual(module.operation, E.O_EXPAND)
self.assertEqual(module.iterations, 3)
self.assertFalse(module.wants_outlines.value)
module = pipeline.modules()[3]
self.assertTrue(isinstance(module,E.ExpandOrShrink))
self.assertEqual(module.object_name, "ShrunkenNuclei")
self.assertEqual(module.output_object_name, "DividedNuclei")
self.assertEqual(module.operation, E.O_DIVIDE)
def make_workspace(self,
labels,
operation,
iterations = 1,
wants_outlines = False,
wants_fill_holes = False):
object_set = cpo.ObjectSet()
objects = cpo.Objects()
objects.segmented = labels
object_set.add_objects(objects, INPUT_NAME)
module = E.ExpandOrShrink()
module.object_name.value = INPUT_NAME
module.output_object_name.value = OUTPUT_NAME
module.outlines_name.value = OUTLINES_NAME
module.operation.value = operation
module.iterations.value = iterations
module.wants_outlines.value = wants_outlines
module.wants_fill_holes.value = wants_fill_holes
module.module_num = 1
pipeline = cpp.Pipeline()
pipeline.add_module(module)
image_set_list = cpi.ImageSetList()
workspace = cpw.Workspace(pipeline,
module,
image_set_list.get_image_set(0),
object_set,
cpmeas.Measurements(),
image_set_list)
return workspace, module
def test_02_01_expand(self):
'''Expand an object once'''
labels = np.zeros((10,10), int)
labels[4,4] = 1
expected = np.zeros((10,10), int)
expected[np.array([4,3,4,5,4],int),np.array([3,4,4,4,5],int)] = 1
workspace, module = self.make_workspace(labels, E.O_EXPAND)
module.run(workspace)
objects = workspace.object_set.get_objects(OUTPUT_NAME)
self.assertTrue(np.all(objects.segmented == expected))
self.assertTrue(OUTLINES_NAME not in workspace.get_outline_names())
m = workspace.measurements
self.assertTrue(isinstance(m, cpmeas.Measurements))
count = m.get_current_image_measurement("Count_" + OUTPUT_NAME)
if not np.isscalar(count):
count = count[0]
self.assertEqual(count, 1)
location_x = m.get_current_measurement(OUTPUT_NAME, "Location_Center_X")
self.assertEqual(len(location_x), 1)
self.assertEqual(location_x[0], 4)
location_y = m.get_current_measurement(OUTPUT_NAME, "Location_Center_Y")
self.assertEqual(len(location_y), 1)
self.assertEqual(location_y[0], 4)
def test_02_02_expand_twice(self):
'''Expand an object "twice"'''
labels = np.zeros((10,10), int)
labels[4,4] = 1
i,j = np.mgrid[0:10,0:10]-4
expected = (i**2 + j**2 <=4).astype(int)
workspace, module = self.make_workspace(labels, E.O_EXPAND, 2)
module.run(workspace)
objects = workspace.object_set.get_objects(OUTPUT_NAME)
self.assertTrue(np.all(objects.segmented == expected))
def test_02_03_expand_two(self):
'''Expand two objects once'''
labels = np.zeros((10,10), int)
labels[2,3] = 1
labels[6,5] = 2
i,j = np.mgrid[0:10,0:10]
expected = (((i-2)**2 + (j-3)**2 <=1).astype(int) +
((i-6)**2 + (j-5)**2 <=1).astype(int) * 2)
workspace, module = self.make_workspace(labels, E.O_EXPAND, 1)
module.run(workspace)
objects = workspace.object_set.get_objects(OUTPUT_NAME)
self.assertTrue(np.all(objects.segmented == expected))
def test_03_01_expand_inf(self):
'''Expand two objects infinitely'''
labels = np.zeros((10,10), int)
labels[2,3] = 1
labels[6,5] = 2
i,j = np.mgrid[0:10,0:10]
distance = (((i-2)**2 + (j-3)**2) -
((i-6)**2 + (j-5)**2))
workspace, module = self.make_workspace(labels, E.O_EXPAND_INF)
module.run(workspace)
objects = workspace.object_set.get_objects(OUTPUT_NAME)
self.assertTrue(np.all(objects.segmented[distance < 0]==1))
self.assertTrue(np.all(objects.segmented[distance > 0]==2))
def test_04_01_divide(self):
'''Divide two touching objects'''
labels = np.ones((10,10), int)
labels[5:,:] = 2
expected = labels.copy()
expected[4:6,:] = 0
workspace, module = self.make_workspace(labels, E.O_DIVIDE)
module.run(workspace)
objects = workspace.object_set.get_objects(OUTPUT_NAME)
self.assertTrue(np.all(objects.segmented == expected))
def test_04_02_dont_divide(self):
'''Don't divide an object that would disappear'''
labels = np.ones((10,10), int)
labels[9,9] = 2
expected = labels.copy()
expected[8,9] = 0
expected[8,8] = 0
expected[9,8] = 0
workspace, module = self.make_workspace(labels, E.O_DIVIDE)
module.run(workspace)
objects = workspace.object_set.get_objects(OUTPUT_NAME)
self.assertTrue(np.all(objects.segmented == expected))
def test_05_01_shrink(self):
'''Shrink once'''
labels = np.zeros((10,10), int)
labels[1:9,1:9] = 1
expected = morph.thin(labels, iterations = 1)
workspace, module = self.make_workspace(labels, E.O_SHRINK, 1)
module.run(workspace)
objects = workspace.object_set.get_objects(OUTPUT_NAME)
self.assertTrue(np.all(objects.segmented == expected))
def test_06_01_shrink_inf(self):
'''Shrink infinitely'''
labels = np.zeros((10,10), int)
labels[1:8,1:8] = 1
expected = np.zeros((10,10),int)
expected[4,4] = 1
workspace, module = self.make_workspace(labels, E.O_SHRINK_INF)
module.run(workspace)
objects = workspace.object_set.get_objects(OUTPUT_NAME)
self.assertTrue(np.all(objects.segmented == expected))
def test_06_02_shrink_inf_fill_holes(self):
'''Shrink infinitely after filling a hole'''
labels = np.zeros((10,10), int)
labels[1:8,1:8] = 1
labels[4,4] = 0
expected = np.zeros((10,10),int)
expected[4,4] = 1
# Test failure without filling the hole
workspace, module = self.make_workspace(labels, E.O_SHRINK_INF)
module.run(workspace)
objects = workspace.object_set.get_objects(OUTPUT_NAME)
self.assertFalse(np.all(objects.segmented == expected))
# Test success after filling the hole
workspace, module = self.make_workspace(labels, E.O_SHRINK_INF,
wants_fill_holes = True)
module.run(workspace)
objects = workspace.object_set.get_objects(OUTPUT_NAME)
self.assertTrue(np.all(objects.segmented == expected))
def test_07_01_outlines(self):
'''Create an outline of the resulting objects'''
labels = np.zeros((10,10), int)
labels[4,4] = 1
i,j = np.mgrid[0:10,0:10]-4
expected = (i**2 + j**2 <=4).astype(int)
expected_outlines = outline(expected)
workspace, module = self.make_workspace(labels, E.O_EXPAND, 2,
wants_outlines = True)
module.run(workspace)
objects = workspace.object_set.get_objects(OUTPUT_NAME)
self.assertTrue(np.all(objects.segmented == expected))
self.assertTrue(OUTLINES_NAME in workspace.image_set.get_names())
outlines = workspace.image_set.get_image(OUTLINES_NAME).pixel_data
self.assertTrue(np.all(outlines == expected_outlines))
| gpl-2.0 |
aiguofer/bokeh | examples/app/stocks/main.py | 11 | 4555 | ''' Create a simple stocks correlation dashboard.
Choose stocks to compare in the drop down widgets, and make selections
on the plots to update the summary and histograms accordingly.
.. note::
Running this example requires downloading sample data. See
the included `README`_ for more information.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve stocks
at your command prompt. Then navigate to the URL
http://localhost:5006/stocks
.. _README: https://github.com/bokeh/bokeh/blob/master/examples/app/stocks/README.md
'''
try:
from functools import lru_cache
except ImportError:
    # Python 2's stdlib does not have lru_cache, so create a dummy
    # decorator to avoid crashing
print ("WARNING: Cache for this example is available on Python 3 only.")
def lru_cache():
def dec(f):
def _(*args, **kws):
return f(*args, **kws)
return _
return dec
from os.path import dirname, join
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import PreText, Select
from bokeh.plotting import figure
DATA_DIR = join(dirname(__file__), 'daily')
DEFAULT_TICKERS = ['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
def nix(val, lst):
return [x for x in lst if x != val]
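# Illustrative example of the helper above (not in the original source):
#   nix('AAPL', ['AAPL', 'GOOG', 'INTC'])  # -> ['GOOG', 'INTC']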
@lru_cache()
def load_ticker(ticker):
fname = join(DATA_DIR, 'table_%s.csv' % ticker.lower())
data = pd.read_csv(fname, header=None, parse_dates=['date'],
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'])
data = data.set_index('date')
return pd.DataFrame({ticker: data.c, ticker+'_returns': data.c.diff()})
@lru_cache()
def get_data(t1, t2):
df1 = load_ticker(t1)
df2 = load_ticker(t2)
data = pd.concat([df1, df2], axis=1)
data = data.dropna()
data['t1'] = data[t1]
data['t2'] = data[t2]
data['t1_returns'] = data[t1+'_returns']
data['t2_returns'] = data[t2+'_returns']
return data
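# Illustrative note (sketch): get_data('AAPL', 'GOOG') returns a date-indexed
# DataFrame with columns AAPL, AAPL_returns, GOOG, GOOG_returns plus the
# generic aliases t1, t2, t1_returns and t2_returns used by the plots below.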
# set up widgets
stats = PreText(text='', width=500)
ticker1 = Select(value='AAPL', options=nix('GOOG', DEFAULT_TICKERS))
ticker2 = Select(value='GOOG', options=nix('AAPL', DEFAULT_TICKERS))
# set up plots
source = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))
source_static = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))
tools = 'pan,wheel_zoom,xbox_select,reset'
corr = figure(plot_width=350, plot_height=350,
tools='pan,wheel_zoom,box_select,reset')
corr.circle('t1_returns', 't2_returns', size=2, source=source,
selection_color="orange", alpha=0.6, nonselection_alpha=0.1, selection_alpha=0.4)
ts1 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag="xbox_select")
ts1.line('date', 't1', source=source_static)
ts1.circle('date', 't1', size=1, source=source, color=None, selection_color="orange")
ts2 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag="xbox_select")
ts2.x_range = ts1.x_range
ts2.line('date', 't2', source=source_static)
ts2.circle('date', 't2', size=1, source=source, color=None, selection_color="orange")
# set up callbacks
def ticker1_change(attrname, old, new):
ticker2.options = nix(new, DEFAULT_TICKERS)
update()
def ticker2_change(attrname, old, new):
ticker1.options = nix(new, DEFAULT_TICKERS)
update()
def update(selected=None):
t1, t2 = ticker1.value, ticker2.value
data = get_data(t1, t2)
source.data = source.from_df(data[['t1', 't2', 't1_returns', 't2_returns']])
source_static.data = source.data
update_stats(data, t1, t2)
corr.title.text = '%s returns vs. %s returns' % (t1, t2)
ts1.title.text, ts2.title.text = t1, t2
def update_stats(data, t1, t2):
stats.text = str(data[[t1, t2, t1+'_returns', t2+'_returns']].describe())
ticker1.on_change('value', ticker1_change)
ticker2.on_change('value', ticker2_change)
def selection_change(attrname, old, new):
t1, t2 = ticker1.value, ticker2.value
data = get_data(t1, t2)
selected = source.selected['1d']['indices']
if selected:
data = data.iloc[selected, :]
update_stats(data, t1, t2)
source.on_change('selected', selection_change)
# set up layout
widgets = column(ticker1, ticker2, stats)
main_row = row(corr, widgets)
series = column(ts1, ts2)
layout = column(main_row, series)
# initialize
update()
curdoc().add_root(layout)
curdoc().title = "Stocks"
| bsd-3-clause |
creyesp/RF_Estimation | Clustering/clustering/clusteringTime8.py | 2 | 8261 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# SpectralClustering.py
#
# Copyright 2014 Carlos "casep" Sepulveda <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Performs clustering using different libraries
import sys, os
#Relative path for RFE LIB
sys.path.append(os.path.join(os.path.dirname(__file__), '../..','LIB'))
import rfestimationLib as rfe #Some custom functions
import argparse #argument parsing
from sklearn import metrics
from sklearn import preprocessing
from math import ceil
from math import floor
from scipy.interpolate import UnivariateSpline
from numpy import zeros
from numpy import linspace
from numpy import concatenate
from numpy import append
from numpy import amax
from numpy import amin
from numpy import chararray
from numpy import shape
from numpy import savetxt
from numpy import where
from numpy import unique
from numpy import mean
from numpy import absolute
from math import pi
from numpy import float64
from numpy import empty
from numpy import reshape
#Output file format
# 0-19 Timestamps
# aRadius
# bRadius
# angle
# xCoordinate
# yCoordinate
# area
# unitName
# clusterId
# peakTime
def main():
parser = argparse.ArgumentParser(prog='clusteringTime8.py',
description='Performs clustering, Gaussian Mixture, KMeans or Spectral',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sourceFolder',
help='Source folder',
type=str, required=True)
parser.add_argument('--outputFolder',
help='Output folder',
type=str, required=True)
parser.add_argument('--clustersNumber',
help='Number of clusters',
type=int, default='3', choices=[2,3,4,5,6,7,8,9,10,11,12,13,14,15], required=False)
parser.add_argument('--framesNumber',
help='Number of frames used in STA analysis',
type=int, default='20', required=False)
parser.add_argument('--blockSize',
help='Size of each block in micrometres',
type=int, default='50', required=False)
parser.add_argument('--clusteringAlgorithm',
help='Clustering algorithm to use: K-Means, Spectral Clustering, GMM',
type=str, default='kmeans', choices=['kmeans','spectral','gmm','densityPeaks'], required=False)
parser.add_argument('--percentageDensityDistance',
help='Percentage used to calculate the distance',
type=float, default='2', required=False)
args = parser.parse_args()
#Source folder of the files with the timestamps
sourceFolder = rfe.fixPath(args.sourceFolder)
if not os.path.exists(sourceFolder):
print ''
print 'Source folder does not exists ' + sourceFolder
print ''
sys.exit()
#Output folder for the graphics
outputFolder = rfe.fixPath(args.outputFolder)
if not os.path.exists(outputFolder):
try:
os.makedirs(outputFolder)
except:
print ''
print 'Unable to create folder ' + outputFolder
print ''
sys.exit()
#Clusters number for the kmeans algorithm
clustersNumber = args.clustersNumber
#Frames used in STA analysis
framesNumber = args.framesNumber
#Size of each block in micrometres
blockSize = args.blockSize
#Clustering Algorithm
clusteringAlgorithm = args.clusteringAlgorithm
#dataCluster stores the data to be used for the clustering process
#the size is equal to the number of frames, aka, the time component
#plus 5 as we are incorporating the 2 dimensions of the ellipse,
#x position, y position and angle
dataCluster = zeros((1,framesNumber+7))
units = []
dato = empty((1,1))
for unitFile in os.listdir(sourceFolder):
if os.path.isdir(sourceFolder+unitFile):
dato = empty((1,1))
unitName = unitFile.rsplit('_', 1)[0]
#print unitName
dataUnit, coordinates = rfe.loadSTACurve(sourceFolder,unitFile,unitName)
xSize = dataUnit.shape[0]
ySize = dataUnit.shape[1]
fitResult = rfe.loadFitMatrix(sourceFolder,unitFile)
dataUnitTemporal = dataUnit[coordinates[0][0],[coordinates[1][0]],:]
#Time data from FITResult
#dataUnitTemporal = rfe.loadVectorAmp(sourceFolder,unitFile).T
#A radius of the RF ellipse
aRadius = fitResult[0][2]
dato[0] = aRadius
dataUnitCompleta = concatenate((dataUnitTemporal,dato),1)
#B radius of the RF ellipse
bRadius = fitResult[0][3]
dato[0] = bRadius
dataUnitCompleta = concatenate((dataUnitCompleta,dato),1)
#angle of the RF ellipse
angle = fitResult[0][1]
dato[0] = angle
dataUnitCompleta = concatenate((dataUnitCompleta,dato),1)
#X coordinate of the RF ellipse
xCoordinate = fitResult[0][4]
#print 'xCoordinate',xCoordinate
dato[0] = xCoordinate
dataUnitCompleta = concatenate((dataUnitCompleta,dato),1)
#Y coordinate of the RF ellipse
yCoordinate = fitResult[0][5]
#print 'yCoordinate',yCoordinate
dato[0] = yCoordinate
dataUnitCompleta = concatenate((dataUnitCompleta,dato),1)
#Area of the RF ellipse
area = aRadius*bRadius*pi
dato[0] = area
dataUnitCompleta = concatenate((dataUnitCompleta,dato),1)
#UnitName
dato=empty(1, dtype='|S16')
dato[0]=unitName
dataUnitCompleta = concatenate((dataUnitCompleta,dato.reshape(1, 1)),1)
dataCluster = append(dataCluster,dataUnitCompleta, axis=0)
units.append(unitName)
# remove the first row of zeroes
dataCluster = dataCluster[1:,:]
    # Temporal part only: dataCluster[:,0:framesNumber]
# framesNumber
data = dataCluster[:,framesNumber*.45:framesNumber*.9]
data = data.astype(float64, copy=False)
# Calculates the next 5-step for the y-coordinate
maxData = ceil(amax(data)/5)*5
minData = floor(amin(data)/5)*5
if clusteringAlgorithm == 'spectral':
from sklearn.cluster import SpectralClustering
sc = SpectralClustering(n_clusters=clustersNumber, eigen_solver=None, \
random_state=None, n_init=10, gamma=1.0, affinity='nearest_neighbors', \
n_neighbors=10, eigen_tol=0.0, assign_labels='kmeans', degree=3, \
coef0=1, kernel_params=None)
sc.fit(data)
labels = sc.labels_
elif clusteringAlgorithm == 'gmm':
from sklearn import mixture
gmix = mixture.GMM(n_components=clustersNumber, covariance_type='spherical')
gmix.fit(data)
labels = gmix.predict(data)
elif clusteringAlgorithm == 'densityPeaks':
import densityPeaks as dp
percentageDensityDistance = args.percentageDensityDistance
clustersNumber, labels = dp.predict(data, percentageDensityDistance)
else:
from sklearn.cluster import KMeans
km = KMeans(init='k-means++', n_clusters=clustersNumber, n_init=10,n_jobs=-1)
km.fit(data)
labels = km.labels_
dataFile = empty((1,framesNumber+9),dtype='|S16')
datos = empty((1,framesNumber+7),dtype='|S16')
dato = empty((1,1),dtype='|S16')
for clusterId in range(clustersNumber):
for unitId in range(dataCluster.shape[0]):
if labels[unitId] == clusterId:
dato[0] = clusterId
dataFileTmp = concatenate(([dataCluster[unitId,:]],dato),1)
x = linspace(1, framesNumber, framesNumber)
s = UnivariateSpline(x, dataCluster[unitId,0:framesNumber], s=0)
xs = linspace(1, framesNumber, framesNumber*1000)
ys = s(xs)
media = mean(ys)
maximo = amax(ys)
minimo = amin(ys)
maximaDistancia = absolute(maximo-media)
minimaDistancia = absolute(minimo-media)
peakTempCurve = minimo
if maximaDistancia > minimaDistancia:
peakTempCurve = maximo
dato[0] = unique(where(peakTempCurve==ys)[0])[0]
dataFileTmp = concatenate((dataFileTmp,dato),1)
dataFile = append(dataFile, dataFileTmp, axis=0)
# remove the first row of zeroes
dataFile = dataFile[1:,:]
savetxt(outputFolder+'outputFile.csv',dataFile, fmt='%s', delimiter=',', newline='\n')
return 0
if __name__ == '__main__':
main()
| gpl-2.0 |
finfou/tushare | test/storing_test.py | 40 | 1729 | # -*- coding:utf-8 -*-
import os
from sqlalchemy import create_engine
from pandas.io.pytables import HDFStore
import tushare as ts
def csv():
df = ts.get_hist_data('000875')
df.to_csv('c:/day/000875.csv',columns=['open','high','low','close'])
def xls():
df = ts.get_hist_data('000875')
    # save it directly
df.to_excel('c:/day/000875.xlsx', startrow=2,startcol=5)
def hdf():
df = ts.get_hist_data('000875')
# df.to_hdf('c:/day/store.h5','table')
store = HDFStore('c:/day/store.h5')
store['000875'] = df
store.close()
def json():
df = ts.get_hist_data('000875')
df.to_json('c:/day/000875.json',orient='records')
    # or use it directly:
print(df.to_json(orient='records'))
def appends():
filename = 'c:/day/bigfile.csv'
for code in ['000875', '600848', '000981']:
df = ts.get_hist_data(code)
if os.path.exists(filename):
df.to_csv(filename, mode='a', header=None)
else:
df.to_csv(filename)
def db():
df = ts.get_tick_data('600848',date='2014-12-22')
engine = create_engine('mysql://root:[email protected]/mystock?charset=utf8')
# db = MySQLdb.connect(host='127.0.0.1',user='root',passwd='jimmy1',db="mystock",charset="utf8")
# df.to_sql('TICK_DATA',con=db,flavor='mysql')
# db.close()
df.to_sql('tick_data',engine,if_exists='append')
def nosql():
import pymongo
import json
conn = pymongo.Connection('127.0.0.1', port=27017)
df = ts.get_tick_data('600848',date='2014-12-22')
print(df.to_json(orient='records'))
conn.db.tickdata.insert(json.loads(df.to_json(orient='records')))
# print conn.db.tickdata.find()
if __name__ == '__main__':
nosql() | bsd-3-clause |
jblackburne/scikit-learn | examples/datasets/plot_iris_dataset.py | 35 | 1929 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
phiros/nepi | benchmark/scalability/scripts/plot.py | 1 | 6063 | import numpy as numpy
import matplotlib.pyplot as pyplot
from matplotlib.backends.backend_pdf import PdfPages
from optparse import OptionParser
def compute_estimator(samples):
if len(samples) == 0:
return 0,0,0
x = numpy.array(samples)
n = len(samples)
std = x.std()
m = x.mean()
return n, m, std
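# Illustrative example (not in the original source):
#   compute_estimator([2.0, 4.0, 6.0])  # -> (3, 4.0, ~1.63)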
def add_sample_data(data, thread_count, rm_count, sample):
n,m,std = compute_estimator(sample)
if not thread_count in data:
data[thread_count] = dict()
if not rm_count in data[thread_count]:
data[thread_count][rm_count] = list()
data[thread_count][rm_count].append(m)
return data
def add_scalar_data(data, thread_count, rm_count, scalar):
if not thread_count in data:
data[thread_count] = dict()
if not rm_count in data[thread_count]:
data[thread_count][rm_count] = list()
data[thread_count][rm_count].append(scalar)
return data
def read_data(data_file):
data_cpu_d = dict()
data_cpu_e = dict()
data_cpu_r = dict()
data_mem_d = dict()
data_mem_e = dict()
data_mem_r = dict()
data_time = dict()
f = open(data_file, "r")
for l in f:
if l.startswith("timestamp|"):
continue
rows = l.split("|")
platform = rows[1]
node_count = int(rows[2])
app_count = int(rows[3])
thread_count = int(rows[4])
delay = float(rows[6])
opdelay = float(rows[7])
status = rows[19]
if status.strip() != "OK":
continue
if platform != "dummy":
opdelay = "Nan"
else:
opdelay = "%.2f" % opdelay
# Compute number of rms (1 dev per node)
rm_count = node_count * 2 + app_count * node_count
cpu_d = rows[9]
if cpu_d:
cpu_d = map(float, cpu_d.split(","))
add_sample_data(data_cpu_d, thread_count, rm_count, cpu_d)
cpu_e = rows[10]
if cpu_e:
cpu_e = map(float, cpu_e.split(","))
add_sample_data(data_cpu_e, thread_count, rm_count, cpu_e)
cpu_r = rows[11]
if cpu_r:
cpu_r = map(float, cpu_r.split(","))
add_sample_data(data_cpu_r, thread_count, rm_count, cpu_r)
mem_d = rows[13]
if mem_d:
mem_d = map(float, mem_d.split(","))
add_sample_data(data_mem_d, thread_count, rm_count, mem_d)
mem_e = rows[14]
if mem_e:
mem_e = map(float, mem_e.split(","))
add_sample_data(data_mem_e, thread_count, rm_count, mem_e)
mem_r = rows[15]
if mem_r:
mem_r = map(float, mem_r.split(","))
add_sample_data(data_mem_r, thread_count, rm_count, mem_r)
ttd = int(rows[16])
ttr = int(rows[17])
ttrel = int(rows[18])
# Compute total duration of experiment in seconds
duration = (ttd + ttr + ttrel) / (1000.0)
add_scalar_data(data_time, thread_count, rm_count, duration)
f.close()
return (data_cpu_d, data_cpu_e, data_cpu_r, data_mem_d, data_mem_e,
data_mem_r, data_time, platform, delay, opdelay)
def save_figure(figure, platform, delay, opdelay, metric, stage):
outputfile = "%s_d_%.2f_o_%s_%s_%s.pdf" % (
platform, delay, opdelay, metric, stage)
pp = PdfPages(outputfile)
pp.savefig(figure)
pp.close()
def plot_data(data, platform, delay, opdelay, metric, stage, unit):
plts = dict()
for thread_count in sorted(data.keys()):
plts[thread_count] = [[], [], [], []]
rm_info = data[thread_count]
for rm_count in sorted(rm_info.keys()):
sample = rm_info[rm_count]
x = numpy.array(sample)
n = len(sample)
std = x.std()
m = x.mean()
ci2 = numpy.percentile(sample, [2.5, 97.5])
plts[thread_count][0].append(rm_count)
plts[thread_count][1].append(m)
plts[thread_count][2].append(m - ci2[0])
plts[thread_count][3].append(ci2[1] - m)
colors = ['red', 'orange', 'green', 'blue']
fig = pyplot.figure(figsize=(8, 6), dpi=100)
ax = fig.add_subplot(111)
i = 0
for thread_count in sorted(plts.keys()):
info = plts[thread_count]
x = info[0]
y = info[1]
ye1 = info[2]
ye2 = info[3]
# plot
ax.errorbar(x, y, yerr=[ye1, ye2], fmt='-o',
label="%d threads" % thread_count,
color = colors[i])
i+=1
ax.grid(True)
ax.set_xlabel("# Resources")
#plt.gca().set_xscale('log')
ylabel = "%s %s" % (metric, unit)
ax.set_ylabel(ylabel)
#plt.gca().set_yscale('log')
ax.legend(loc='lower right', framealpha=0.5)
title = "%s - %s usage during %s\n" \
"Reschedule / Operation delay = %.2f / %s " % (
platform, metric, stage, delay, opdelay)
ax.set_title(title)
#pyplot.show()
save_figure(fig, platform, delay, opdelay, metric, stage)
usage = ("usage: %prog -f <data-file>")
parser = OptionParser(usage = usage)
parser.add_option("-f", "--data-file", dest="data_file",
help="File containing data to plot", type="string")
(options, args) = parser.parse_args()
data_file = options.data_file
(data_cpu_d, data_cpu_e, data_cpu_r, data_mem_d, data_mem_e,
data_mem_r, data_time, platform, delay, opdelay) = read_data(data_file)
plot_data(data_cpu_d, platform, delay, opdelay, "CPU", "deploy", "(%)")
plot_data(data_cpu_e, platform, delay, opdelay, "CPU", "execute", "(%)")
plot_data(data_cpu_r, platform, delay, opdelay, "CPU", "release", "(%)")
plot_data(data_mem_d, platform, delay, opdelay, "Memory", "deploy", "(%)")
plot_data(data_mem_e, platform, delay, opdelay, "Memory", "execute", "(%)")
plot_data(data_mem_r, platform, delay, opdelay, "Memory", "release", "(%)")
plot_data(data_time, platform, delay, opdelay, "Time", "total", "(sec)")
| gpl-3.0 |
hdmetor/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time complexity
is polynomial.
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
feature_names = vectorizer.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
| bsd-3-clause |
yask123/scikit-learn | sklearn/ensemble/tests/test_forest.py | 48 | 39224 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(X, y, name, criterion):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = est.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, X, y, name, criterion
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, X, y, name, criterion
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
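    # e.g. (sketch) binomial(2, 4) == comb(4, 2, exact=True) == 6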
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
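    # e.g. (sketch) entropy(np.array([0, 0, 1, 1])) == 1.0 (one bit for a
    # perfectly balanced binary split)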
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
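# Illustrative usage (editorial sketch, not part of the original tests): the
# same OOB machinery is enabled outside the test suite via, e.g.,
#
#     rf = RandomForestClassifier(n_estimators=100, oob_score=True,
#                                 bootstrap=True, random_state=0).fit(X, y)
#     rf.oob_score_   # out-of-bag accuracy; roughly tracks held-out accuracy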
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
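# Illustrative follow-up (editorial sketch, not part of the original tests):
# once fitted, GridSearchCV exposes the selected configuration, e.g.
#
#     clf.best_params_      # e.g. {'max_depth': 2, 'n_estimators': 2}
#     clf.best_estimator_   # forest refit on the full data with those params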
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
    # Check picklability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
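    # Editorial note (informal derivation, not part of the original test):
    # with a single feature taking the values {0, 1, 2, 3}, the extra-trees
    # cut point is drawn uniformly on [0, 3]; it lands in the middle interval
    # (1, 2) with probability 1/3, which forces the compact balanced shape,
    # while with probability 1/3 each it lands in (0, 1) or (2, 3), after
    # which the remaining three-value side is split one of two ways with
    # probability 1/2, giving the four other shapes 1/3 * 1/2 = 1/6 each.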
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
    # Test that leaves contain at least min_samples_leaf training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test that class_weight works for multi-output.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
| bsd-3-clause |
RPGOne/Skynet | numpy-master/numpy/core/code_generators/ufunc_docstrings.py | 7 | 92602 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
        Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
    For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
    therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
    The number 13 has the binary representation ``00001101``. Likewise,
    16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
    then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
        The complex conjugate of `x`, with same dtype as `x`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise. Returns a scalar if
both ``x1`` and ``x2`` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
    then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
        The absolute values of `x`; the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
Python ``%`` (`remainder`), function so that ``b = a % b + b * (a // b)``
up to roundoff.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
remainder : Remainder complementary to floor_divide.
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
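    The pairing with ``%`` described above can be checked directly
    (illustrative check, not part of the original docstring):
    >>> a, b = np.array([7., -7., 7.5]), np.array([3., 3., 2.5])
    >>> np.allclose(a, a % b + b * np.floor_divide(a, b))
    True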
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=uint8))
array([242], dtype=uint8)
>>> np.binary_repr(x, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(x, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the
corresponding element of the input is finite; otherwise the values
are False (element is either positive infinity, negative infinity
or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
    NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity,
    and that positive infinity is not equivalent to negative infinity; but
    infinity is equivalent to positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or boolean ndarray
For scalar input, the result is a new boolean with value True if
the input is positive or negative infinity; otherwise the value is
False.
For array input, the result is a boolean array with the same shape
as the input and the values are True where the corresponding
element of the input is positive or negative infinity; elsewhere
the values are False. If a second argument was supplied the result
is stored there. If the type of that array is a numeric type the
result is represented as zeros and ones, if the type is boolean
then as False and True, respectively. The return value `y` is then
a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
    NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray or bool
For scalar input, the result is a new boolean with value True if
the input is NaN; otherwise the value is False.
For array input, the result is a boolean array of the same
dimensions as the input and the values are True if the
corresponding element of the input is NaN; otherwise the values are
False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
    NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
    Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
    so that `log(exp(x)) = x`. The natural logarithm is the logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([ 2., 5., 4.])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
    This function used to be numpy.ones_like, but now a specific
function for that has been written for consistency with the other
*_like functions. It is only used internally in a limited fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape. Note that an
integer type raised to a negative integer power will raise a ValueError.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
See Also
--------
float_power : power function that promotes integers to float
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'float_power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in `x2`.
`x1` and `x2` must be broadcastable to the same shape. This differs from
the power function in that integers, float16, and float32 are promoted to
floats with a minimum precision of float64 so that the result is always
inexact. The intent is that the function will return a usable result for
negative powers and seldom overflow for positive powers.
.. versionadded:: 1.12.0
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
See Also
--------
power : power function that preserves type
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.float_power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.float_power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.float_power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes the remainder complementary to the `floor_divide` function. It is
    equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
as the divisor `x2`. It should not be confused with the Matlab(TM) ``rem``
function.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
Returns a scalar if both `x1` and `x2` are scalars.
See Also
--------
floor_divide : Equivalent of Python ``//`` operator.
fmod : Equivalent of the Matlab(TM) ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
    Bits are shifted to the right by `x2` places. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan
is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
    >>> # Example of providing the optional output parameter
    >>> out1 = np.array([0], dtype='d')
    >>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
    >>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
        An array of the same shape as `x`, containing the cube-root of each
        element in `x`.
If `out` was provided, `y` is a reference to it.
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
    >>> out1 = np.array([0], dtype='d')
    >>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
    >>> np.tan(np.zeros((3,3)), np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
    >>> out1 = np.array([0], dtype='d')
    >>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
    The floor division operator ``//`` was added in Python 2.2; under Python
    2's default (classic) division, ``//`` and ``/`` are equivalent for
    integer operands. That default floor-division behaviour of ``/`` can be
    replaced by true division with ``from __future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
    Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
    The mantissa lies in the open interval (-1, 1), while the twos
    exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
Returns
-------
(mantissa, exponent) : tuple of ndarrays, (float, int)
`mantissa` is a float array with values between -1 and 1.
`exponent` is an int array which represents the exponent of 2.
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
out : ndarray, optional
Output array for the result.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float32)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
| bsd-3-clause |
artmusic0/theano-learning.part03 | Myfile_run-py_big-taining/cnn_training_computation.old.py | 3 | 7532 | import os
import sys, getopt
import time
import numpy
import theano
import theano.tensor as T
from sklearn import preprocessing
from cnn import CNN
import pickle as cPickle
from logistic_sgd import LogisticRegression
def fit(data, labels, filename = 'weights_v5.pkl'):
fit_predict(data, labels, filename = filename, action = 'fit')
def predict(test_dataset, filename = 'weights_v5.pkl' ):
return fit_predict(data=[], labels=[], filename= filename, test_datasets=[test_dataset], action = 'predict')[0]
def fit_predict(data, labels, action, filename, test_datasets = [], learning_rate=0.1, n_epochs=100, nkerns=[20, 50, 90], batch_size=50, seed=8000):
rng = numpy.random.RandomState(seed)
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
index = T.lscalar() # index to a [mini]batch
if action=='fit':
NUM_TRAIN = len(data)
#print NUM_TRAIN
#print batch_size
if NUM_TRAIN % batch_size != 0: #if the last batch is not full, just don't use the remainder
whole = (NUM_TRAIN / batch_size) * batch_size
data = data[:whole]
NUM_TRAIN = len(data)
#print NUM_TRAIN
#print batch_size
# random permutation
indices = rng.permutation(NUM_TRAIN)
data, labels = data[indices, :], labels[indices]
        # from each batch of `batch_size` examples, the last 20 are held out to validate the NN while training (e.g. batch_size == 50 gives a (30, 20) split)
is_train = numpy.array( ([0]* (batch_size - 20) + [1] * 20) * (NUM_TRAIN / batch_size))
# now we split the dataset to test and valid datasets
train_set_x, train_set_y = numpy.array(data[is_train==0]), labels[is_train==0]
valid_set_x, valid_set_y = numpy.array(data[is_train==1]), labels[is_train==1]
# compute number of minibatches
n_train_batches = len(train_set_y) / batch_size
n_valid_batches = len(valid_set_y) / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
epoch = T.scalar()
#index = T.lscalar() # index to a [mini]batch
#x = T.matrix('x') # the data is presented as rasterized images
#y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
# construct the CNN class
classifier = CNN(
rng=rng,
input=x,
nkerns = nkerns,
batch_size = batch_size
)
train_set_x = theano.shared(numpy.asarray(train_set_x, dtype=theano.config.floatX))
train_set_y = T.cast(theano.shared(numpy.asarray(train_set_y, dtype=theano.config.floatX)), 'int32')
valid_set_x = theano.shared(numpy.asarray(valid_set_x, dtype=theano.config.floatX))
valid_set_y = T.cast(theano.shared(numpy.asarray(valid_set_y, dtype=theano.config.floatX)), 'int32')
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
cost = classifier.layer4.negative_log_likelihood(y)
# create a list of gradients for all model parameters
grads = T.grad(cost, classifier.params)
# specify how to update the parameters of the model as a list of (variable, update expression) pairs
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(classifier.params, grads)
]
# compiling a Theano function `train_model` that returns the cost, but
# in the same time updates the parameter of the model based on the rules defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
###############
# TRAIN MODEL #
###############
print '... training'
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
# here is an example how to print the current value of a Theano variable: print test_set_x.shape.eval()
# start training
while (epoch < n_epochs):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
iter = (epoch - 1) * n_train_batches + minibatch_index
if (epoch) % 1 == 0 and minibatch_index==0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
###############
# PREDICTIONS #
###############
# save and load
f = file(filename, 'wb')
cPickle.dump(classifier.__getstate__(), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
end_time = time.clock()
print >> sys.stderr, ('The code ran for %.2fm' % ((end_time - start_time) / 60.))
if action == 'predict':
# construct the CNN class
classifier_2 = CNN(
rng=rng,
input=x,
nkerns = nkerns,
batch_size = batch_size
)
print "...."
f = file(filename, 'rb')
classifier_2.__setstate__(cPickle.load(f))
f.close()
RET = []
for it in range(len(test_datasets)):
test_data = test_datasets[it]
N = len(test_data)
test_data = theano.shared(numpy.asarray(test_data, dtype=theano.config.floatX))
# just zeroes
test_labels = T.cast(theano.shared(numpy.asarray(numpy.zeros(batch_size), dtype=theano.config.floatX)), 'int32')
ppm = theano.function([index], classifier_2.layer3.pred_probs(),
givens={
x: test_data[index * batch_size: (index + 1) * batch_size],
y: test_labels
}, on_unused_input='warn')
        # p : predictions, we need to take argmax, p is 3-dim: (# loop iterations x batch_size x 20)
p = [ppm(ii) for ii in xrange( N / batch_size)]
#p_one = sum(p, [])
#print p
p = numpy.array(p).reshape((N, 20))
#print p
p = numpy.argmax(p, axis=1)
p = p.astype(int)
RET.append(p)
return RET
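# --- Illustrative usage sketch (added; not part of the original script). The
# array shapes, number of classes and weight file name below are hypothetical
# placeholders; they must match whatever the CNN class in cnn.py expects.
def _example_usage():
    train_x = numpy.random.rand(500, 900).astype(theano.config.floatX)  # 500 flattened images (hypothetical size)
    train_y = numpy.random.randint(0, 20, size=500).astype('int32')     # 20 classes, matching the reshape above
    fit(train_x, train_y, filename='weights_v5.pkl')       # train and pickle the weights
    return predict(train_x, filename='weights_v5.pkl')     # reload the weights and classify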
| gpl-3.0 |
liyu1990/sklearn | sklearn/feature_selection/tests/test_feature_select.py | 103 | 22297 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
# Smoke test f_oneway on integers: that it does raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
# test that is gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
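# Added illustrative sketch (not part of the original test suite; assumes numpy
# is imported as ``np`` at the top of this module): conceptually,
# inverse_transform scatters the retained columns back into their original
# positions and fills the dropped columns with zeros, which is what the dense
# and sparse checks above rely on.
def _inverse_transform_sketch(X_reduced, support):
    X_reduced = np.asarray(X_reduced)
    support = np.asarray(support, dtype=bool)
    X_full = np.zeros((X_reduced.shape[0], support.size), dtype=X_reduced.dtype)
    X_full[:, support] = X_reduced
    return X_full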
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
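# Added illustrative sketch (not part of the original test suite; assumes numpy
# is imported as ``np`` at the top of this module): the Benjamini-Hochberg
# step-up rule referenced above.  Sorted p-values are compared against the
# thresholds rank / n_tests * alpha, and every hypothesis up to the largest
# rank that passes its threshold is selected.
def _benjamini_hochberg_sketch(p_values, alpha=0.05):
    p_values = np.asarray(p_values, dtype=float)
    n_tests = p_values.size
    order = np.argsort(p_values)
    thresholds = np.arange(1, n_tests + 1) * alpha / n_tests
    below = p_values[order] <= thresholds
    selected = np.zeros(n_tests, dtype=bool)
    if below.any():
        last = np.nonzero(below)[0].max()
        selected[order[:last + 1]] = True
    return selected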
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
    # Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
mlperf/training_results_v0.7 | Inspur/benchmarks/dlrm/implementations/implementation_closed/tests/metrics_test.py | 1 | 1545 | """Tests for metrics"""
from absl.testing import absltest
from sklearn.metrics import roc_auc_score
import numpy as np
import torch
from dlrm.utils import metrics
# pylint:disable=missing-docstring, no-self-use
class AucTest(absltest.TestCase):
def test_against_sklearn_exact(self):
for num_samples in [100, 1000, 10000, 100000, 1048576]:
y = np.random.randint(0, 2, num_samples)
scores = np.random.power(10, num_samples)
ref_auc = roc_auc_score(y, scores)
test_auc = metrics.ref_roc_auc_score(y, scores)
assert ref_auc == test_auc
def test_against_sklearn_almost_exact(self):
for num_samples in [100, 1000, 10000, 100000, 1048576]:
y = np.random.randint(0, 2, num_samples)
scores = np.random.power(10, num_samples)
ref_auc = roc_auc_score(y, scores)
test_auc = metrics.ref_roc_auc_score(y, scores, exact=False)
np.testing.assert_almost_equal(ref_auc, test_auc)
def test_pytorch_against_sklearn(self):
for num_samples in [100, 1000, 10000, 100000, 1048576]:
y = np.random.randint(0, 2, num_samples).astype(np.float32)
scores = np.random.power(10, num_samples).astype(np.float32)
ref_auc = roc_auc_score(y, scores)
test_auc = metrics.roc_auc_score(torch.from_numpy(y).cuda(), torch.from_numpy(scores).cuda())
np.testing.assert_almost_equal(ref_auc, test_auc.cpu().numpy())
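# Added illustrative sketch (not part of the original test file): for binary
# labels with untied scores, ROC AUC equals the normalized Mann-Whitney U
# statistic computed from score ranks.  This is only meant to make the quantity
# compared against sklearn above more concrete.
def _rank_based_auc_sketch(y_true, y_score):
    y_true = np.asarray(y_true).astype(bool)
    y_score = np.asarray(y_score, dtype=np.float64)
    order = np.argsort(y_score)
    ranks = np.empty(y_score.size, dtype=np.float64)
    ranks[order] = np.arange(1, y_score.size + 1)
    n_pos = y_true.sum()
    n_neg = y_true.size - n_pos
    u_stat = ranks[y_true].sum() - n_pos * (n_pos + 1) / 2.0
    return u_stat / (n_pos * n_neg)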
if __name__ == '__main__':
absltest.main()
| apache-2.0 |
B3AU/waveTree | sklearn/utils/setup.py | 4 | 2703 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('arraybuilder', sources=['arraybuilder.c'])
config.add_extension('sparsefuncs', sources=['sparsefuncs.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("random",
sources=["random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
js850/pele | pele/gui/run.py | 1 | 27479 | import matplotlib
matplotlib.use("QT4Agg")
import traceback
import sys
import numpy as np
from PyQt4 import QtCore, QtGui, Qt
from pele.landscape import TSGraph
from pele.storage import Database
from pele.utils.events import Signal
from pele.config import config
from pele.gui.MainWindow import Ui_MainWindow
from pele.gui.bhrunner import BHManager
from pele.gui.dlg_params import DlgParams
from pele.gui.ui.dgraph_dlg import DGraphDialog
#from pele.gui.connect_explorer_dlg import ConnectExplorerDialog
from pele.gui.connect_run_dlg import ConnectViewer
from pele.gui.takestep_explorer import TakestepExplorer
from pele.gui.normalmode_browser import NormalmodeBrowser
from pele.gui._list_views import ListViewManager
from pele.gui._cv_viewer import HeatCapacityViewer
from pele.gui._rate_gui import RateViewer
from pele.gui.graph_viewer import GraphViewDialog
def excepthook(ex_type, ex_value, traceback_obj):
""" redirected exception handler """
errorbox = QtGui.QMessageBox()
msg = "An unhandled exception occurred:\n"+str(ex_type) + "\n\n"\
+ str(ex_value) + "\n\nTraceback:\n----------"
for line in traceback.format_tb(traceback_obj):
msg += "\n" + line
errorbox.setText(msg)
errorbox.setStandardButtons(QtGui.QMessageBox.Ignore | QtGui.QMessageBox.Cancel)
errorbox.setDefaultButton(QtGui.QMessageBox.Cancel)
if errorbox.exec_() == QtGui.QMessageBox.Cancel:
raise ex_value
class MySelection(object):
"""keep track of which minima have been selected and whether those coordinates have been modified
"""
def __init__(self):
self.minimum1 = None
self.minimum2 = None
self.coords1 = None
self.coords2 = None
class MainGUI(QtGui.QMainWindow):
"""
this is the main class for the pele gui
Parameters
----------
app :
the application object returned by QtGui.QApplication()
systemtype : system class object
the system class
"""
def __init__(self, app, systemtype, parent=None):
QtGui.QWidget.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.systemtype = systemtype
self.transition=None
self.app = app
self.double_ended_connect_runs = []
self.pick_count = 0
self.minima_selection = MySelection()
self.on_minimum_1_selected = Signal()
self.on_minimum_2_selected = Signal()
# set up the list manager
self.list_manager = ListViewManager(self)
# define the system
self.NewSystem()
# finish setting up the list manager (this must be done after NewSystem() is called)
self.list_manager.finish_setup()
#try to load the pymol viewer.
try:
self.usepymol = self.system.params.gui.use_pymol
except (KeyError, AttributeError):
self.usepymol = config.getboolean("gui", "use_pymol")
if self.usepymol:
try:
from pymol_viewer import PymolViewer
self.pymolviewer = PymolViewer(self.system.load_coords_pymol)
            except (ImportError, NotImplementedError):
self.usepymol = False
        if not self.usepymol:
#note: glutInit() must be called exactly once. pymol calls it
#during pymol.finish_launching(), so if we call it again it will
#give an error. On the other hand, if we're not using pymol we
#must call it.
from OpenGL.GLUT import glutInit
glutInit()
self.bhmanager = None
def NewSystem(self):
"""
this is called to initialize the system with a database
"""
self.system = self.systemtype()
self.connect_db()
def on_action_edit_params_triggered(self, checked=None):
if checked is None: return
self.paramsdlg = DlgParams(self.system.params)
self.paramsdlg.show()
def processEvents(self):
self.app.processEvents()
def on_action_db_connect_triggered(self, checked=None):
"""
launch a file browser to connect to an existing database
"""
if checked is None: return
filename = QtGui.QFileDialog.getSaveFileName(self, 'Select database', '.')
if len(filename) > 0:
self.connect_db(filename)
def connect_db(self, database=":memory:"):
"""
connect to an existing database at location filename
"""
self.list_manager.clear()
# note: database can be either Database, or string, or QString
if isinstance(database, Database):
self.system.database = database
else:
self.system.database = self.system.create_database(db=database)
#add minima to listWidged. do sorting after all minima are added
for minimum in self.system.database.minima():
self.NewMinimum(minimum, sort_items=False)
self.list_manager._sort_minima()
self.NewTS(self.system.database.transition_states(order_energy=True))
self.list_manager.resize_columns_minima()
self.list_manager.resize_columns_ts()
self.system.database.on_minimum_added.connect(self.NewMinimum)
self.system.database.on_minimum_removed(self.RemoveMinimum)
self.system.database.on_ts_added.connect(self.NewTS)
self.system.database.on_ts_removed.connect(self.RemoveTS)
def SelectMinimum(self, minimum, set_selected=True):
"""when you click on a minimum in the basinhopping tab
"""
# print "selecting minimum", minimum._id, minimum.energy
if set_selected:
self.list_manager._select_main(minimum)
return
self.ui.ogl_main.setSystem(self.system)
self.ui.ogl_main.setCoords(minimum.coords)
self.ui.ogl_main.setMinimum(minimum)
self.ui.oglTS.setSystem(self.system)
if self.usepymol:
self.pymolviewer.update_coords([minimum.coords], index=1, delete_all=True)
def _SelectMinimum1(self, minimum, set_selected=True):
"""set the first minimum displayed in the connect tab"""
if set_selected:
self.list_manager._select1(minimum)
return
print "selecting minimum 1:", minimum._id, minimum.energy
self.ui.oglPath.setSystem(self.system)
self.ui.oglPath.setCoords(minimum.coords, index=1)
# self.ui.oglPath.setMinimum(minimum, index=1)
self.minima_selection.minimum1 = minimum
self.minima_selection.coords1 = minimum.coords
self.neb = None
if self.usepymol:
self.pymolviewer.update_coords([minimum.coords], index=1)
self.on_minimum_1_selected(minimum)
def _SelectMinimum2(self, minimum, set_selected=True):
"""set the second minimum displayed in the connect tab"""
if set_selected:
self.list_manager._select2(minimum)
return
print "selecting minimum 2:", minimum._id, minimum.energy
self.ui.oglPath.setSystem(self.system)
self.ui.oglPath.setCoords(minimum.coords, index=2)
# self.ui.oglPath.setMinimum(minimum, index=2)
self.minima_selection.minimum2 = minimum
self.minima_selection.coords2 = minimum.coords
self.neb = None
if self.usepymol:
self.pymolviewer.update_coords([minimum.coords], index=2)
self.on_minimum_2_selected(minimum)
def get_selected_minima(self):
"""return the two minima that have been chosen in the gui"""
m1, m2 = self.minima_selection.minimum1, self.minima_selection.minimum2
if m1 is None or m2 is None:
raise Exception("you must select two minima first")
return m1, m2
def get_selected_coords(self):
"""return the two sets of coordinates that have been chosen in the gui
note, that these may not be the same as what is stored in the minimum. E.g. they
may be the aligned structures
"""
coords1, coords2 = self.minima_selection.coords1, self.minima_selection.coords2
if coords1 is None or coords2 is None:
raise Exception("you must select two minima first")
return coords1, coords2
def show_TS(self, ts):
"""
show the transition state and the associated minima in the 3d viewer
"""
self.ui.oglTS.setSystem(self.system)
m1 = ts.minimum1
m2 = ts.minimum2
# put them in best alignment
mindist = self.system.get_mindist()
dist, m1coords, tscoords = mindist(m1.coords, ts.coords)
dist, m2coords, tscoords = mindist(m2.coords, ts.coords)
self.tscoordspath = np.array([m1coords, tscoords, m2coords])
labels = ["minimum: energy " + str(m1.energy) + " id " + str(m1._id)]
labels += ["ts: energy " + str(ts.energy)]
labels += ["minimum: energy " + str(m2.energy) + " id " + str(m2._id)]
self.ui.oglTS.setCoordsPath(self.tscoordspath, frame=1, labels=labels)
if self.usepymol:
self.pymolviewer.update_coords(self.tscoordspath, index=1, delete_all=True)
def on_btnAlign_clicked(self, clicked=None):
"""use mindist to align the minima.
called when the align button is pressed
"""
if clicked is None: return
coords1, coords2 = self.get_selected_coords()
align = self.system.get_mindist()
pot = self.system.get_potential()
print "energy before alignment", pot.getEnergy(coords1), pot.getEnergy(coords2)
dist, coords1, coords2 = align(coords1, coords2)
print "energy after alignment", pot.getEnergy(coords1), pot.getEnergy(coords2)
print "best alignment distance", dist
self.ui.oglPath.setCoords(coords1, index=1)
self.ui.oglPath.setCoords(coords2, index=2)
self.minima_selection.coords1 = coords1
self.minima_selection.coords2 = coords2
if self.usepymol:
self.pymolviewer.update_coords([coords1], index=1)
self.pymolviewer.update_coords([coords2], index=2)
def on_btnNEB_clicked(self, clicked=None):
"""do an NEB run (not a connect run). Don't find best alignment first"""
if clicked is None: return
coords1, coords2 = self.get_selected_coords()
from neb_explorer import NEBExplorer
if not hasattr(self, "nebexplorer"):
self.nebexplorer = NEBExplorer(system=self.system, app=self.app, parent=self)
self.nebexplorer.show()
self.nebexplorer.new_neb(coords1, coords2, run=False)
# def showFrame(self, i):
# if hasattr(self, "nebcoords"):
# self.ui.oglPath.setCoords(self.nebcoords[i,:])
def on_minimum_picked(self, min1):
"""called when a minimimum is clicked on in the graph or disconnectivity graph"""
if (self.pick_count % 2) == 0:
self._SelectMinimum1(min1)
else:
self._SelectMinimum2(min1)
self.pick_count += 1
def on_btnDisconnectivity_graph_clicked(self, clicked=None):
"""show the disconnectivity graph
it is interactive, so that when you click on an end point
that minima is selected
"""
if clicked is None: return
if not hasattr(self, "dgraph_dlg"):
self.dgraph_dlg = DGraphDialog(self.system.database, parent=self)
self.dgraph_dlg.dgraph_widget.minimum_selected.connect(self.on_minimum_picked)
self.dgraph_dlg.dgraph_widget.minimum_selected.connect(self.SelectMinimum)
self.dgraph_dlg.rebuild_disconnectivity_graph()
self.dgraph_dlg.show()
def on_btnShowGraph_clicked(self, clicked=None):
""" show the graph of minima and transition states
make it interactive, so that when you click on a point
that minima is selected
"""
if clicked is None: return
self.pick_count = 0
if not hasattr(self, "graphview"):
self.graphview = GraphViewDialog(self.system.database, parent=self, app=self.app)
self.graphview.widget.on_minima_picked.connect(self.on_minimum_picked)
self.graphview.widget.on_minima_picked.connect(self.SelectMinimum)
self.graphview.show()
self.graphview.widget.make_graph()
try:
m1, m2 = self.get_selected_minima()
self.graphview.widget._show_minimum_energy_path(m1, m2)
except:
self.graphview.widget.show_graph()
def on_pushNormalmodesMin_clicked(self, clicked=None):
if clicked is None: return
if not hasattr(self, "normalmode_explorer"):
self.normalmode_explorer = NormalmodeBrowser(self, self.system, self.app)
min1 = self.ui.ogl_main.minima[1]
if min1 is None:
raise RuntimeError("you must select a minimum first")
self.normalmode_explorer.set_coords(min1.coords)
self.normalmode_explorer.show()
def on_pushNormalmodesTS_clicked(self, clicked=None):
if clicked is None: return
if not hasattr(self, "normalmode_explorer"):
self.normalmode_explorer = NormalmodeBrowser(self, self.system, self.app)
ts = self.list_manager.get_selected_ts()
if ts is None:
raise RuntimeError("you must select a transition state first")
self.normalmode_explorer.set_coords(ts.coords)
self.normalmode_explorer.show()
def NewMinimum(self, minimum, sort_items=True):
""" add a new minimum to the system """
self.list_manager.NewMinimum(minimum, sort_items=sort_items)
def RemoveMinimum(self, minimum):
"""remove a minimum from self.minima_list_model"""
self.list_manager.RemoveMinimum(minimum)
def NewTS(self, ts, sort=True):
"""add new transition state, or list of transition states"""
self.list_manager.NewTS(ts, sort=sort)
def RemoveTS(self, ts):
"""remove transition state"""
raise Exception("removing transition states not implemented yet")
obj = self.ui.list_TS
tsid = id(ts)
itms = self.ui.list_TS.findItems('*', QtCore.Qt.MatchWildcard)
for i in itms:
if i.tsid == tsid:
obj.takeItem(obj.row(i))
def set_basinhopping_number_alive(self, nalive):
"""set the label that shows how many basinhopping processes are alive"""
self.ui.label_bh_nproc.setText("%d B.H. processes" % nalive)
def on_btn_start_basinhopping_clicked(self, clicked=None):
"""this is run when the start basinhopping button is clicked"""
if clicked is None: return
# set up the basinhopping manager if not already done
if self.bhmanager is None:
self.bhmanager = BHManager(self.system, self.system.database,
on_number_alive_changed=self.set_basinhopping_number_alive)
# get the number of steps from the input box
nstepsstr = self.ui.lineEdit_bh_nsteps.text()
nsteps = None
try:
nsteps = int(nstepsstr)
except ValueError:
# ignore the text if it is the default text
if "steps" not in nstepsstr:
sys.stderr.write("can't convert %s to integer\n" % nstepsstr)
# start a basinhopping run
self.bhmanager.start_worker(nsteps=nsteps)
def on_btn_stop_basinhopping_clicked(self, clicked=None):
if clicked is None: return
self.bhmanager.kill_all_workers()
def on_action_delete_minimum_triggered(self, checked=None):
if checked is None: return
min1 = self.ui.ogl_main.minima[1]
ret = QtGui.QMessageBox.question(self, "Deleting minima",
"Do you want to delete minima %d with energy %g"%(min1._id, min1.energy),
QtGui.QMessageBox.Ok, QtGui.QMessageBox.Cancel)
if(ret == QtGui.QMessageBox.Ok):
print "deleting minima"
print "deleting minimum", min1._id, min1.energy
self.RemoveMinimum(min1)
self.system.database.removeMinimum(min1)
def on_btnConnect_clicked(self, clicked=None):
if clicked is None: return
return self._doubleEndedConnect(reconnect=False)
def on_btnReconnect_clicked(self, clicked=None):
if clicked is None: return
return self._doubleEndedConnect(reconnect=True)
def _doubleEndedConnect(self, reconnect=False, min1min2=None):
"""
launch a double ended connect run to connect the two selected minima.
If the minima are not connected, or reconnect is True, launch a connect browser
in a separate window. Else just show the path in the OGL viewer
"""
# determine which minima to connect
if min1min2 is None:
min1, min2 = self.get_selected_minima()
else:
min1, min2 = min1min2
database = self.system.database
if not reconnect:
# check if the minima are already connected
double_ended_connect = self.system.get_double_ended_connect(min1, min2, database,
fresh_connect=False, verbosity=0)
if double_ended_connect.graph.areConnected(min1, min2):
print "minima are already connected. loading smoothed path in viewer"
mints, S, energies = double_ended_connect.returnPath()
clist = [m.coords for m in mints]
smoothpath = self.system.smooth_path(clist)
coords = np.array(smoothpath)
self.nebcoords = coords
self.nebenergies = np.array(energies)
print "setting path in oglPath"
self.ui.oglPath.setCoordsPath(coords)#, labels)
# self.ui.oglPath.setCoords(coords[0,:], 1)
# self.ui.oglPath.setCoords(None, 2)
# self.ui.sliderFrame.setRange(0, coords.shape[0]-1)
if self.usepymol:
self.pymolviewer.update_coords(self.nebcoords, index=1, delete_all=True)
return
# make the connect viewer
decviewer = ConnectViewer(self.system, self.system.database, min1, min2, parent=self, app=self.app)
print "starting double ended"
decviewer.show()
decviewer.start()
# store pointers
self.double_ended_connect_runs.append(decviewer)
def on_btn_connect_in_optim_clicked(self, clicked=None):
"""spawn an OPTIM job and retrieve the minima and transition states
it finds"""
if clicked is None: return
min1, min2 = self.get_selected_minima()
# existing_minima = set(self.system.database.minima())
spawner = self.system.get_optim_spawner(min1.coords, min2.coords)
spawner.run()
db = self.system.database
newminima, newts = spawner.load_results(self.system.database)
# for m in newminima:
# if m not in existing_minima:
# self.NewMinimum(m)
#now use DoubleEndedConnect to test if they are connected
graph = TSGraph(db)
if graph.areConnected(min1, min2):
#use double ended connect to draw the interpolated path
#this is ugly
self._doubleEndedConnect(reconnect=False, min1min2=(min1, min2))
def _merge_minima(self, min1, min2):
mindist = self.system.get_mindist()
dist, x1, x2 = mindist(min1.coords, min2.coords)
query = "Do you want to merge minimum %d with energy %g" %(min1._id, min1.energy)
query += " with minimum %d with energy %g" %(min2._id, min2.energy)
query += " separated by distance %g" % (dist)
ret = QtGui.QMessageBox.question(self, "Merging minima",
query,
QtGui.QMessageBox.Ok, QtGui.QMessageBox.Cancel)
if(ret == QtGui.QMessageBox.Ok):
m1, m2 = min1, min2
if m1._id > m2._id:
m1, m2 = m2, m1
print "merging minima", m1._id, m2._id#, ": minimum", m2._id, "will be deleted"
self.system.database.mergeMinima(m1, m2)
self.RemoveMinimum(m2)
def on_action_merge_minima_triggered(self, checked=None):
if checked is None: return
min1, min2 = self.get_selected_minima()
self._merge_minima(min1, min2)
def on_action_compute_thermodynamic_info_triggered(self, checked=None):
if checked is None: return
def on_done(): print "done computing thermodynamic info"
self._on_done = on_done # because on_finish stores a weak reference
self.compute_thermodynamic_information(on_finish=self._on_done )
# def launch_connect_explorer(self):
# coords1, coords2 = self.get_selected_coords()
#
# if not hasattr(self, "local_connect_explorer"):
# self.local_connect_explorer = ConnectExplorerDialog(self.system)
# self.local_connect_explorer.nebwgt.process_events.connect(self.processEvents)
# self.local_connect_explorer.show()
# self.local_connect_explorer.createNEB(coords1, coords2)
# self.local_connect_explorer.runNEB()
def on_btn_close_all_clicked(self, checked=None):
if checked is None: return
print "closing all windows"
for dv in self.double_ended_connect_runs:
dv.hide()
# del dv
self.double_ended_connect_runs = []
try:
self.local_connect_explorer.hide()
del self.local_connect_explorer
except AttributeError: pass
try:
self.dgraph_dlg.hide()
del self.dgraph_dlg
except AttributeError: pass
try:
self.nebexplorer.hide()
del self.nebexplorer
except AttributeError: pass
try:
self.rate_viewer.hide()
del self.rate_viewer
except AttributeError: pass
def on_btn_connect_all_clicked(self, checked=None):
if checked is None: return
from pele.gui.connect_all import ConnectAllDialog
# if hasattr(self, "connect_all"):
# if not self.connect_all.isVisible():
# self.connect_all.show()
# if not self.connect_all.is_running()
self.connect_all = ConnectAllDialog(self.system, self.system.database,
parent=self, app=self.app)
self.connect_all.show()
self.connect_all.start()
def on_pushTakestepExplorer_clicked(self):
if not hasattr(self, "takestep_explorer"):
self.takestep_explorer = TakestepExplorer(parent=self, system = self.system, app = self.app,
database = self.system.database)
self.takestep_explorer.show()
def on_btn_heat_capacity_clicked(self, clicked=None):
if clicked is None: return
self.cv_viewer = HeatCapacityViewer(self.system, self.system.database, parent=self)
self.cv_viewer.show()
self.cv_viewer.rebuild_cv_plot()
def compute_thermodynamic_information(self, on_finish=None):
"""compute thermodynamic information for minima and ts in the background
call on_finish when the calculation is done
"""
        # TODO: deal carefully with what happens if this is called again
        # before the first calculation is done: if self.thermo_worker is
        # overwritten, will the first calculation stop?
from pele.gui._cv_viewer import GetThermodynamicInfoParallelQT
self.thermo_worker = GetThermodynamicInfoParallelQT(self.system, self.system.database, npar=1)
if on_finish is not None:
self.thermo_worker.on_finish.connect(on_finish)
self.thermo_worker.start()
njobs = self.thermo_worker.njobs
print "calculating thermodynamics for", njobs, "minima and transition states"
# def _compute_rates(self, min1, min2, T=1.):
# """compute rates without first calculating thermodynamics
# """
# print "computing rates at temperature T =", T
# tslist = [ts for ts in self.system.database.transition_states()
# if ts.fvib is not None]
# rcalc = RateCalculation(tslist, [min1], [min2], T=T)
# r12, r21 = rcalc.compute_rates()
# print "rate from", min1._id, "to", min2._id, "=", r12
# print "rate from", min2._id, "to", min1._id, "=", r21
#
# def compute_rates(self, min1, min2, T=1.):
# """compute the transition rate from min1 to min2 and vice versa"""
# def on_finish():
# print "thermodynamic calculation finished"
# self._compute_rates(min1, min2)
# self._on_finish_thermo_reference = on_finish # so it doeesn't get garbage collected
# self.compute_thermodynamic_information(on_finish=on_finish)
def on_btn_rates_clicked(self, clicked=None):
if clicked is None: return
if not hasattr(self, "rate_viewer"):
m1, m2 = self.minima_selection.minimum1, self.minima_selection.minimum2
self.rate_viewer = RateViewer(self.system, self.system.database, parent=self)
if m1 is not None:
self.rate_viewer.update_A(m1)
if m2 is not None:
self.rate_viewer.update_B(m2)
self.on_minimum_1_selected.connect(self.rate_viewer.update_A)
self.on_minimum_2_selected.connect(self.rate_viewer.update_B)
self.rate_viewer.show()
# min1, min2 = self.get_selected_minima()
# self.compute_rates(min1, min2)
#def refresh_pl():
#pl.pause(0.000001)
def run_gui(system, db=None, application=None):
"""
The top level function that will launch the gui for a given system
Parameters
----------
system : System class
A pele system, derived from BaseSystem. All information
about the system is in this class.
db : pele database or string, optional
connect to this database or the database at this file location
application : QApplication
Use this QApplication object rather than creating a new one
"""
if application is None:
application = QtGui.QApplication(sys.argv)
sys.excepthook = excepthook
myapp = MainGUI(application, system)
if db is not None:
myapp.connect_db(db)
# refresh_timer = QtCore.QTimer()
# refresh_timer.timeout.connect(refresh_pl)
# refresh_timer.start(0.)
myapp.show()
sys.exit(application.exec_())
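# Added usage sketch (not part of the original module): MainGUI instantiates
# the system itself via ``systemtype()``, so ``run_gui`` should be given a
# zero-argument callable or class.  ``LJCluster`` is only an assumed example of
# a concrete pele system here.
def _run_gui_example():  # pragma: no cover
    from pele.systems import LJCluster
    run_gui(lambda: LJCluster(13), db="lj13.sqlite")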
#def run_gui(systemtype):
# app = QtGui.QApplication(sys.argv)
# import pylab as pl
# myapp = MainGUI(systemtype)
# refresh_timer = QtCore.QTimer()
# refresh_timer.timeout.connect(refresh_pl)
# refresh_timer.start(0.)
#
# myapp.show()
# sys.exit(app.exec_())
| gpl-3.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/backends/qt_editor/figureoptions.py | 4 | 6227 | # -*- coding: utf-8 -*-
#
# Copyright © 2009 Pierre Raybaut
# Licensed under the terms of the MIT License
# see the mpl licenses directory for a copy of the license
"""Module that provides a GUI-based editor for matplotlib's figure options"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os.path as osp
import matplotlib.backends.qt_editor.formlayout as formlayout
from matplotlib.backends.qt_compat import QtGui
from matplotlib import markers
from matplotlib.colors import colorConverter, rgb2hex
def get_icon(name):
import matplotlib
basedir = osp.join(matplotlib.rcParams['datapath'], 'images')
return QtGui.QIcon(osp.join(basedir, name))
LINESTYLES = {'-': 'Solid',
'--': 'Dashed',
'-.': 'DashDot',
':': 'Dotted',
'none': 'None',
}
DRAWSTYLES = {'default': 'Default',
'steps': 'Steps',
}
MARKERS = markers.MarkerStyle.markers
def figure_edit(axes, parent=None):
"""Edit matplotlib figure options"""
sep = (None, None) # separator
has_curve = len(axes.get_lines()) > 0
# Get / General
xmin, xmax = axes.get_xlim()
ymin, ymax = axes.get_ylim()
general = [('Title', axes.get_title()),
sep,
(None, "<b>X-Axis</b>"),
('Min', xmin), ('Max', xmax),
('Label', axes.get_xlabel()),
('Scale', [axes.get_xscale(), 'linear', 'log']),
sep,
(None, "<b>Y-Axis</b>"),
('Min', ymin), ('Max', ymax),
('Label', axes.get_ylabel()),
('Scale', [axes.get_yscale(), 'linear', 'log']),
sep,
('(Re-)Generate automatic legend', False),
]
# Save the unit data
xconverter = axes.xaxis.converter
yconverter = axes.yaxis.converter
xunits = axes.xaxis.get_units()
yunits = axes.yaxis.get_units()
if has_curve:
# Get / Curves
linedict = {}
for line in axes.get_lines():
label = line.get_label()
if label == '_nolegend_':
continue
linedict[label] = line
curves = []
linestyles = list(six.iteritems(LINESTYLES))
drawstyles = list(six.iteritems(DRAWSTYLES))
markers = list(six.iteritems(MARKERS))
curvelabels = sorted(linedict.keys())
for label in curvelabels:
line = linedict[label]
color = rgb2hex(colorConverter.to_rgb(line.get_color()))
ec = rgb2hex(colorConverter.to_rgb(line.get_markeredgecolor()))
fc = rgb2hex(colorConverter.to_rgb(line.get_markerfacecolor()))
curvedata = [('Label', label),
sep,
(None, '<b>Line</b>'),
('Line Style', [line.get_linestyle()] + linestyles),
('Draw Style', [line.get_drawstyle()] + drawstyles),
('Width', line.get_linewidth()),
('Color', color),
sep,
(None, '<b>Marker</b>'),
('Style', [line.get_marker()] + markers),
('Size', line.get_markersize()),
('Facecolor', fc),
('Edgecolor', ec),
]
curves.append([curvedata, label, ""])
# make sure that there is at least one displayed curve
has_curve = bool(curves)
datalist = [(general, "Axes", "")]
if has_curve:
datalist.append((curves, "Curves", ""))
def apply_callback(data):
"""This function will be called to apply changes"""
if has_curve:
general, curves = data
else:
general, = data
# Set / General
title, xmin, xmax, xlabel, xscale, ymin, ymax, ylabel, yscale, \
generate_legend = general
if axes.get_xscale() != xscale:
axes.set_xscale(xscale)
if axes.get_yscale() != yscale:
axes.set_yscale(yscale)
axes.set_title(title)
axes.set_xlim(xmin, xmax)
axes.set_xlabel(xlabel)
axes.set_ylim(ymin, ymax)
axes.set_ylabel(ylabel)
# Restore the unit data
axes.xaxis.converter = xconverter
axes.yaxis.converter = yconverter
axes.xaxis.set_units(xunits)
axes.yaxis.set_units(yunits)
axes.xaxis._update_axisinfo()
axes.yaxis._update_axisinfo()
if has_curve:
# Set / Curves
for index, curve in enumerate(curves):
line = linedict[curvelabels[index]]
label, linestyle, drawstyle, linewidth, color, \
marker, markersize, markerfacecolor, markeredgecolor \
= curve
line.set_label(label)
line.set_linestyle(linestyle)
line.set_drawstyle(drawstyle)
line.set_linewidth(linewidth)
line.set_color(color)
                if marker != 'none':
line.set_marker(marker)
line.set_markersize(markersize)
line.set_markerfacecolor(markerfacecolor)
line.set_markeredgecolor(markeredgecolor)
# re-generate legend, if checkbox is checked
if generate_legend:
draggable = None
ncol = 1
if axes.legend_ is not None:
old_legend = axes.get_legend()
draggable = old_legend._draggable is not None
ncol = old_legend._ncol
new_legend = axes.legend(ncol=ncol)
if new_legend:
new_legend.draggable(draggable)
# Redraw
figure = axes.get_figure()
figure.canvas.draw()
data = formlayout.fedit(datalist, title="Figure options", parent=parent,
icon=get_icon('qt4_editor_options.svg'),
apply=apply_callback)
if data is not None:
apply_callback(data)
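# Added usage sketch (not part of the original module): the Qt backend normally
# calls figure_edit from the toolbar's "edit parameters" button; it can also be
# invoked directly on an existing axes once a QApplication is running.  This is
# a hedged illustration rather than documented matplotlib API.
def _figure_edit_example():  # pragma: no cover
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [1, 0], label="example line")
    figure_edit(ax)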
| mit |
hugobowne/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 55 | 19053 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
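# Added illustrative sketch (not part of the original tests): cosine-similarity
# LSH of the kind LSHForest builds on hashes each sample with the sign pattern
# of random Gaussian projections -- nearby directions tend to agree on many
# bits.  This is a simplified, hedged illustration, not the estimator's actual
# hashing code.
def _random_hyperplane_hash_sketch(X, n_bits=32, seed=0):
    X = np.asarray(X, dtype=float)
    rng = np.random.RandomState(seed)
    hyperplanes = rng.randn(X.shape[1], n_bits)
    return (np.dot(X, hyperplanes) > 0).astype(np.uint8)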
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
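if __name__ == "__main__":
    # Quick self-contained sanity sketch (added for illustration; it is not one of
    # the test cases above). It assumes a scikit-learn version that still ships
    # LSHForest and simply compares its approximate neighbors against exact ones.
    import numpy as np
    from sklearn.neighbors import LSHForest, NearestNeighbors
    rng = np.random.RandomState(0)
    X = rng.rand(200, 10)
    queries = rng.rand(5, 10)
    _, exact_idx = NearestNeighbors(n_neighbors=5).fit(X).kneighbors(queries)
    _, approx_idx = LSHForest(n_estimators=10, random_state=0).fit(X).kneighbors(
        queries, n_neighbors=5)
    recall = np.mean([len(set(a) & set(e)) / 5.0
                      for a, e in zip(approx_idx, exact_idx)])
    print("approximate vs exact 5-NN recall: %.2f" % recall)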
| bsd-3-clause |
structRecomputation/computations | _modules/auxiliary_exact.py | 1 | 1627 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def plot_choices(rslts, tasks):
""" Function to produce plot for choice patterns.
"""
for task in tasks:
choice_probabilities = rslts[task]
deciles = range(40)
colors = ['blue', 'yellow', 'orange', 'red']
width = 0.9
# Plotting
bottom = [0] * 40
# Initialize plot
ax = plt.figure(figsize=(12, 8)).add_subplot(111)
labels = ['Home', 'School', 'Occupation A', 'Occupation B']
for j, i in enumerate([3, 2, 0, 1]):
heights = choice_probabilities[:, i]
plt.bar(deciles, heights, width, bottom=bottom, color=colors[j],
alpha=0.70)
bottom = [heights[i] + bottom[i] for i in range(40)]
# Both Axes
ax.tick_params(labelsize=16, direction='out', axis='both', top='off',
right='off')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
# X axis
ax.set_xlabel('Period', fontsize=16)
ax.set_xlim([0, 40])
# Y axis
ax.set_ylabel('Share of Population', fontsize=16)
ax.yaxis.get_major_ticks()[0].set_visible(False)
# Legend
plt.legend(labels, loc='upper center', bbox_to_anchor=(0.5, -0.10),
fancybox=False, frameon=False, shadow=False, ncol=4, fontsize=20)
    # Write out to file
plt.savefig('choices_' + task + '.png', bbox_inches='tight',
format='png')
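if __name__ == '__main__':
    # Illustrative usage sketch (added for clarity; not part of the original script).
    # plot_choices() expects `rslts` to map each task name to a (40, 4) array of
    # per-period choice shares; the task name and data below are made up.
    import numpy as np
    rng = np.random.RandomState(123)
    shares = rng.dirichlet(np.ones(4), size=40)  # 40 periods, 4 choices, rows sum to 1
    plot_choices({'example_task': shares}, ['example_task'])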
| mit |
mramire8/active | datautil/userstudy.py | 1 | 3986 | __author__ = 'mramire8'
import sys, os
sys.path.append(os.path.abspath("."))
import argparse
from sklearn.feature_extraction.text import CountVectorizer
from bunch import Bunch
from datautil.textutils import StemTokenizer
from datautil.load_data import *
from strategy import randomsampling
import codecs
import random
ap = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
ap.add_argument('--train',
metavar='TRAIN',
#default="C:/Users/mramire8/Documents/Research/Oracle confidence and Interruption/newExperiment/evidencefit/data/evidence-data-nolabel.txt",
default="20news",
help='training data (libSVM format)')
ap.add_argument('--file',
metavar='FILE',
#default="C:/Users/mramire8/Documents/Research/Oracle confidence and Interruption/newExperiment/evidencefit/data/evidence-data-nolabel.txt",
default="file.txt",
help='output file name')
ap.add_argument('--packsize',
metavar='packsize',
type=int,
default=40,
help='number of instances per pack')
args = ap.parse_args()
def main():
vct = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=True, ngram_range=(1, 1),
token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
vct_analizer = vct.build_analyzer()
print("Start loading ...")
# data fields: data, bow, file_names, target_names, target
########## NEWS GROUPS ###############
# easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
categories = [['alt.atheism', 'talk.religion.misc'],
['comp.graphics', 'comp.windows.x'],
['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
['rec.sport.baseball', 'sci.crypt']]
if "imdb" in args.train:
########## IMDB MOVIE REVIEWS ###########
data = Bunch(load_imdb(args.train, shuffle=True, rnd=2356, vct=vct)) # should brind data as is
elif "aviation" in args.train:
raise Exception("We are not ready for that data yet")
elif "20news" in args.train:
########## 20 news groups ######
data = Bunch(load_20newsgroups(categories=categories[0], vectorizer=vct, min_size=50)) # for testing purposes
elif "dummy" in args.train:
########## DUMMY DATA###########
data = Bunch(load_dummy("C:/Users/mramire8/Documents/code/python/data/dummy", shuffle=True,rnd=2356,vct=vct))
else:
raise Exception("We do not know that dataset")
print("Data %s" % args.train)
total = len(data.train.data)
print("Data size %s" % total)
#print(data.train.data[0])
## prepare pool for the sampling
pool = Bunch()
pool.data = data.train.bow.tocsr() # full words, for training
pool.target = data.train.target
pool.predicted = []
pool.remaining = set(range(pool.data.shape[0])) # indices of the pool
bt = randomsampling.BootstrapFromEach(87654321)
for i in range(7):
query_index = bt.bootstrap(pool=pool, k=args.packsize) # get instances from each class
filename = "{0}-P{1}.txt".format(args.train,i)
f = codecs.open(filename, 'a+', 'utf-8')
        # write each selected document to the pack file as:
        #   <pack index> \t <label name> \t <document text>
        random.shuffle(query_index)
        for di in query_index:
            x = unicode(data.train.data[di].replace("\n","<br>"))
            #y = data.train.target[di]
            y = data.train.target_names[data.train.target[di]]
            f.write(str(i))
            f.write("\t")
            f.write(str(y))
            f.write("\t")
            f.write(x)
            f.write("\n")
f.close()
pool.remaining.difference_update(query_index) # remove the used ones
#print("hi there", file="file.txt")
if __name__ == '__main__':
main()
| apache-2.0 |
anurag313/scikit-learn | sklearn/lda.py | 5 | 17773 | """
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.std_[:, np.newaxis] * s * sc.std_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
        raise TypeError('shrinkage must be a string or a number (int or float)')
return s
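# For example (illustrative): _cov(X) and _cov(X, 'empirical') return the plain
# empirical covariance, _cov(X, 'auto') applies Ledoit-Wolf shrinkage rescaled back
# to the original feature scales, and _cov(X, 0.3) returns
# 0.7 * empirical_covariance(X) + 0.3 * mu * I, where mu is the average variance
# (trace divided by n_features).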
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
        Class means, one row per class.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
        Number of components (<= n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
        Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within-class (univariate) scaling by the class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
        # SVD of centered (within-class) scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
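if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original module).
    # The toy data below is made up; it simply exercises the shrinkage solvers and
    # the dimensionality-reducing transform described in the class docstring.
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 3) + [2, 0, 0], rng.randn(20, 3) - [2, 0, 0]])
    y = np.array([0] * 20 + [1] * 20)
    clf = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
    print("training accuracy: %.2f" % clf.score(X, y))
    X_proj = LDA(solver='eigen', shrinkage='auto', n_components=1).fit(X, y).transform(X)
    print("projected shape: %s" % (X_proj.shape,))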
| bsd-3-clause |
drusk/pml | pml/unsupervised/pca.py | 1 | 12771 | # Copyright (C) 2012 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Implements principal component analysis (PCA) and related operations.
@author: drusk
"""
import numpy as np
import numpy.linalg as linalg
import pandas as pd
from pml.data import model
from pml.utils import plotting
class ReducedDataSet(model.DataSet):
"""
A DataSet which has had dimensionality reduction performed on it.
Columns are interpreted as features in the data set, and rows are
observations.
This dimensionally reduced data set has all of the observations of the
original, but its features have been adjusted to be linear combinations
of the originals.
Those features with little variance may have been dropped during the
dimensionality reduction process. Use the percent_variance() method to
find out how much of the original variance has been retained in the
reduced features.
"""
def __init__(self, data, original_data, eigenvalues, weights):
"""
Creates a new ReducedDataSet.
Args:
data: numpy.array
The raw array with the new data.
original_data: model.DataSet
The original data before being reduced.
eigenvalues: numpy.array (1D)
The list of eigenvalues produced to determine which components in
the new feature space were most important. This includes all of
the eigenvalues, not just the ones for the components selected.
weights: numpy.array (2D)
            The weights 'matrix' that the input data is dot-producted with
            to produce the reduced data. Each column corresponds to a
            principal component and contains the coefficients for each
feature.
"""
# build a pandas DataFrame with the original row index
dataframe = pd.DataFrame(data, index=original_data.get_sample_ids())
super(ReducedDataSet, self).__init__(dataframe,
original_data.get_labels())
self.eigenvalues = eigenvalues
self.weights = weights
self._original_features = original_data.feature_list()
def percent_variance(self):
"""
Calculates the percentage of the original DataSet's variance which is
still present in this dimensionally reduced DataSet.
Returns:
A floating point number between 0.0 and 1.0 representing the
percentage.
"""
return _percent_variance(self.eigenvalues, self.num_features())
def get_weights(self):
"""
Returns:
weights: numpy.array (2D)
                The weights 'matrix' that the input data was dot-producted with
                to produce the reduced data. Each column corresponds to a
                principal component and contains the coefficients for each
feature.
"""
return self.weights
def get_first_component_impacts(self):
"""
        Takes the weights for the features in the first principal component
and finds their absolute values ('impacts'). See also get_weights().
Returns:
impacts: pd.Series
Magnitude (absolute value) of weights by feature, sorted from
largest to smallest.
"""
impact = np.abs(pd.Series(self.weights[:, 0],
index=self._original_features))
return impact.order(ascending=False)
def get_eigenvalues(self):
"""
Returns:
eigenvalues: numpy.array (1D)
The list of eigenvalues produced to determine which components in
the new feature space were most important. This includes all of
the eigenvalues, not just the ones for the components selected.
NOTE: the eigenvalues are returned in the order they were calculated,
not sorted.
"""
return self.eigenvalues
def _percent_variance(eigenvalues, num_components):
"""
    Calculates the percentage of total variance found in the top principal
components.
Args:
eigenvalues: numpy.array (1D)
The list of all eigenvalues for a data set.
num_components: int
The number of principal components which will be selected.
Returns:
The percentage of total variance for the top number of principal
components selected. This will be a floating point number between 0.0
and 1.0.
"""
# make sure eigenvalues are a numpy array (allows fancy indexing)
eigenvalues = np.array(eigenvalues)
# get indices sorted smallest to largest
sorted_indices = np.argsort(eigenvalues)
# get largest
selected_indices = sorted_indices[-num_components:]
return np.sum(eigenvalues[selected_indices]) / np.sum(eigenvalues)
def _get_cov_mat_eigen_values_and_vectors(dataset):
"""
Calculates the eigenvalues and eigenvectors for the covariance matrix of a
DataSet.
Args:
dataset: model.DataSet
The data whose covariance matrix will be calculated.
Returns:
eigenvalues: numpy.array
A 1D array of the eigenvalues of the covariance matrix.
eigenvectors: numpy.array
A 2D array of the eigenvectors of the covariance matrix.
"""
# rowvar=0 so that rows are interpreted as observations
cov_mat = np.cov(dataset.get_data_frame(), rowvar=0)
eigenvalues, eigenvectors = linalg.eig(cov_mat)
return eigenvalues, eigenvectors
def _copy_and_remove_means(dataset):
"""
Copies the DataSet before removing the column means in order to preserve
the original data.
Args:
dataset: model.DataSet
The DataSet to copy and remove means from.
Returns:
The new, copied DataSet with column means removed.
"""
dataset = dataset.copy()
remove_means(dataset)
return dataset
def _get_descending_cov_mat_eigenvalues(dataset):
"""
Get the eigenvalues of the covariance matrix sorted largest to smallest.
Args:
dataset: model.DataSet
The data whose covariance matrix will be calculated.
Returns:
eigenvalues: list
The list of eigenvalues in descending order of magnitude.
"""
eigenvalues, _ = _get_cov_mat_eigen_values_and_vectors(dataset)
eigenvalues = eigenvalues.tolist()
# sort from largest to smallest
eigenvalues.sort()
eigenvalues.reverse()
return eigenvalues
def plot_pct_variance_per_principal_component(dataset, plot_type="bar"):
"""
Generates a plot to visualize the percentage of variance captured
by each principal component in the data set.
Args:
dataset: model.DataSet
The data set whose principal components will be examined. Should not
already be reduced.
plot_type: string
The plot type to generate. Supported plot types are:
'bar': vertical bar chart
'barh': horizontal bar chart
'line': line chart
Default is 'bar'.
Returns:
void, but produces a matplotlib plot.
Raises:
UnsupportedPlotTypeError if plot_type is not recognized.
"""
# Fail early: check plot type here right away even though the plotting
# module will check it later. Don't want a user with a large data set to
# wait for all the processing to occur only to find out they made a typo
# on the plot type.
plotting.verify_supported_series_plot_type(plot_type)
variances = get_pct_variance_per_principal_component(dataset)
plotting.plot_percent_series(variances, plot_type)
def get_pct_variance_per_principal_component(dataset):
"""
Determines the percentage of variance captured by each principal component
in the data set.
Args:
dataset: model.DataSet
The data set whose principal components will be examined. Should not
already be reduced.
Returns:
variances: pandas.Series
The percentage of variance (as a float between 0.0 and 1.0) for each
principal component.
"""
eigenvalues = _get_descending_cov_mat_eigenvalues(dataset)
return pd.Series(eigenvalues) / np.sum(eigenvalues)
def recommend_num_components(dataset, min_pct_variance=0.9):
"""
Recommends the number of principal components that should be selected in
order to keep a minimum specified percentage of the original data's
variance while also minimizing dimensionality.
Args:
dataset: model.DataSet
The dataset in question.
min_pct_variance: float
The minimum percent of variance which should be maintained when
selecting the recommended number of principal components. Should be
between 0.0 and 1.0.
Defaults to 0.9 (i.e. 90%).
Returns:
The integer number of principal components which should be selected for
Principal Component Analysis.
Raises:
ValueError if min_pct_variance is < 0 or > 1.
"""
if min_pct_variance < 0 or min_pct_variance > 1:
raise ValueError("Invalid minimum percent variance "
"(must be between 0 and 1): %f" %min_pct_variance)
dataset = _copy_and_remove_means(dataset)
eigenvalues = _get_descending_cov_mat_eigenvalues(dataset)
cumulative_pct_variance = np.cumsum(eigenvalues) / np.sum(eigenvalues)
num_components = 1
for pct_variance in cumulative_pct_variance:
if pct_variance >= min_pct_variance:
return num_components
num_components += 1
# should never reach this point since if all components are used the
# percent variance will be 100%, and the min percent variance specified
# can never be greater than 100%
def remove_means(dataset):
"""
Remove the column mean from each value in the dataset.
    For example, if a certain column has values [1, 2, 3], the column mean is
2. When the column means are removed, that column will then have the
values [-1, 0, 1].
NOTE: the modifications are made in place in dataset.
Args:
dataset: model.DataSet
The dataset to remove the column means from.
"""
column_means = dataset.reduce_features(np.mean)
for feature in dataset.feature_list():
def subtract_mean(sample):
"""
Subtracts the current column/feature's mean value from a sample.
"""
return sample - column_means[feature]
dataset.set_column(feature,
dataset.get_column(feature).map(subtract_mean))
def pca(dataset, num_components):
"""
    Performs Principal Component Analysis (PCA) on a dataset.
Args:
dataset: model.DataSet
The dataset to be analysed.
num_components: int
The number of principal components to select.
"""
dataset = _copy_and_remove_means(dataset)
eigenvalues, eigenvectors = _get_cov_mat_eigen_values_and_vectors(dataset)
# get a list of indices for the eigenvalues ordered largest to smallest
indices = np.argsort(eigenvalues).tolist()
indices.reverse()
# take the top N eigenvectors
selected_indices = indices[:num_components]
# transform the data into the new space created by the top N eigenvectors
weights = eigenvectors[:, selected_indices]
transformed_data = np.dot(dataset.get_data_frame(), weights)
return ReducedDataSet(transformed_data, dataset, eigenvalues, weights)
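if __name__ == "__main__":
    # Standalone sketch (added for illustration) of the same steps pca() performs,
    # written against a plain numpy array so it does not depend on pml's DataSet.
    rng = np.random.RandomState(0)
    raw = rng.randn(100, 3)
    raw[:, 2] = raw[:, 0] + 0.1 * rng.randn(100)  # make one feature nearly redundant
    centered = raw - raw.mean(axis=0)             # equivalent of remove_means()
    eigenvalues, eigenvectors = linalg.eig(np.cov(centered, rowvar=0))
    order = np.argsort(eigenvalues)[::-1]         # largest eigenvalues first
    weights = eigenvectors[:, order[:2]]          # keep the top 2 components
    reduced = np.dot(centered, weights)
    print("reduced shape: %s" % (reduced.shape,))
    print("variance retained: %s" % (np.sum(eigenvalues[order[:2]]) / np.sum(eigenvalues)))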
| mit |
csaladenes/csaladenes.github.io | present/bi2/2020/ubb/az_en_jupyter2_mappam/sklearn_tutorial/fig_code/ML_flow_chart.py | 61 | 4970 | """
Tutorial Diagrams
-----------------
This script plots the flow-charts used in the scikit-learn tutorials.
"""
import numpy as np
import pylab as pl
from matplotlib.patches import Circle, Rectangle, Polygon, Arrow, FancyArrow
def create_base(box_bg = '#CCCCCC',
arrow1 = '#88CCFF',
arrow2 = '#88FF88',
supervised=True):
fig = pl.figure(figsize=(9, 6), facecolor='w')
ax = pl.axes((0, 0, 1, 1),
xticks=[], yticks=[], frameon=False)
ax.set_xlim(0, 9)
ax.set_ylim(0, 6)
patches = [Rectangle((0.3, 3.6), 1.5, 1.8, zorder=1, fc=box_bg),
Rectangle((0.5, 3.8), 1.5, 1.8, zorder=2, fc=box_bg),
Rectangle((0.7, 4.0), 1.5, 1.8, zorder=3, fc=box_bg),
Rectangle((2.9, 3.6), 0.2, 1.8, fc=box_bg),
Rectangle((3.1, 3.8), 0.2, 1.8, fc=box_bg),
Rectangle((3.3, 4.0), 0.2, 1.8, fc=box_bg),
Rectangle((0.3, 0.2), 1.5, 1.8, fc=box_bg),
Rectangle((2.9, 0.2), 0.2, 1.8, fc=box_bg),
Circle((5.5, 3.5), 1.0, fc=box_bg),
Polygon([[5.5, 1.7],
[6.1, 1.1],
[5.5, 0.5],
[4.9, 1.1]], fc=box_bg),
FancyArrow(2.3, 4.6, 0.35, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.75, 4.2, 0.5, -0.2, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(5.5, 2.4, 0, -0.4, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(2.0, 1.1, 0.5, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.3, 1.1, 1.3, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(6.2, 1.1, 0.8, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2)]
if supervised:
patches += [Rectangle((0.3, 2.4), 1.5, 0.5, zorder=1, fc=box_bg),
Rectangle((0.5, 2.6), 1.5, 0.5, zorder=2, fc=box_bg),
Rectangle((0.7, 2.8), 1.5, 0.5, zorder=3, fc=box_bg),
FancyArrow(2.3, 2.9, 2.0, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
Rectangle((7.3, 0.85), 1.5, 0.5, fc=box_bg)]
else:
patches += [Rectangle((7.3, 0.2), 1.5, 1.8, fc=box_bg)]
for p in patches:
ax.add_patch(p)
pl.text(1.45, 4.9, "Training\nText,\nDocuments,\nImages,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.6, 4.9, "Feature\nVectors",
ha='left', va='center', fontsize=14)
pl.text(5.5, 3.5, "Machine\nLearning\nAlgorithm",
ha='center', va='center', fontsize=14)
pl.text(1.05, 1.1, "New Text,\nDocument,\nImage,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.3, 1.7, "Feature\nVector",
ha='left', va='center', fontsize=14)
pl.text(5.5, 1.1, "Predictive\nModel",
ha='center', va='center', fontsize=12)
if supervised:
pl.text(1.45, 3.05, "Labels",
ha='center', va='center', fontsize=14)
pl.text(8.05, 1.1, "Expected\nLabel",
ha='center', va='center', fontsize=14)
pl.text(8.8, 5.8, "Supervised Learning Model",
ha='right', va='top', fontsize=18)
else:
pl.text(8.05, 1.1,
"Likelihood\nor Cluster ID\nor Better\nRepresentation",
ha='center', va='center', fontsize=12)
pl.text(8.8, 5.8, "Unsupervised Learning Model",
ha='right', va='top', fontsize=18)
def plot_supervised_chart(annotate=False):
create_base(supervised=True)
if annotate:
fontdict = dict(color='r', weight='bold', size=14)
pl.text(1.9, 4.55, 'X = vec.fit_transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(3.7, 3.2, 'clf.fit(X, y)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(1.7, 1.5, 'X_new = vec.transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(6.1, 1.5, 'y_new = clf.predict(X_new)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
def plot_unsupervised_chart():
create_base(supervised=False)
if __name__ == '__main__':
plot_supervised_chart(False)
plot_supervised_chart(True)
plot_unsupervised_chart()
pl.show()
| mit |
ch3ll0v3k/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
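if __name__ == "__main__":
    # Tiny usage sketch (added for illustration; not one of the test cases above):
    # score a KMeans clustering of the iris data with the Silhouette Coefficient.
    from sklearn.cluster import KMeans
    iris = datasets.load_iris()
    cluster_labels = KMeans(n_clusters=3, random_state=0).fit_predict(iris.data)
    sil = silhouette_score(iris.data, cluster_labels, metric='euclidean')
    print("silhouette: %.3f" % sil)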
| bsd-3-clause |
feranick/GES_AT | GridEdgeAT/gridedgeat/resultsWindow.py | 1 | 32720 | '''
ResultsWindow.py
----------------
Classes for providing a graphical user interface
for the resultsWindow
Copyright (C) 2017-2019 Nicola Ferralis <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
'''
import sys, random, math, json, requests, webbrowser
import numpy as np
import pandas as pd
from datetime import datetime
from PyQt5.QtWidgets import (QMainWindow,QPushButton,QVBoxLayout,QFileDialog,QWidget,
QGridLayout,QGraphicsView,QLabel,QComboBox,QLineEdit,
QTextEdit, QMenuBar,QStatusBar, QApplication,QTableWidget,
QTableWidgetItem,QAction,QHeaderView,QMenu,QHBoxLayout,
QAbstractItemView)
from PyQt5.QtCore import (QRect,pyqtSlot,pyqtSignal,Qt)
from PyQt5.QtGui import (QColor,QCursor)
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from .dataManagement import *
from .queryDMWindow import *
from .fitMethods import *
from . import logger
####################################################################
# Results Window
####################################################################
class ResultsWindow(QMainWindow):
def __init__(self, parent=None):
super(ResultsWindow, self).__init__(parent)
self.deviceID = np.zeros((0,1))
self.perfData = np.zeros((0,9))
self.JV = np.array([])
self.setupDataFrame()
self.csvFolder = self.parent().config.csvSavingFolder
self.initUI()
self.initPlots(self.perfData)
self.initJVPlot()
self.show()
# Define UI elements
def initUI(self):
self.setGeometry(380, 30, 1180, 950)
self.setWindowTitle('Results Panel')
self.setFixedSize(self.size())
# A figure instance to plot on
self.figureMPP = plt.figure()
self.figureJVresp = plt.figure()
self.figurePVresp = plt.figure()
self.figureJVresp.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
self.figurePVresp.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
self.figureMPP.subplots_adjust(left=0.08, right=0.98, top=0.95, bottom=0.20)
self.centralwidget = QWidget(self)
self.centralwidget.setObjectName("centralwidget")
self.jvGridLayoutWidget = QWidget(self.centralwidget)
self.jvGridLayoutWidget.setGeometry(QRect(0, 30, 1180, 455))
self.mppGridLayoutWidget = QWidget(self.centralwidget)
self.mppGridLayoutWidget.setGeometry(QRect(0, 475, 1180, 290))
self.VLayout = QVBoxLayout(self.jvGridLayoutWidget)
self.jvHLayout = QHBoxLayout()
self.canvasJVresp = FigureCanvas(self.figureJVresp)
self.toolbarJVresp = CustomToolbar(self.canvasJVresp, self.figureJVresp, self)
self.toolbarJVresp.setMaximumHeight(30)
self.toolbarJVresp.setStyleSheet("QToolBar { border: 0px }")
self.canvasPVresp = FigureCanvas(self.figurePVresp)
self.toolbarPVresp = CustomToolbar(self.canvasPVresp, self.figurePVresp, self)
self.toolbarPVresp.setMaximumHeight(30)
self.toolbarPVresp.setStyleSheet("QToolBar { border: 0px }")
self.jvLayout = QVBoxLayout()
self.pvLayout = QVBoxLayout()
self.jvLayout.addWidget(self.toolbarJVresp)
self.jvLayout.addWidget(self.canvasJVresp)
self.jvHLayout.addLayout(self.jvLayout)
self.pvLayout.addWidget(self.toolbarPVresp)
self.pvLayout.addWidget(self.canvasPVresp)
self.jvHLayout.addLayout(self.pvLayout)
self.VLayout.addLayout(self.jvHLayout)
self.mppLayout = QVBoxLayout(self.mppGridLayoutWidget)
self.canvasMPP = FigureCanvas(self.figureMPP)
self.toolbarMPP = NavigationToolbar(self.canvasMPP, self)
self.toolbarMPP.setMaximumHeight(30)
self.toolbarMPP.setStyleSheet("QToolBar { border: 0px }")
self.mppLayout.addWidget(self.toolbarMPP)
self.mppLayout.addWidget(self.canvasMPP)
self.resTableW = 1160
self.resTableH = 145
self.resTableWidget = QTableWidget(self.centralwidget)
self.resTableWidget.setGeometry(QRect(10, 770, self.resTableW, self.resTableH))
self.resTableWidget.setToolTip("Right click for more options")
self.resTableWidget.setColumnCount(11)
self.resTableWidget.setRowCount(0)
self.resTableWidget.setItem(0,0, QTableWidgetItem(""))
self.resTableWidget.setHorizontalHeaderItem(0,QTableWidgetItem("Device ID"))
self.resTableWidget.setHorizontalHeaderItem(1,QTableWidgetItem("Av Voc [V]"))
self.resTableWidget.setHorizontalHeaderItem(2,QTableWidgetItem(u"Av Jsc [mA/cm\u00B2]"))
self.resTableWidget.setHorizontalHeaderItem(3,QTableWidgetItem("Av VPP [V]"))
self.resTableWidget.setHorizontalHeaderItem(4,QTableWidgetItem("Av MPP [mW/cm\u00B2]"))
self.resTableWidget.setHorizontalHeaderItem(5,QTableWidgetItem("Av FF"))
self.resTableWidget.setHorizontalHeaderItem(6,QTableWidgetItem("Av PCE [%]"))
self.resTableWidget.setHorizontalHeaderItem(7,QTableWidgetItem("Illumination"))
self.resTableWidget.setHorizontalHeaderItem(8,QTableWidgetItem("Tracking time [s]"))
self.resTableWidget.setHorizontalHeaderItem(9,QTableWidgetItem("Acq Date"))
self.resTableWidget.setHorizontalHeaderItem(10,QTableWidgetItem("Acq Time"))
self.resTableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.resTableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
self.resTableWidget.itemClicked.connect(self.onCellClick)
self.setCentralWidget(self.centralwidget)
# Make Menu for plot related calls
self.menuBar = QMenuBar(self)
self.menuBar.setGeometry(0,0,1150,25)
self.loadMenu = QAction("&Load Local Data", self)
self.loadMenu.setShortcut("Ctrl+o")
self.loadMenu.setStatusTip('Load csv data from saved file')
self.loadMenu.triggered.connect(self.load_csv)
self.loadDMMenu = QAction("&Load Data from Data Management", self)
self.loadDMMenu.setShortcut("Ctrl+Shift+o")
self.loadDMMenu.setStatusTip('Load data from Data Management')
self.loadDMMenu.triggered.connect(self.openWindowDM)
self.saveAllMenu = QAction("&Save All Data", self)
self.saveAllMenu.setShortcut("Ctrl+Shift+s")
self.saveAllMenu.setStatusTip('Save all data into csv')
self.saveAllMenu.triggered.connect(lambda: self.selectDeviceSaveLocally(list(range(self.resTableWidget.rowCount()))))
self.directoryMenu = QAction("&Set directory for saved files", self)
self.directoryMenu.setShortcut("Ctrl+d")
self.directoryMenu.setStatusTip('Set directory for saved files')
self.directoryMenu.triggered.connect(self.set_dir_saved)
self.clearMenu = QAction("&Clear Plots", self)
self.clearMenu.setShortcut("Ctrl+x")
self.clearMenu.setStatusTip('Clear plots')
self.clearMenu.triggered.connect(lambda: self.clearPlots(True))
fileMenu = self.menuBar.addMenu('&File')
fileMenu.addAction(self.loadMenu)
fileMenu.addAction(self.loadDMMenu)
fileMenu.addAction(self.saveAllMenu)
fileMenu.addSeparator()
fileMenu.addAction(self.directoryMenu)
plotMenu = self.menuBar.addMenu('&Plot')
plotMenu.addAction(self.clearMenu)
self.parent().viewWindowMenus(self.menuBar, self.parent())
self.statusbar = QStatusBar(self)
self.statusbar.setObjectName("statusbar")
self.setStatusBar(self.statusbar)
# Set directory for saved data
def set_dir_saved(self):
self.csvFolder = str(QFileDialog.getExistingDirectory(self, "Select Directory"))
self.parent().config.conf['System']['csvSavingFolder'] = str(self.csvFolder)
self.parent().config.saveConfig(self.parent().config.configFile)
self.parent().config.readConfig(self.parent().config.configFile)
msg = "CSV Files will be saved in: "+self.csvFolder
print(msg)
logger.info(msg)
# Define axis parametrs for plots
def plotSettings(self, ax):
ax.tick_params(axis='both', which='major', labelsize=8)
ax.tick_params(axis='both', which='minor', labelsize=8)
# Initialize Time-based plots
def initPlots(self, data):
self.figureMPP.clf()
self.axMPP = self.figureMPP.add_subplot(111)
self.plotSettings(self.axMPP)
self.axMPP.set_xlabel('Time [s]',fontsize=8)
self.axMPP.set_ylabel('Max power point \n[mW/cm$^2$]',fontsize=8)
self.axMPP.set_autoscale_on(True)
self.axMPP.autoscale_view(True,True,True)
self.canvasMPP.draw()
self.lineMPP, = self.axMPP.plot(data[:,0],data[:,4], '.-',linewidth=0.5)
# Initialize JV and PV plots
def initJVPlot(self):
self.figureJVresp.clf()
self.figurePVresp.clf()
self.axJVresp = self.figureJVresp.add_subplot(111)
self.plotSettings(self.axJVresp)
self.axJVresp.set_xlabel('Voltage [V]',fontsize=8)
self.axJVresp.set_ylabel('Current density [mA/cm$^2$]',fontsize=8)
self.axJVresp.axvline(x=0, linewidth=0.5)
self.axJVresp.axhline(y=0, linewidth=0.5)
self.axPVresp = self.figurePVresp.add_subplot(111)
self.plotSettings(self.axPVresp)
self.axPVresp.set_xlabel('Voltage [V]',fontsize=8)
self.axPVresp.set_ylabel('Power density [mW/cm$^2$]',fontsize=8)
self.axPVresp.axvline(x=0, linewidth=0.5)
self.axPVresp.axhline(y=0, linewidth=0.5)
self.canvasJVresp.draw()
self.canvasPVresp.draw()
# Plot MPP with tracking
def plotMPP(self, data):
self.toolbarMPP.update()
self.lineMPP.set_data(data[:,2].astype(float), abs(data[:,6].astype(float)))
self.axMPP.relim()
self.axMPP.autoscale_view(True,True,True)
self.canvasMPP.draw()
# Plot JV response
def plotJVresp(self, JV, clearFlag):
if clearFlag:
self.initJVPlot()
self.toolbarJVresp.update()
self.toolbarPVresp.update()
labelF = "Forw"
labelR = "Back"
else:
labelF = "Forw-Fit"
labelR = "Back-Fit"
self.axJVresp.plot(JV[:,0],JV[:,1], '.-',linewidth=0.5, label=labelF)
self.axJVresp.plot(JV[:,2],JV[:,3], '.-',linewidth=0.5, label=labelR)
self.axPVresp.plot(JV[:,0],JV[:,0]*JV[:,1], '.-',linewidth=0.5, label=labelF)
self.axPVresp.plot(JV[:,2],JV[:,2]*JV[:,3], '.-',linewidth=0.5, label=labelR)
self.axJVresp.legend(loc='lower left')
self.axPVresp.legend(loc='upper left')
if self.parent().config.logPlotJV:
self.axJVresp.set_yscale('log')
self.axPVresp.set_yscale('log')
self.figureJVresp.tight_layout()
self.figurePVresp.tight_layout()
self.canvasJVresp.draw()
self.canvasPVresp.draw()
# Clear all plots and fields
def clearPlots(self, includeTable):
self.setWindowTitle('Results Panel')
self.deviceID = np.zeros((0,1))
self.perfData = np.zeros((0,8))
self.JV = np.array([])
self.initPlots(self.perfData)
self.initJVPlot()
if includeTable is True:
self.resTableWidget.setRowCount(0)
QApplication.processEvents()
# Action upon selecting a row in the table.
@pyqtSlot()
def onCellClick(self):
row = self.resTableWidget.selectedItems()[0].row()
for j in range(self.resTableWidget.columnCount()):
for i in range(self.resTableWidget.rowCount()):
self.resTableWidget.item(i,j).setBackground(QColor(255,255,255))
for j in range(self.resTableWidget.columnCount()):
self.resTableWidget.item(row,j).setBackground(QColor(0,255,0))
try:
self.setWindowTitle('Results Panel - Device: '+ str(self.dfTotDeviceID.iat[0,self.resTableWidget.rowCount()-1-row][0][0]))
self.plotData(self.dfTotDeviceID.iat[0,self.resTableWidget.rowCount()-1-row],
self.dfTotPerfData.iat[0,self.resTableWidget.rowCount()-1-row],
self.dfTotJV.iat[0,self.resTableWidget.rowCount()-1-row])
except:
pass
# Process Key Events
def keyPressEvent(self, event):
if self.resTableWidget.rowCount() > 0:
if event.key() == Qt.Key_Delete:
selectedRows = list(set([ i.row() for i in self.resTableWidget.selectedItems()]))
self.selectDeviceRemove(selectedRows)
    # Enable right click on substrates for saving locally and deleting
def contextMenuEvent(self, event):
self.menu = QMenu(self)
rPos = self.resTableWidget.mapFromGlobal(QCursor.pos())
if rPos.x()>0 and rPos.x()<self.resTableW and \
rPos.y()>0 and rPos.y()<self.resTableH and \
self.resTableWidget.rowCount() > 0 :
selectCellLoadAction = QAction('Load from csv...', self)
selectCellLoadAction.setShortcut("Ctrl+o")
selectCellLoadAction.setStatusTip('Load data from saved csv file...')
selectCellSaveAction = QAction('Save selected data as csv files...', self)
selectCellSaveAction.setShortcut("Ctrl+s")
selectCellSaveAllAction = QAction('Save all data as csv files...', self)
selectCellSaveAllAction.setShortcut("Ctrl+Shift+s")
viewDMEntryAction = QAction('View Entry in Database...', self)
viewDMEntryAction.setShortcut("Ctrl+v")
selectCellRemoveAction = QAction('Remove Selected...', self)
selectCellRemoveAction.setShortcut("Del")
selectRemoveAllAction = QAction('Remove All...', self)
selectRemoveAllAction.setShortcut("Shift+Del")
fitInterpolateAction = QAction('Fit with scipy.interpolate...', self)
fitInterpolateAction.setShortcut("Ctrl+i")
fitDiodeEquationAction = QAction('Fit with Diode Equation (EXPERIMENTAL)...', self)
fitDiodeEquationAction.setShortcut("Ctrl+e")
self.menu.addAction(selectCellRemoveAction)
self.menu.addAction(selectRemoveAllAction)
self.menu.addSeparator()
self.menu.addAction(selectCellLoadAction)
self.menu.addAction(selectCellSaveAction)
self.menu.addAction(selectCellSaveAllAction)
self.menu.addSeparator()
self.menu.addAction(fitInterpolateAction)
self.menu.addAction(fitDiodeEquationAction)
self.menu.addSeparator()
self.menu.addAction(viewDMEntryAction)
self.menu.popup(QCursor.pos())
QApplication.processEvents()
selectCellLoadAction.triggered.connect(self.load_csv)
selectedRows = [self.resTableWidget.rowCount()-j-1 for j in list(set([ i.row() for i in self.resTableWidget.selectedItems()]))]
#selectedRows = list(set([ i.row() for i in self.resTableWidget.selectedItems()]))
#for row in selectedRows[::-1]:
selectCellSaveAction.triggered.connect(lambda: self.selectDeviceSaveLocally(selectedRows))
selectCellSaveAllAction.triggered.connect(lambda: self.selectDeviceSaveLocally(list(range(self.resTableWidget.rowCount()))))
selectCellRemoveAction.triggered.connect(lambda: self.selectDeviceRemove(selectedRows))
selectRemoveAllAction.triggered.connect(lambda: self.clearPlots(True))
fitDiodeEquationAction.triggered.connect(lambda: self.fitDiodeEquation(selectedRows))
fitInterpolateAction.triggered.connect(lambda: self.fitInterpolate(selectedRows))
viewDMEntryAction.triggered.connect(lambda: self.parent().samplewind.viewOnDM(self.resTableWidget.selectedItems()[0].text()))
# Logic to save locally devices selected from results table
def selectDeviceSaveLocally(self, selectedRows):
try:
folder = str(QFileDialog.getExistingDirectory(self, "Select directory where to save..."))
#print(selectedRows)
for row in selectedRows:
#print(self.dfTotDeviceID.iat[0,row][0][0],self.dfTotPerfData.iat[0,row])
self.save_csv(self.dfTotDeviceID.iat[0,row][0][0],
self.dfTotAcqParams.iloc[[row]],
self.dfTotPerfData.iat[0,row],
self.dfTotJV.iat[0,row],folder)
except:
print("Error: data cannot be saved")
# Logic to remove data from devices selected from results table
def selectDeviceRemove(self, selectedRows):
for row in selectedRows:
self.dfTotDeviceID.drop(self.dfTotDeviceID.columns[row], axis=1)
self.dfTotPerfData.drop(self.dfTotPerfData.columns[row], axis=1)
self.dfTotJV.drop(self.dfTotJV.columns[row], axis=1)
for l in self.axJVresp.get_lines():
l.remove()
for l in self.axPVresp.get_lines():
l.remove()
#print(" Removed data from table: ",str(self.dfTotDeviceID.iat[0,row]))
self.canvasJVresp.draw()
self.canvasPVresp.draw()
self.resTableWidget.removeRow(row)
# Logic to Fit the JV curve using the Diode Equation
def fitDiodeEquation(self, selectedRows):
FM = FitMethods(self)
FM.results.connect(lambda msg: print(msg))
FM.results.connect(lambda msg: logger.info(msg))
#DE.func.connect(lambda func: [FM.fitDE(func,self.dfTotJV.iat[0,row]) for row in selectedRows])
FM.JV_fit.connect(lambda JV: self.plotJVresp(JV,False))
[FM.fitDE(self.dfTotJV.iat[0,row]) for row in selectedRows]
#FM.start()
# Logic to Fit the JV curve using scipy.interpolate.interp1d
def fitInterpolate(self, selectedRows):
FM = FitMethods(self)
FM.results.connect(lambda msg: print(msg))
FM.results.connect(lambda msg: logger.info(msg))
#DE.func.connect(lambda func: [FM.fitDE(func,self.dfTotJV.iat[0,row]) for row in selectedRows])
FM.JV_fit.connect(lambda JV: self.plotJVresp(JV,False))
[FM.fitInterp(self.dfTotJV.iat[0,row]) for row in selectedRows]
#FM.start()
# Add row and initialize it within the table
def setupResultTable(self):
self.resTableWidget.insertRow(0)
self.lastRowInd = 0
### Uncomment this to display latest acquisition last
#self.resTableWidget.insertRow(self.resTableWidget.rowCount())
#self.resTableWidget.setItem(self.resTableWidget.rowCount()-1,0,
# QTableWidgetItem())
#for j in range(self.resTableWidget.columnCount()):
# self.resTableWidget.setItem(self.resTableWidget.rowCount(),j,
# QTableWidgetItem())
#self.lastRowInd = self.resTableWidget.rowCount()-1
#for f in range(9):
# self.resTableWidget.setItem(self.lastRowInd, 0,QTableWidgetItem())
# Create internal dataframe with all the data.
# This is needed for plotting data after acquisition
def setupDataFrame(self):
self.dfTotDeviceID = pd.DataFrame()
self.dfTotPerfData = pd.DataFrame()
self.dfTotAcqParams = pd.DataFrame()
self.dfTotJV = pd.DataFrame()
# Process data from devices
def processDeviceData(self, deviceID, dfAcqParams, perfData, JV, flag, track_flag):
# create numpy arrays for all devices as well as dataframes for csv and jsons
self.deviceID = np.vstack((self.deviceID, np.array([deviceID])))
self.perfData = perfData
self.JV = JV
# Populate table.
if flag is False and track_flag is False:
self.setupResultTable()
self.fillTableData(deviceID, self.perfData)
QApplication.processEvents()
# Plot results
self.plotData(self.deviceID,self.perfData, JV)
QApplication.processEvents()
if flag is True:
# Save to internal dataFrame
self.makeInternalDataFrames(self.lastRowInd+self.resTableWidget.rowCount() ,
self.deviceID,self.perfData, dfAcqParams, self.JV)
# Enable/disable saving to file
# Using ALT with Start Acquisition button overrides the config settings.
if self.parent().config.saveLocalCsv == True or \
self.parent().acquisition.modifiers == Qt.AltModifier:
self.save_csv(deviceID, dfAcqParams, self.perfData, self.JV,self.csvFolder)
if self.parent().config.submitToDb == True:
self.submit_DM(deviceID, dfAcqParams, self.perfData, self.JV)
# Plot data from devices
def plotData(self, deviceID, perfData, JV):
self.plotJVresp(JV,True)
self.plotMPP(perfData)
self.show()
# Create internal dataframe with all the data.
# This is needed for plotting data after acquisition
def makeInternalDataFrames(self, index,deviceID,perfData,dfAcqParams,JV):
self.dfTotDeviceID[index] = [deviceID]
self.dfTotPerfData[index] = [perfData]
self.dfTotAcqParams = self.dfTotAcqParams.append(dfAcqParams)
self.dfTotJV[index] = [JV]
# Create DataFrames for saving csv and jsons
def makeDFPerfData(self,perfData):
dfPerfData = pd.DataFrame({'Time step': perfData[:,2], 'Voc': perfData[:,3],
'Jsc': perfData[:,4], 'VPP' : perfData[:,5], 'MPP': perfData[:,6],
'FF': perfData[:,7], 'PCE': perfData[:,8], 'Light' : perfData[:,9],
'Acq Date': perfData[:,0], 'Acq Time': perfData[:,1],
})
dfPerfData = dfPerfData[['Acq Date','Acq Time','Time step', 'Voc',
'Jsc', 'VPP', 'MPP','FF','PCE', 'Light']]
return dfPerfData
def makeDFJV(self,JV,set):
dfJV = pd.DataFrame({'V':JV[:,2*set+0], 'J':JV[:,2*set+1]})
dfJV = dfJV[['V', 'J']]
        # Repackage the frame as a dict and rename the keys to 'columnlabel'/'output',
        # the layout used for the JSON payload submitted in submit_DM
        listJV = dict(dfJV.to_dict(orient='split'))
        listJV['columnlabel'] = listJV.pop('columns')
        listJV['output'] = listJV.pop('data')
        del listJV['index']
return dfJV, listJV
### Submit json for device data to Data-Management
def submit_DM(self,deviceID, dfAcqParams, perfData, JV):
dfPerfData = self.makeDFPerfData(perfData)
# Prepare json-data
jsonData = {'itemId' : deviceID[-1]}
listSubstrateName = {'substrate' : deviceID[:-1]}
listEquipment = {'equipment' : 'auto-testing'}
listAcqParams = dict(dfAcqParams.to_dict(orient='list'))
jsonData.update(listEquipment)
jsonData.update(listSubstrateName)
jsonData.update(listAcqParams)
_, listJV0 = self.makeDFJV(JV,0)
jsonData.update(listJV0)
if float(perfData[0,2]) == 0:
if int(float(dfPerfData.at[0,'Light'])) == 0:
listMeasType = {'measType' : 'JV_dark'}
listName = {'name': 'JV_dark_f'}
listName1 = {'name': 'JV_dark_r'}
else:
listMeasType = {'measType' : 'JV'}
listName = {'name': 'JV_f'}
listName1 = {'name': 'JV_r'}
listPerfData = dict(dfPerfData.iloc[[0]].to_dict('list'))
jsonData.update(listPerfData)
jsonData.update(listName)
jsonData.update(listMeasType)
jsonData1 = jsonData.copy()
jsonData1.update(listName1)
listPerfData1 = dict(dfPerfData.iloc[[1]].to_dict('list'))
jsonData1.update(listPerfData1)
_, listJV1 = self.makeDFJV(JV,1)
jsonData1.update(listJV1)
else:
listName = {'name': 'tracking'}
listMeasType = {'measType' : 'tracking'}
listPerfData = dict(dfPerfData.to_dict('split'))
listPerfData['columnlabel'] = listPerfData.pop('columns')
listPerfData['output'] = listPerfData.pop('data')
del listPerfData['index']
jsonData.update(listPerfData)
jsonData.update(listName)
jsonData.update(listMeasType)
self.dbConnectInfo = self.parent().dbconnectionwind.getDbConnectionInfo()
try:
# This is for direct submission via pymongo
conn = DataManagement(self.dbConnectInfo)
client, _ = conn.connectDB()
db = client[self.dbConnectInfo[2]]
db_entry = db.Measurement.insert_one(json.loads(json.dumps(jsonData)))
msg = " Device " + deviceID + \
": submission to DM via Mongo successful\n (ids: " + \
str(db_entry.inserted_id)
if float(perfData[0,2]) == 0:
db_entry1 = db.Measurement.insert_one(json.loads(json.dumps(jsonData1)))
msg += ", "+str(db_entry1.inserted_id)
msg += ")"
except:
try:
msg = " Submission to DM via Mongo: failed. Trying via HTTP POST"
print(msg)
logger.info(msg)
#This is for using POST HTTP
url = "http://"+self.dbConnectInfo[0]+":"+self.dbConnectInfo[5]+self.dbConnectInfo[6]
if float(perfData[0,2]) == 0:
req = requests.post(url, json=jsonData)
req1 = requests.post(url, json=jsonData1)
if req.status_code == 200 and req1.status_code == 200:
msg = " Device " + deviceID + \
", submission to DM via HTTP POST successful\n (ETag: " + \
str(req.headers['ETag'])+", "+str(req1.headers['ETag'])+")"
else:
req.raise_for_status()
req1.raise_for_status()
else:
                    # Send the tracking data first, then check the response status
                    req = requests.post(url, json=jsonData)
                    if req.status_code == 200:
msg = " Device " + deviceID + \
", submission to DM via HTTP POST successful\n (ETag: " + \
str(req.headers['ETag'])+")"
else:
req.raise_for_status()
except:
msg = " Connection to DM server: failed. Saving local file"
self.save_csv(deviceID, dfAcqParams, perfData, JV, self.csvFolder)
print(msg)
logger.info(msg)
# Open DM window for searching for data in DM
def openWindowDM(self, deviceID):
self.loadDMWindow = DataLoadDMWindow(parent=self)
self.loadDMWindow.show()
        self.loadDMWindow.deviceData.connect(lambda deviceID, perfData, acqParams, JV:
            self.loadDeviceDM(deviceID, perfData, acqParams, JV))
# Once data is retrieved from DM, plot it and populate table
def loadDeviceDM(self, deviceID, perfData, dfAcqParams, JV):
print(" Plotting data for:",deviceID)
self.plotData(deviceID, perfData, JV)
self.setupResultTable()
self.fillTableData(deviceID, perfData)
self.makeInternalDataFrames(self.resTableWidget.rowCount()-1, [[deviceID]], perfData, dfAcqParams, np.array(JV))
# Load data from saved CSV
def load_csv(self):
filenames = QFileDialog.getOpenFileNames(self,
"Open csv data", "","*.csv")
try:
for filename in filenames[0]:
print("Open saved device data from: ", filename)
dftot = pd.read_csv(filename, na_filter=False)
deviceID = dftot.at[0,'Device']
perfData = dftot.to_numpy()[range(0,np.count_nonzero(dftot['Acq Date']))][:,range(1,11)]
JV = dftot.values[range(0,np.count_nonzero(dftot['V_r']))][:,np.arange(11,15)].astype(float)
dfAcqParams = dftot.loc[0:1, 'Acq Soak Voltage':'Comments']
self.plotData(deviceID, perfData, JV)
self.setupResultTable()
self.fillTableData(deviceID, perfData)
self.makeInternalDataFrames(self.resTableWidget.rowCount()-1, [[deviceID]], perfData, dfAcqParams, np.array(JV))
except:
print("Loading files failed")
# Save device acquisition as csv
def save_csv(self,deviceID, dfAcqParams, perfData, JV, folder):
dfPerfData = self.makeDFPerfData(perfData)
dfJV0,_ = self.makeDFJV(JV,0)
dfJV1,_ = self.makeDFJV(JV,1)
dfJV0 = dfJV0.rename(columns={"V": "V_f", "J": "J_f"})
dfJV1 = dfJV1.rename(columns={"V": "V_r", "J": "J_r"})
dfDeviceID = pd.DataFrame({'Device':[deviceID]})
dfTot = pd.concat([dfDeviceID, dfPerfData], axis = 1)
dfTot = pd.concat([dfTot,dfJV0], axis = 1)
dfTot = pd.concat([dfTot,dfJV1], axis = 1)
dfTot = pd.concat([dfTot,dfAcqParams], axis = 1)
dateTimeTag = str(datetime.now().strftime('%Y%m%d-%H%M%S-%f'))
csvFilename = deviceID+"_"
if int(float(dfPerfData.at[0,'Light'])) == 0:
csvFilename+="dark_"
if int(float(dfPerfData.at[0,'Light'])) != 0:
if float(perfData[0,2]) != 0:
csvFilename += "tracking_"
csvFilename += dateTimeTag + ".csv"
try:
dfTot.to_csv(folder+"/"+csvFilename, sep=',', index=False)
msg=" Device data saved on: "+folder+"/"+csvFilename
except:
msg=" Device data NOT saved. Check File saving folder in INI file"
print(msg)
logger.info(msg)
# Populate result table.
def fillTableData(self, deviceID, obj):
if str(obj[0,9]) == "1.0":
light = "ON"
else:
light = "OFF"
self.resTableWidget.setItem(self.lastRowInd, 0,QTableWidgetItem(deviceID))
for i in range(1,7,1):
self.resTableWidget.setItem(self.lastRowInd, i,QTableWidgetItem("{0:0.3f}".format(np.mean(obj[:,i+2].astype(float)))))
try:
self.resTableWidget.item(self.lastRowInd,i).setToolTip("F:{0:0.3f}".format(float(obj[0,i+2]))+" / B:{0:0.3f}".format(float(obj[1,i+2])))
except:
pass
self.resTableWidget.setItem(self.lastRowInd, 7,QTableWidgetItem(light)) #Light
self.resTableWidget.setItem(self.lastRowInd, 9,QTableWidgetItem(obj[0,0]))
self.resTableWidget.setItem(self.lastRowInd, 10,QTableWidgetItem(obj[0,1]))
if float(obj[0,2]) == 0.:
self.resTableWidget.setItem(self.lastRowInd, 8,QTableWidgetItem("None")) #track_time
else:
self.resTableWidget.setItem(self.lastRowInd, 8,QTableWidgetItem("{0:0.3f}".format(float(obj[0,2])))) #track_time
####################################################################
# Custom Toolbar with linear/log button
####################################################################
class CustomToolbar(NavigationToolbar):
def __init__(self, figure_canvas, figure, parent= None):
self.maxY = 5.0
self.figure = figure
self.figure_canvas = figure_canvas
self.toolitems +=(('Log/Lin', "Log/Lin scale", "Log/Lin scale", 'log_lin_scale'),('Toggle Max Y', "Toggle Max Y", "Toggle Max Y", 'max_ylim'))
NavigationToolbar.__init__(self, figure_canvas, parent=parent)
def log_lin_scale(self):
if len(self.figure.gca().lines) > 2:
if self.figure.gca().get_yscale() == 'log':
self.figure.gca().set_yscale('linear')
else:
self.figure.gca().set_yscale('log')
self.figure_canvas.draw()
def max_ylim(self):
if len(self.figure.gca().lines) > 2:
if self.figure.gca().get_ylim()[1] == self.maxY:
self.figure.gca().set_ylim(top=None)
self.figure.gca().relim()
self.figure.gca().autoscale()
else:
self.figure.gca().set_ylim(top=self.maxY)
self.figure_canvas.draw()
| gpl-3.0 |
neuropower/neuropower | neuropower/apps/neuropowertoolbox/plots.py | 2 | 8183 | import matplotlib as mpl
mpl.use('Agg')
from models import NeuropowerModel
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from palettable.colorbrewer.qualitative import Paired_12,Set1_9
from django.http import HttpResponse, HttpResponseRedirect
from neuropower import *
from utils import get_session_id
from mpld3 import plugins
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy
import mpld3
def plotModel(request):
plt.switch_backend('agg')
sid = get_session_id(request)
neuropowerdata = NeuropowerModel.objects.get(SID=sid)
if not neuropowerdata.err == "":
fig=plt.figure(facecolor="white")
else:
peaks = neuropowerdata.peaktable
twocol = Paired_12.mpl_colors
if neuropowerdata.pi1>0:
xn = np.arange(-10,30,0.01)
nul = [1-float(neuropowerdata.pi1)]*neuropowermodels.nulPDF(xn,exc=float(neuropowerdata.ExcZ),method="RFT")
alt = float(neuropowerdata.pi1)*neuropowermodels.altPDF(xn,mu=float(neuropowerdata.mu),sigma=float(neuropowerdata.sigma),exc=float(neuropowerdata.ExcZ),method="RFT")
mix = neuropowermodels.mixPDF(xn,pi1=float(neuropowerdata.pi1),mu=float(neuropowerdata.mu),sigma=float(neuropowerdata.sigma),exc=float(neuropowerdata.ExcZ),method="RFT")
xn_p = np.arange(0,1,0.01)
alt_p = float(neuropowerdata.pi1)*scipy.stats.beta.pdf(xn_p, float(neuropowerdata.a), 1)+1-float(neuropowerdata.pi1)
null_p = [1-float(neuropowerdata.pi1)]*len(xn_p)
mpl.rcParams['font.size']='11.0'
fig,axs=plt.subplots(1,2,figsize=(14,5))
fig.patch.set_facecolor('None')
fig.subplots_adjust(hspace=.5,wspace=0.3)
axs=axs.ravel()
axs[0].hist(peaks.pval,lw=0,normed=True,facecolor=twocol[0],bins=np.arange(0,1.1,0.1),label="observed distribution")
axs[0].set_ylim([0,3])
axs[0].plot(xn_p,null_p,color=twocol[3],lw=2,label="null distribution")
axs[0].plot(xn_p,alt_p,color=twocol[5],lw=2,label="alternative distribution")
axs[0].legend(loc="upper right",frameon=False)
axs[0].set_title("Distribution of "+str(len(peaks))+" peak p-values \n $\pi_1$ = "+str(round(float(neuropowerdata.pi1),2)))
axs[0].set_xlabel("Peak p-values")
axs[0].set_ylabel("Density")
axs[1].hist(peaks.peak,lw=0,facecolor=twocol[0],normed=True,bins=np.arange(min(peaks.peak),30,0.3),label="observed distribution")
axs[1].set_xlim([float(neuropowerdata.ExcZ),np.max(peaks.peak)+1])
axs[1].set_ylim([0,1.3])
if not neuropowerdata.pi1==0:
axs[1].plot(xn,nul,color=twocol[3],lw=2,label="null distribution")
axs[1].plot(xn,alt,color=twocol[5],lw=2, label="alternative distribution")
axs[1].plot(xn,mix,color=twocol[1],lw=2,label="total distribution")
axs[1].legend(loc="upper right",frameon=False)
peak_heights_string = str(round(float(neuropowerdata.mu)/np.sqrt(neuropowerdata.Subj),2))
axs[1].set_title("Distribution of peak heights \n $\delta_1$ = %s" %(peak_heights_string))
axs[1].set_xlabel("Peak heights (z-values)")
axs[1].set_ylabel("Density")
canvas = FigureCanvas(fig)
response = HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def plotPower(sid,MCP='',pow=0,ss=0):
neuropowerdata = NeuropowerModel.objects.get(SID=sid)
powtab = neuropowerdata.data
powtxt = powtab.round(2)
cols = dict(zip(['BF','BH','RFT','UN'],Set1_9.mpl_colors))
sub = int(neuropowerdata.Subj)
newsubs = powtab.newsamplesize
amax = int(np.min(powtab.newsamplesize)+50)
css = """
table{border-collapse: collapse}
td{background-color: rgba(217, 222, 230,50)}
table, th, td{border: 1px solid;border-color: rgba(217, 222, 230,50);text-align: right;font-size: 12px}
"""
hover_BF = [pd.DataFrame(['Bonferroni','Sample Size: '+str(newsubs[i]),'Power: '+str(powtxt['BF'][i])]).to_html(header=False,index_names=False,index=False) for i in range(len(powtab)) if 'BF' in powtab.columns]
hover_BH = [pd.DataFrame(['Benjamini-Hochberg','Sample Size: '+str(newsubs[i]),'Power: '+str(powtxt['BH'][i])]).to_html(header=False,index_names=False,index=False) for i in range(len(powtab)) if 'BH' in powtab.columns]
hover_RFT = [pd.DataFrame(['Random Field Theory','Sample Size: '+str(newsubs[i]),'Power: '+str(powtxt['RFT'][i])]).to_html(header=False,index_names=False,index=False) for i in range(len(powtab)) if 'RFT' in powtab.columns]
hover_UN = [pd.DataFrame(['Uncorrected','Sample Size: '+str(newsubs[i]),'Power: '+str(powtxt['UN'][i])]).to_html(header=False,index_names=False,index=False) for i in range(len(powtab)) if 'UN' in powtab.columns]
fig,axs=plt.subplots(1,1,figsize=(8,5))
fig.patch.set_facecolor('None')
lty = ['--' if all(powtab.BF==powtab.RFT) else '-']
BF=axs.plot(powtab.newsamplesize,powtab.BF,'o',markersize=15,alpha=0,label="") if 'BF' in powtab.columns else 'nan'
BH=axs.plot(powtab.newsamplesize,powtab.BH,'o',markersize=15,alpha=0,label="") if 'BH' in powtab.columns else 'nan'
RFT=axs.plot(powtab.newsamplesize,powtab.RFT,'o',markersize=15,alpha=0,label="") if 'RFT' in powtab.columns else 'nan'
UN=axs.plot(powtab.newsamplesize,powtab.UN,'o',markersize=15,alpha=0,label="") if 'UN' in powtab.columns else 'nan'
plugins.clear(fig)
plugins.connect(fig, plugins.PointHTMLTooltip(BF[0], hover_BF,hoffset=0,voffset=10,css=css))
plugins.connect(fig, plugins.PointHTMLTooltip(RFT[0], hover_RFT,hoffset=0,voffset=10,css=css))
plugins.connect(fig, plugins.PointHTMLTooltip(UN[0], hover_UN,hoffset=0,voffset=10,css=css))
if 'BH' in powtab.columns:
plugins.connect(fig, plugins.PointHTMLTooltip(BH[0], hover_BH,hoffset=0,voffset=10,css=css))
axs.plot(newsubs,powtab.BH,color=cols['BH'],lw=2,label="Benjamini-Hochberg")
axs.plot(newsubs,powtab.BF,color=cols['BF'],lw=2,label="Bonferroni")
axs.plot(newsubs,powtab.RFT,color=cols['RFT'],lw=2,linestyle=str(lty[0]),label="Random Field Theory")
axs.plot(newsubs,powtab.UN,color=cols['UN'],lw=2,label="Uncorrected")
text = "None"
if pow != 0:
if MCP == 'BH' and not 'BH' in powtab.columns:
text = "There is not enough power to estimate a threshold for FDR control. As such it's impossible to predict power for FDR control."
elif all(powtab[MCP]<pow):
text = "To obtain a statistical power of "+str(pow)+" this study would require a sample size larger than 600 subjects."
amax = max(powtab.newsamplesize)
else:
            min_ss = int(np.min([i for i, elem in enumerate(powtab[MCP] > pow, 1) if elem]) + sub - 1)
            axs.plot([min_ss, min_ss], [0, powtab[MCP][min_ss - sub]], color=cols[MCP])
            axs.plot([sub, min_ss], [powtab[MCP][min_ss - sub], powtab[MCP][min_ss - sub]], color=cols[MCP])
            text = "To obtain a statistical power of %s this study would require a sample size of %s subjects." % (pow, min_ss)
            amax = max(min_ss, amax)
if ss != 0:
if MCP == 'BH' and not 'BH' in powtab.columns:
text = "There is not enough power to estimate a threshold for FDR control. As such it's impossible to predict power for FDR control."
else:
ss_pow = powtab[MCP][ss]
axs.plot([ss,ss],[0,ss_pow],color=cols[MCP],linestyle="--")
axs.plot([sub,ss],[ss_pow,ss_pow],color=cols[MCP],linestyle="--")
xticks = [x for x in list(np.arange((np.ceil(sub/10.))*10,100,10)) if not x == np.round(ss/10.)*10]
axs.set_xticks(xticks+[ss])
axs.set_yticks(list(np.arange(0,1.1,0.1)))
text = "A sample size of %s subjects with %s control comes with a power of %s." %(ss,MCP,str(np.round(ss_pow,decimals=2)))
amax = max(ss,amax)
axs.set_ylim([0,1])
axs.set_xlim([sub,amax])
axs.set_title("Power curves")
axs.set_xlabel("Subjects")
axs.set_ylabel("Average power")
axs.legend(loc="lower right",frameon=False,title="")
code = mpld3.fig_to_html(fig)
out = {
"code":code,
"text":text
}
return out
| mit |
ldirer/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 86 | 1234 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
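
# Aside (illustrative sketch, not part of the original example): each curve in
# this plot corresponds to a value of the ``loss`` parameter of
# ``sklearn.linear_model.SGDClassifier``. A minimal sketch, assuming default
# settings for everything else and the scikit-learn release this example ships
# with (where 'log' names the logistic loss):
from sklearn.linear_model import SGDClassifier

clf_hinge = SGDClassifier(loss="hinge")               # soft-margin linear SVM objective
clf_log = SGDClassifier(loss="log")                   # logistic-regression objective
clf_mod_huber = SGDClassifier(loss="modified_huber")  # smoothed hinge, supports predict_proba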
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y=1, f(x))$")
plt.show()
| bsd-3-clause |
matthewzimmer/traffic-sign-classification | zimpy/tests/dataset_tests.py | 1 | 1087 | from zimpy.datasets.german_traffic_signs import GermanTrafficSignDataset
data = GermanTrafficSignDataset()
data.configure(one_hot=False)
X_train = data.train_orig
y_train = data.train_labels
import unittest
import numpy as np
from sklearn.preprocessing import normalize
# Learn more about unit tests here at https://docs.python.org/2/library/unittest.html
class TestDataset(unittest.TestCase):
def test_shapes(self):
self.assertEqual(X_train.shape[0], y_train.shape[0],
"The number of images is not equal to the number of labels.")
self.assertEqual(X_train.shape[1:], (32, 32, 3), "The dimensions of the images are not 32 x 32 x 3.")
def test_data_normalization(self):
X_norm = data.normalize_data(X_train)
self.assertEqual(round(np.mean(X_norm)), 0, "The mean of the input data is: %f" % np.mean(X_norm))
        self.assertTupleEqual((np.min(X_norm), np.max(X_norm)), (-0.5, 0.5),
                              "The range of the input data is: %.1f to %.1f" % (np.min(X_norm), np.max(X_norm)))
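
# Illustrative aside (an assumption, not the project's actual normalize_data):
# one implementation that satisfies test_data_normalization is a min-max rescale
# of the whole array into [-0.5, 0.5], which also pulls the mean close to zero.
def _example_min_max_normalize(images):
    """Hypothetical sketch: scale an image array so its min maps to -0.5 and its max to 0.5."""
    images = images.astype(np.float32)
    return (images - images.min()) / (images.max() - images.min()) - 0.5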
if __name__ == '__main__':
unittest.main()
| mit |
ishank08/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 21 | 4784 | """
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying :class:`sklearn.decomposition.NMF`
and :class:`sklearn.decomposition.LatentDirichletAllocation` on a corpus of documents to
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
Non-negative Matrix Factorization is applied with two different objective
functions: the Frobenius norm, and the generalized Kullback-Leibler divergence.
The latter is equivalent to Probabilistic Latent Semantic Indexing.
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
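
# Illustrative aside (a sketch, not part of the original example): the two NMF
# objectives described in the module docstring are selected via ``beta_loss``;
# the multiplicative-update solver ('mu') is required for the Kullback-Leibler
# objective, while the default coordinate-descent solver handles Frobenius.
_nmf_frobenius_sketch = NMF(n_components=n_topics, beta_loss='frobenius',
                            solver='cd', random_state=1)
_nmf_kl_sketch = NMF(n_components=n_topics, beta_loss='kullback-leibler',
                     solver='mu', max_iter=1000, random_state=1)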
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
message = "Topic #%d: " % topic_idx
message += " ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]])
print(message)
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
print()
# Fit the NMF model
print("Fitting the NMF model (Frobenius norm) with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (Frobenius norm):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
# Fit the NMF model
print("Fitting the NMF model (generalized Kullback-Leibler divergence) with "
"tf-idf features, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1, beta_loss='kullback-leibler',
solver='mu', max_iter=1000, alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (generalized Kullback-Leibler divergence):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
mblondel/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
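
# Illustrative aside (a sketch, not part of the benchmark itself): the call being
# timed below projects a sequence onto the closest non-decreasing sequence in the
# least-squares sense; violating values are pooled and replaced by their block mean.
_isotonic_example = isotonic_regression(np.array([3., 1., 2., 4.]))
# The decreasing pair (3, 1) is pooled to 2, giving array([2., 2., 2., 4.]).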
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
| bsd-3-clause |
xingjiepan/figurePXJ | figurePXJ/Figure.py | 1 | 2466 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
class Figure:
'''Base class for figures.'''
def __init__(self, info_dict={}):
self.info_dict = info_dict
def show(self):
self.make_plot()
plt.show()
def save(self, path):
self.make_plot()
plt.savefig(path)
def set_axes(self):
x_min, x_max, y_min, y_max = plt.axis()
x_min = self.info_dict.setdefault('x_min', x_min)
x_max = self.info_dict.setdefault('x_max', x_max)
y_min = self.info_dict.setdefault('y_min', y_min)
y_max = self.info_dict.setdefault('y_max', y_max)
plt.axis([x_min, x_max, y_min, y_max])
def plot_labels(self):
font = FontProperties()
font.set_family('sans-serif')
font.set_size('x-large')
plt.xlabel(self.info_dict['x_label'], fontproperties=font)
plt.ylabel(self.info_dict['y_label'], fontproperties=font)
def plot_title(self):
font = FontProperties()
font.set_family('sans-serif')
font.set_size('xx-large')
#font.set_weight('bold')
plt.title(self.info_dict['title'], fontproperties=font)
def draw_auxiliary_lines(self):
'''Draw auxiliary lines specified by users.
The format in the info_dict is:
'auxiliary_lines' : [ ((x1,y1), (x2,y2)), ... ]
'''
for line in self.info_dict.setdefault('auxiliary_lines', []):
self.draw_one_auxiliary_line(np.array(line[0]), np.array(line[1]))
def draw_one_auxiliary_line(self, p1, p2):
'''Draw one axuilary line given two points.
p1 and p2 are numpy arrays.
'''
x_min, x_max, y_min, y_max = plt.axis()
# Get the vector of the line
v = p2 - p1
if v[0] == 0 and v[1] == 0: raise Exception("Zero vector")
# Get the upper point to draw the line
scale_up = ((x_max - p1[0]) / v[0]) if v[0] != 0 \
else (y_max - p1[1]) / v[1]
p_up = p1 + scale_up * v
# Get the lower point to draw the line
scale_low = ((x_min - p1[0]) / v[0]) if v[0] != 0 \
else (y_min - p1[1]) / v[1]
p_low = p1 + scale_low * v
# Draw the line
xy = list(zip(p_up, p_low))
plt.plot(xy[0], xy[1], '--', color=self.info_dict['style'].color('normal', 'black'))
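

# Illustrative usage sketch (assumptions: a concrete subclass must implement
# make_plot(), and info_dict['style'] must provide a .color(kind, default)
# method; the stand-ins below exist only for this example and are not part of
# the package).
class _ExampleStyle:
    def color(self, kind, default):
        return default

class _ExampleScatterFigure(Figure):
    def make_plot(self):
        plt.scatter([0.2, 0.5, 0.9], [0.1, 0.6, 0.8])
        self.set_axes()
        self.draw_auxiliary_lines()

if __name__ == '__main__':
    _fig = _ExampleScatterFigure({'x_min': 0, 'x_max': 1, 'y_min': 0, 'y_max': 1,
                                  'auxiliary_lines': [((0, 0), (1, 1))],
                                  'style': _ExampleStyle()})
    _fig.show()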
| gpl-3.0 |
DonBeo/statsmodels | statsmodels/stats/tests/test_diagnostic.py | 6 | 39973 | # -*- coding: utf-8 -*-
"""Tests for Regression Diagnostics and Specification Tests
Created on Thu Feb 09 13:19:47 2012
Author: Josef Perktold
License: BSD-3
currently all tests are against R
"""
import os
import numpy as np
from numpy.testing import (assert_, assert_almost_equal, assert_equal,
assert_approx_equal, assert_allclose)
from nose import SkipTest
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import json
#import statsmodels.sandbox.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
cur_dir = os.path.abspath(os.path.dirname(__file__))
def compare_t_est(sp, sp_dict, decimal=(14, 14)):
assert_almost_equal(sp[0], sp_dict['statistic'], decimal=decimal[0])
assert_almost_equal(sp[1], sp_dict['pvalue'], decimal=decimal[1])
def notyet_atst():
d = macrodata.load().data
realinv = d['realinv']
realgdp = d['realgdp']
realint = d['realint']
endog = realinv
exog = add_constant(np.c_[realgdp, realint])
res_ols1 = OLS(endog, exog).fit()
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv']))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))
lint = d['realint'][:-1]
tbilrate = d['tbilrate'][:-1]
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, lint])
exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate])
res_ols = OLS(endogg, exogg).fit()
res_ols2 = OLS(endogg, exogg2).fit()
#the following were done accidentally with res_ols1 in R,
#with original Greene data
params = np.array([-272.3986041341653, 0.1779455206941112,
0.2149432424658157])
cov_hac_4 = np.array([1321.569466333051, -0.2318836566017612,
37.01280466875694, -0.2318836566017614, 4.602339488102263e-05,
-0.0104687835998635, 37.012804668757, -0.0104687835998635,
21.16037144168061]).reshape(3,3, order='F')
cov_hac_10 = np.array([2027.356101193361, -0.3507514463299015,
54.81079621448568, -0.350751446329901, 6.953380432635583e-05,
-0.01268990195095196, 54.81079621448564, -0.01268990195095195,
22.92512402151113]).reshape(3,3, order='F')
#goldfeld-quandt
het_gq_greater = dict(statistic=13.20512768685082, df1=99, df2=98,
pvalue=1.246141976112324e-30, distr='f')
het_gq_less = dict(statistic=13.20512768685082, df1=99, df2=98, pvalue=1.)
het_gq_2sided = dict(statistic=13.20512768685082, df1=99, df2=98,
pvalue=1.246141976112324e-30, distr='f')
#goldfeld-quandt, fraction = 0.5
het_gq_greater_2 = dict(statistic=87.1328934692124, df1=48, df2=47,
pvalue=2.154956842194898e-33, distr='f')
gq = smsdia.het_goldfeldquandt(endog, exog, split=0.5)
compare_t_est(gq, het_gq_greater, decimal=(13, 14))
assert_equal(gq[-1], 'increasing')
harvey_collier = dict(stat=2.28042114041313, df=199,
pvalue=0.02364236161988260, distr='t')
#hc = harvtest(fm, order.by=ggdp , data = list())
harvey_collier_2 = dict(stat=0.7516918462158783, df=199,
pvalue=0.4531244858006127, distr='t')
##################################
class TestDiagnosticG(object):
def __init__(self):
d = macrodata.load().data
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv']))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))
lint = d['realint'][:-1]
tbilrate = d['tbilrate'][:-1]
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, lint])
exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate])
exogg3 = add_constant(np.c_[gs_l_realgdp])
res_ols = OLS(endogg, exogg).fit()
res_ols2 = OLS(endogg, exogg2).fit()
res_ols3 = OLS(endogg, exogg3).fit()
self.res = res_ols
self.res2 = res_ols2
self.res3 = res_ols3
self.endog = self.res.model.endog
self.exog = self.res.model.exog
def test_basic(self):
#mainly to check I got the right regression
#> mkarray(fm$coefficients, "params")
params = np.array([-9.48167277465485, 4.3742216647032,
-0.613996969478989])
assert_almost_equal(self.res.params, params, decimal=12)
def test_hac(self):
res = self.res
#> nw = NeweyWest(fm, lag = 4, prewhite = FALSE, verbose=TRUE)
#> nw2 = NeweyWest(fm, lag=10, prewhite = FALSE, verbose=TRUE)
#> mkarray(nw, "cov_hac_4")
cov_hac_4 = np.array([1.385551290884014, -0.3133096102522685,
-0.0597207976835705, -0.3133096102522685, 0.1081011690351306,
0.000389440793564336, -0.0597207976835705, 0.000389440793564339,
0.0862118527405036]).reshape(3,3, order='F')
#> mkarray(nw2, "cov_hac_10")
cov_hac_10 = np.array([1.257386180080192, -0.2871560199899846,
-0.03958300024627573, -0.2871560199899845, 0.1049107028987101,
0.0003896205316866944, -0.03958300024627578, 0.0003896205316866961,
0.0985539340694839]).reshape(3,3, order='F')
cov = sw.cov_hac_simple(res, nlags=4, use_correction=False)
bse_hac = sw.se_cov(cov)
assert_almost_equal(cov, cov_hac_4, decimal=14)
assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14)
cov = sw.cov_hac_simple(res, nlags=10, use_correction=False)
bse_hac = sw.se_cov(cov)
assert_almost_equal(cov, cov_hac_10, decimal=14)
assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14)
def test_het_goldfeldquandt(self):
#TODO: test options missing
#> gq = gqtest(fm, alternative='greater')
#> mkhtest_f(gq, 'het_gq_greater', 'f')
het_gq_greater = dict(statistic=0.5313259064778423,
pvalue=0.9990217851193723,
parameters=(98, 98), distr='f')
#> gq = gqtest(fm, alternative='less')
#> mkhtest_f(gq, 'het_gq_less', 'f')
het_gq_less = dict(statistic=0.5313259064778423,
pvalue=0.000978214880627621,
parameters=(98, 98), distr='f')
#> gq = gqtest(fm, alternative='two.sided')
#> mkhtest_f(gq, 'het_gq_two_sided', 'f')
het_gq_two_sided = dict(statistic=0.5313259064778423,
pvalue=0.001956429761255241,
parameters=(98, 98), distr='f')
#> gq = gqtest(fm, fraction=0.1, alternative='two.sided')
#> mkhtest_f(gq, 'het_gq_two_sided_01', 'f')
het_gq_two_sided_01 = dict(statistic=0.5006976835928314,
pvalue=0.001387126702579789,
parameters=(88, 87), distr='f')
#> gq = gqtest(fm, fraction=0.5, alternative='two.sided')
#> mkhtest_f(gq, 'het_gq_two_sided_05', 'f')
het_gq_two_sided_05 = dict(statistic=0.434815645134117,
pvalue=0.004799321242905568,
parameters=(48, 47), distr='f')
endogg, exogg = self.endog, self.exog
#tests
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5)
compare_t_est(gq, het_gq_greater, decimal=(14, 14))
assert_equal(gq[-1], 'increasing')
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5,
alternative='decreasing')
compare_t_est(gq, het_gq_less, decimal=(14, 14))
assert_equal(gq[-1], 'decreasing')
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5,
alternative='two-sided')
compare_t_est(gq, het_gq_two_sided, decimal=(14, 14))
assert_equal(gq[-1], 'two-sided')
#TODO: forcing the same split as R 202-90-90-1=21
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=90, drop=21,
alternative='two-sided')
compare_t_est(gq, het_gq_two_sided_01, decimal=(14, 14))
assert_equal(gq[-1], 'two-sided')
#TODO other options ???
def test_het_breush_pagan(self):
res = self.res
bptest = dict(statistic=0.709924388395087, pvalue=0.701199952134347,
parameters=(2,), distr='f')
bp = smsdia.het_breushpagan(res.resid, res.model.exog)
compare_t_est(bp, bptest, decimal=(12, 12))
def test_het_white(self):
res = self.res
#TODO: regressiontest, compare with Greene or Gretl or Stata
hw = smsdia.het_white(res.resid, res.model.exog)
hw_values = (33.503722896538441, 2.9887960597830259e-06,
7.7945101228430946, 1.0354575277704231e-06)
assert_almost_equal(hw, hw_values)
def test_het_arch(self):
#test het_arch and indirectly het_lm against R
#> library(FinTS)
#> at = ArchTest(residuals(fm), lags=4)
#> mkhtest(at, 'archtest_4', 'chi2')
archtest_4 = dict(statistic=3.43473400836259,
pvalue=0.487871315392619, parameters=(4,),
distr='chi2')
#> at = ArchTest(residuals(fm), lags=12)
#> mkhtest(at, 'archtest_12', 'chi2')
archtest_12 = dict(statistic=8.648320999014171,
pvalue=0.732638635007718, parameters=(12,),
distr='chi2')
at4 = smsdia.het_arch(self.res.resid, maxlag=4)
at12 = smsdia.het_arch(self.res.resid, maxlag=12)
compare_t_est(at4[:2], archtest_4, decimal=(12, 13))
compare_t_est(at12[:2], archtest_12, decimal=(12, 13))
def test_het_arch2(self):
#test autolag options, this also test het_lm
#unfortunately optimal lag=1 for this data
resid = self.res.resid
res1 = smsdia.het_arch(resid, maxlag=1, autolag=None, store=True)
rs1 = res1[-1]
res2 = smsdia.het_arch(resid, maxlag=5, autolag='aic', store=True)
rs2 = res2[-1]
assert_almost_equal(rs2.resols.params, rs1.resols.params, decimal=13)
assert_almost_equal(res2[:4], res1[:4], decimal=13)
#test that smallest lag, maxlag=1 works
res3 = smsdia.het_arch(resid, maxlag=1, autolag='aic')
assert_almost_equal(res3[:4], res1[:4], decimal=13)
def test_acorr_breush_godfrey(self):
res = self.res
#bgf = bgtest(fm, order = 4, type="F")
breushgodfrey_f = dict(statistic=1.179280833676792,
pvalue=0.321197487261203,
parameters=(4,195,), distr='f')
#> bgc = bgtest(fm, order = 4, type="Chisq")
#> mkhtest(bgc, "breushpagan_c", "chi2")
breushgodfrey_c = dict(statistic=4.771042651230007,
pvalue=0.3116067133066697,
parameters=(4,), distr='chi2')
bg = smsdia.acorr_breush_godfrey(res, nlags=4)
bg_r = [breushgodfrey_c['statistic'], breushgodfrey_c['pvalue'],
breushgodfrey_f['statistic'], breushgodfrey_f['pvalue']]
assert_almost_equal(bg, bg_r, decimal=13)
# check that lag choice works
bg2 = smsdia.acorr_breush_godfrey(res, nlags=None)
bg3 = smsdia.acorr_breush_godfrey(res, nlags=14)
assert_almost_equal(bg2, bg3, decimal=13)
def test_acorr_ljung_box(self):
res = self.res
#> bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box")
#> mkhtest(bt, "ljung_box_4", "chi2")
ljung_box_4 = dict(statistic=5.23587172795227, pvalue=0.263940335284713,
parameters=(4,), distr='chi2')
#> bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce")
#> mkhtest(bt, "ljung_box_bp_4", "chi2")
ljung_box_bp_4 = dict(statistic=5.12462932741681,
pvalue=0.2747471266820692,
parameters=(4,), distr='chi2')
#ddof correction for fitted parameters in ARMA(p,q) fitdf=p+q
#> bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box", fitdf=2)
#> mkhtest(bt, "ljung_box_4df2", "chi2")
ljung_box_4df2 = dict(statistic=5.23587172795227,
pvalue=0.0729532930400377,
parameters=(2,), distr='chi2')
#> bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce", fitdf=2)
#> mkhtest(bt, "ljung_box_bp_4df2", "chi2")
ljung_box_bp_4df2 = dict(statistic=5.12462932741681,
pvalue=0.0771260128929921,
parameters=(2,), distr='chi2')
lb, lbpval, bp, bppval = smsdia.acorr_ljungbox(res.resid, 4,
boxpierce=True)
compare_t_est([lb[-1], lbpval[-1]], ljung_box_4, decimal=(13, 14))
compare_t_est([bp[-1], bppval[-1]], ljung_box_bp_4, decimal=(13, 14))
def test_harvey_collier(self):
#> hc = harvtest(fm, order.by = NULL, data = list())
#> mkhtest_f(hc, 'harvey_collier', 't')
harvey_collier = dict(statistic=0.494432160939874,
pvalue=0.6215491310408242,
parameters=(198), distr='t')
#> hc2 = harvtest(fm, order.by=ggdp , data = list())
#> mkhtest_f(hc2, 'harvey_collier_2', 't')
harvey_collier_2 = dict(statistic=1.42104628340473,
pvalue=0.1568762892441689,
parameters=(198), distr='t')
hc = smsdia.linear_harvey_collier(self.res)
compare_t_est(hc, harvey_collier, decimal=(12, 12))
def test_rainbow(self):
#rainbow test
#> rt = raintest(fm)
#> mkhtest_f(rt, 'raintest', 'f')
raintest = dict(statistic=0.6809600116739604, pvalue=0.971832843583418,
parameters=(101, 98), distr='f')
#> rt = raintest(fm, center=0.4)
#> mkhtest_f(rt, 'raintest_center_04', 'f')
raintest_center_04 = dict(statistic=0.682635074191527,
pvalue=0.971040230422121,
parameters=(101, 98), distr='f')
#> rt = raintest(fm, fraction=0.4)
#> mkhtest_f(rt, 'raintest_fraction_04', 'f')
raintest_fraction_04 = dict(statistic=0.565551237772662,
pvalue=0.997592305968473,
parameters=(122, 77), distr='f')
#> rt = raintest(fm, order.by=ggdp)
#Warning message:
#In if (order.by == "mahalanobis") { :
# the condition has length > 1 and only the first element will be used
#> mkhtest_f(rt, 'raintest_order_gdp', 'f')
raintest_order_gdp = dict(statistic=1.749346160513353,
pvalue=0.002896131042494884,
parameters=(101, 98), distr='f')
rb = smsdia.linear_rainbow(self.res)
compare_t_est(rb, raintest, decimal=(13, 14))
rb = smsdia.linear_rainbow(self.res, frac=0.4)
compare_t_est(rb, raintest_fraction_04, decimal=(13, 14))
def test_compare_lr(self):
res = self.res
res3 = self.res3 #nested within res
#lrtest
#lrt = lrtest(fm, fm2)
#Model 1: ginv ~ ggdp + lint
#Model 2: ginv ~ ggdp
lrtest = dict(loglike1=-763.9752181602237, loglike2=-766.3091902020184,
chi2value=4.66794408358942, pvalue=0.03073069384028677,
df=(4,3,1))
lrt = res.compare_lr_test(res3)
assert_almost_equal(lrt[0], lrtest['chi2value'], decimal=11)
assert_almost_equal(lrt[1], lrtest['pvalue'], decimal=11)
waldtest = dict(fvalue=4.65216373312492, pvalue=0.03221346195239025,
df=(199,200,1))
wt = res.compare_f_test(res3)
assert_almost_equal(wt[0], waldtest['fvalue'], decimal=11)
assert_almost_equal(wt[1], waldtest['pvalue'], decimal=11)
def test_compare_nonnested(self):
res = self.res
res2 = self.res2
#jt = jtest(fm, lm(ginv ~ ggdp + tbilrate))
#Estimate Std. Error t value Pr(>|t|)
jtest = [('M1 + fitted(M2)', 1.591505670785873, 0.7384552861695823,
2.155182176352370, 0.032354572525314450, '*'),
('M2 + fitted(M1)', 1.305687653016899, 0.4808385176653064,
2.715438978051544, 0.007203854534057954, '**')]
jt1 = smsdia.compare_j(res2, res)
assert_almost_equal(jt1, jtest[0][3:5], decimal=13)
jt2 = smsdia.compare_j(res, res2)
assert_almost_equal(jt2, jtest[1][3:5], decimal=14)
#Estimate Std. Error z value Pr(>|z|)
coxtest = [('fitted(M1) ~ M2', -0.782030488930356, 0.599696502782265,
-1.304043770977755, 1.922186587840554e-01, ' '),
('fitted(M2) ~ M1', -2.248817107408537, 0.392656854330139,
-5.727181590258883, 1.021128495098556e-08, '***')]
ct1 = smsdia.compare_cox(res, res2)
assert_almost_equal(ct1, coxtest[0][3:5], decimal=13)
ct2 = smsdia.compare_cox(res2, res)
assert_almost_equal(ct2, coxtest[1][3:5], decimal=12)
#TODO should be approx
# Res.Df Df F Pr(>F)
encomptest = [('M1 vs. ME', 198, -1, 4.644810213266983,
0.032354572525313666, '*'),
('M2 vs. ME', 198, -1, 7.373608843521585,
0.007203854534058054, '**')]
# Estimate Std. Error t value
petest = [('M1 + log(fit(M1))-fit(M2)', -229.281878354594596,
44.5087822087058598, -5.15139, 6.201281252449979e-07),
('M2 + fit(M1)-exp(fit(M2))', 0.000634664704814,
0.0000462387010349, 13.72583, 1.319536115230356e-30)]
def test_cusum_ols(self):
#R library(strucchange)
#> sc = sctest(ginv ~ ggdp + lint, type="OLS-CUSUM")
#> mkhtest(sc, 'cusum_ols', 'BB')
cusum_ols = dict(statistic=1.055750610401214, pvalue=0.2149567397376543,
parameters=(), distr='BB') #Brownian Bridge
k_vars=3
cs_ols = smsdia.breaks_cusumolsresid(self.res.resid, ddof=k_vars) #
compare_t_est(cs_ols, cusum_ols, decimal=(12, 12))
def test_breaks_hansen(self):
#> sc = sctest(ginv ~ ggdp + lint, type="Nyblom-Hansen")
#> mkhtest(sc, 'breaks_nyblom_hansen', 'BB')
breaks_nyblom_hansen = dict(statistic=1.0300792740544484,
pvalue=0.1136087530212015,
parameters=(), distr='BB')
bh = smsdia.breaks_hansen(self.res)
assert_almost_equal(bh[0], breaks_nyblom_hansen['statistic'],
decimal=13)
#TODO: breaks_hansen doesn't return pvalues
def test_recursive_residuals(self):
reccumres_standardize = np.array([-2.151, -3.748, -3.114, -3.096,
-1.865, -2.230, -1.194, -3.500, -3.638, -4.447, -4.602, -4.631, -3.999,
-4.830, -5.429, -5.435, -6.554, -8.093, -8.567, -7.532, -7.079, -8.468,
-9.320, -12.256, -11.932, -11.454, -11.690, -11.318, -12.665, -12.842,
-11.693, -10.803, -12.113, -12.109, -13.002, -11.897, -10.787, -10.159,
-9.038, -9.007, -8.634, -7.552, -7.153, -6.447, -5.183, -3.794, -3.511,
-3.979, -3.236, -3.793, -3.699, -5.056, -5.724, -4.888, -4.309, -3.688,
-3.918, -3.735, -3.452, -2.086, -6.520, -7.959, -6.760, -6.855, -6.032,
-4.405, -4.123, -4.075, -3.235, -3.115, -3.131, -2.986, -1.813, -4.824,
-4.424, -4.796, -4.000, -3.390, -4.485, -4.669, -4.560, -3.834, -5.507,
-3.792, -2.427, -1.756, -0.354, 1.150, 0.586, 0.643, 1.773, -0.830,
-0.388, 0.517, 0.819, 2.240, 3.791, 3.187, 3.409, 2.431, 0.668, 0.957,
-0.928, 0.327, -0.285, -0.625, -2.316, -1.986, -0.744, -1.396, -1.728,
-0.646, -2.602, -2.741, -2.289, -2.897, -1.934, -2.532, -3.175, -2.806,
-3.099, -2.658, -2.487, -2.515, -2.224, -2.416, -1.141, 0.650, -0.947,
0.725, 0.439, 0.885, 2.419, 2.642, 2.745, 3.506, 4.491, 5.377, 4.624,
5.523, 6.488, 6.097, 5.390, 6.299, 6.656, 6.735, 8.151, 7.260, 7.846,
8.771, 8.400, 8.717, 9.916, 9.008, 8.910, 8.294, 8.982, 8.540, 8.395,
7.782, 7.794, 8.142, 8.362, 8.400, 7.850, 7.643, 8.228, 6.408, 7.218,
7.699, 7.895, 8.725, 8.938, 8.781, 8.350, 9.136, 9.056, 10.365, 10.495,
10.704, 10.784, 10.275, 10.389, 11.586, 11.033, 11.335, 11.661, 10.522,
10.392, 10.521, 10.126, 9.428, 9.734, 8.954, 9.949, 10.595, 8.016,
6.636, 6.975])
rr = smsdia.recursive_olsresiduals(self.res, skip=3, alpha=0.95)
assert_equal(np.round(rr[5][1:], 3), reccumres_standardize) #extra zero in front
#assert_equal(np.round(rr[3][4:], 3), np.diff(reccumres_standardize))
assert_almost_equal(rr[3][4:], np.diff(reccumres_standardize),3)
assert_almost_equal(rr[4][3:].std(ddof=1), 10.7242, decimal=4)
#regression number, visually checked with graph from gretl
ub0 = np.array([ 13.37318571, 13.50758959, 13.64199346, 13.77639734,
13.91080121])
ub1 = np.array([ 39.44753774, 39.58194162, 39.7163455 , 39.85074937,
39.98515325])
lb, ub = rr[6]
assert_almost_equal(ub[:5], ub0, decimal=7)
assert_almost_equal(lb[:5], -ub0, decimal=7)
assert_almost_equal(ub[-5:], ub1, decimal=7)
assert_almost_equal(lb[-5:], -ub1, decimal=7)
#test a few values with explicit OLS
endog = self.res.model.endog
exog = self.res.model.exog
params = []
ypred = []
for i in range(3,10):
resi = OLS(endog[:i], exog[:i]).fit()
ypred.append(resi.model.predict(resi.params, exog[i]))
params.append(resi.params)
assert_almost_equal(rr[2][3:10], ypred, decimal=12)
assert_almost_equal(rr[0][3:10], endog[3:10] - ypred, decimal=12)
assert_almost_equal(rr[1][2:9], params, decimal=12)
def test_normality(self):
res = self.res
#> library(nortest) #Lilliefors (Kolmogorov-Smirnov) normality test
#> lt = lillie.test(residuals(fm))
#> mkhtest(lt, "lillifors", "-")
lillifors1 = dict(statistic=0.0723390908786589,
pvalue=0.01204113540102896, parameters=(), distr='-')
#> lt = lillie.test(residuals(fm)**2)
#> mkhtest(lt, "lillifors", "-")
lillifors2 = dict(statistic=0.301311621898024,
pvalue=1.004305736618051e-51,
parameters=(), distr='-')
#> lt = lillie.test(residuals(fm)[1:20])
#> mkhtest(lt, "lillifors", "-")
lillifors3 = dict(statistic=0.1333956004203103,
pvalue=0.4618672180799566, parameters=(), distr='-')
lf1 = smsdia.lillifors(res.resid)
lf2 = smsdia.lillifors(res.resid**2)
lf3 = smsdia.lillifors(res.resid[:20])
compare_t_est(lf1, lillifors1, decimal=(14, 14))
compare_t_est(lf2, lillifors2, decimal=(14, 14)) #pvalue very small
assert_approx_equal(lf2[1], lillifors2['pvalue'], significant=10)
compare_t_est(lf3, lillifors3, decimal=(14, 1))
#R uses different approximation for pvalue in last case
#> ad = ad.test(residuals(fm))
#> mkhtest(ad, "ad3", "-")
adr1 = dict(statistic=1.602209621518313, pvalue=0.0003937979149362316,
parameters=(), distr='-')
#> ad = ad.test(residuals(fm)**2)
#> mkhtest(ad, "ad3", "-")
adr2 = dict(statistic=np.inf, pvalue=np.nan, parameters=(), distr='-')
#> ad = ad.test(residuals(fm)[1:20])
#> mkhtest(ad, "ad3", "-")
adr3 = dict(statistic=0.3017073732210775, pvalue=0.5443499281265933,
parameters=(), distr='-')
ad1 = smsdia.normal_ad(res.resid)
compare_t_est(ad1, adr1, decimal=(11, 13))
ad2 = smsdia.normal_ad(res.resid**2)
assert_(np.isinf(ad2[0]))
ad3 = smsdia.normal_ad(res.resid[:20])
compare_t_est(ad3, adr3, decimal=(11, 12))
def test_influence(self):
res = self.res
#this test is slow
infl = oi.OLSInfluence(res)
fp = open(os.path.join(cur_dir,"results/influence_lsdiag_R.json"))
lsdiag = json.load(fp)
#basic
assert_almost_equal(np.array(lsdiag['cov.scaled']).reshape(3, 3),
res.cov_params(), decimal=14)
assert_almost_equal(np.array(lsdiag['cov.unscaled']).reshape(3, 3),
res.normalized_cov_params, decimal=14)
c0, c1 = infl.cooks_distance #TODO: what's c1
assert_almost_equal(c0, lsdiag['cooks'], decimal=14)
assert_almost_equal(infl.hat_matrix_diag, lsdiag['hat'], decimal=14)
assert_almost_equal(infl.resid_studentized_internal,
lsdiag['std.res'], decimal=14)
#slow:
#infl._get_all_obs() #slow, nobs estimation loop, called implicitly
dffits, dffth = infl.dffits
assert_almost_equal(dffits, lsdiag['dfits'], decimal=14)
assert_almost_equal(infl.resid_studentized_external,
lsdiag['stud.res'], decimal=14)
import pandas
fn = os.path.join(cur_dir,"results/influence_measures_R.csv")
infl_r = pandas.read_csv(fn, index_col=0)
conv = lambda s: 1 if s=='TRUE' else 0
fn = os.path.join(cur_dir,"results/influence_measures_bool_R.csv")
#not used yet:
#infl_bool_r = pandas.read_csv(fn, index_col=0,
# converters=dict(zip(lrange(7),[conv]*7)))
infl_r2 = np.asarray(infl_r)
assert_almost_equal(infl.dfbetas, infl_r2[:,:3], decimal=13)
assert_almost_equal(infl.cov_ratio, infl_r2[:,4], decimal=14)
#duplicates
assert_almost_equal(dffits, infl_r2[:,3], decimal=14)
assert_almost_equal(c0, infl_r2[:,5], decimal=14)
assert_almost_equal(infl.hat_matrix_diag, infl_r2[:,6], decimal=14)
#Note: for dffits, R uses a threshold around 0.36, mine: dffits[1]=0.24373
#TODO: finish and check thresholds and pvalues
'''
R has
>>> np.nonzero(np.asarray(infl_bool_r["dffit"]))[0]
array([ 6, 26, 63, 76, 90, 199])
>>> np.nonzero(np.asarray(infl_bool_r["cov.r"]))[0]
array([ 4, 26, 59, 61, 63, 72, 76, 84, 91, 92, 94, 95, 108,
197, 198])
>>> np.nonzero(np.asarray(infl_bool_r["hat"]))[0]
array([ 62, 76, 84, 90, 91, 92, 95, 108, 197, 199])
'''
class TestDiagnosticGPandas(TestDiagnosticG):
def __init__(self):
d = macrodata.load_pandas().data
#growth rates
d['gs_l_realinv'] = 400 * np.log(d['realinv']).diff()
d['gs_l_realgdp'] = 400 * np.log(d['realgdp']).diff()
d['lint'] = d['realint'].shift(1)
d['tbilrate'] = d['tbilrate'].shift(1)
d = d.dropna()
self.d = d
endogg = d['gs_l_realinv']
exogg = add_constant(d[['gs_l_realgdp', 'lint']])
exogg2 = add_constant(d[['gs_l_realgdp', 'tbilrate']])
exogg3 = add_constant(d[['gs_l_realgdp']])
res_ols = OLS(endogg, exogg).fit()
res_ols2 = OLS(endogg, exogg2).fit()
res_ols3 = OLS(endogg, exogg3).fit()
self.res = res_ols
self.res2 = res_ols2
self.res3 = res_ols3
self.endog = self.res.model.endog
self.exog = self.res.model.exog
def grangertest():
#> gt = grangertest(ginv, ggdp, order=4)
#> gt
#Granger causality test
#
#Model 1: ggdp ~ Lags(ggdp, 1:4) + Lags(ginv, 1:4)
#Model 2: ggdp ~ Lags(ggdp, 1:4)
grangertest = dict(fvalue=1.589672703015157, pvalue=0.178717196987075,
df=(198,193))
def test_outlier_influence_funcs():
#smoke test
x = add_constant(np.random.randn(10, 2))
y = x.sum(1) + np.random.randn(10)
res = OLS(y, x).fit()
oi.summary_table(res, alpha=0.05)
res2 = OLS(y, x[:,0]).fit()
oi.summary_table(res2, alpha=0.05)
infl = res2.get_influence()
infl.summary_table()
def test_influence_wrapped():
from pandas import DataFrame
from pandas.util.testing import assert_series_equal
d = macrodata.load_pandas().data
#growth rates
gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
lint = d['realint'][:-1]
# re-index these because they won't conform to lint
gs_l_realgdp.index = lint.index
gs_l_realinv.index = lint.index
data = dict(const=np.ones_like(lint), lint=lint, lrealgdp=gs_l_realgdp)
#order is important
exog = DataFrame(data, columns=['const','lrealgdp','lint'])
res = OLS(gs_l_realinv, exog).fit()
#basic
# already tested
#assert_almost_equal(lsdiag['cov.scaled'],
# res.cov_params().values.ravel(), decimal=14)
#assert_almost_equal(lsdiag['cov.unscaled'],
# res.normalized_cov_params.values.ravel(), decimal=14)
infl = oi.OLSInfluence(res)
# smoke test just to make sure it works, results separately tested
df = infl.summary_frame()
assert_(isinstance(df, DataFrame))
#this test is slow
fp = open(os.path.join(cur_dir,"results/influence_lsdiag_R.json"))
lsdiag = json.load(fp)
c0, c1 = infl.cooks_distance #TODO: what's c1, it's pvalues? -ss
#NOTE: we get a hard-cored 5 decimals with pandas testing
assert_almost_equal(c0, lsdiag['cooks'], 14)
assert_almost_equal(infl.hat_matrix_diag, (lsdiag['hat']), 14)
assert_almost_equal(infl.resid_studentized_internal,
lsdiag['std.res'], 14)
#slow:
dffits, dffth = infl.dffits
assert_almost_equal(dffits, lsdiag['dfits'], 14)
assert_almost_equal(infl.resid_studentized_external,
lsdiag['stud.res'], 14)
import pandas
fn = os.path.join(cur_dir,"results/influence_measures_R.csv")
infl_r = pandas.read_csv(fn, index_col=0)
conv = lambda s: 1 if s=='TRUE' else 0
fn = os.path.join(cur_dir,"results/influence_measures_bool_R.csv")
#not used yet:
#infl_bool_r = pandas.read_csv(fn, index_col=0,
# converters=dict(zip(lrange(7),[conv]*7)))
infl_r2 = np.asarray(infl_r)
#TODO: finish wrapping this stuff
assert_almost_equal(infl.dfbetas, infl_r2[:,:3], decimal=13)
assert_almost_equal(infl.cov_ratio, infl_r2[:,4], decimal=14)
def test_influence_dtype():
# see #2148 bug when endog is integer
y = np.ones(20)
np.random.seed(123)
x = np.random.randn(20, 3)
res1 = OLS(y, x).fit()
res2 = OLS(y*1., x).fit()
cr1 = res1.get_influence().cov_ratio
cr2 = res2.get_influence().cov_ratio
assert_allclose(cr1, cr2, rtol=1e-14)
# regression test for values
cr3 = np.array(
[ 1.22239215, 1.31551021, 1.52671069, 1.05003921, 0.89099323,
1.57405066, 1.03230092, 0.95844196, 1.15531836, 1.21963623,
0.87699564, 1.16707748, 1.10481391, 0.98839447, 1.08999334,
1.35680102, 1.46227715, 1.45966708, 1.13659521, 1.22799038])
assert_almost_equal(cr1, cr3, decimal=8)
def test_outlier_test():
# results from R with NA -> 1. Just testing interface here because
# outlier_test is just a wrapper
labels = ['accountant', 'pilot', 'architect', 'author', 'chemist',
'minister', 'professor', 'dentist', 'reporter', 'engineer',
'undertaker', 'lawyer', 'physician', 'welfare.worker', 'teacher',
'conductor', 'contractor', 'factory.owner', 'store.manager',
'banker', 'bookkeeper', 'mail.carrier', 'insurance.agent',
'store.clerk', 'carpenter', 'electrician', 'RR.engineer',
'machinist', 'auto.repairman', 'plumber', 'gas.stn.attendant',
'coal.miner', 'streetcar.motorman', 'taxi.driver',
'truck.driver', 'machine.operator', 'barber', 'bartender',
'shoe.shiner', 'cook', 'soda.clerk', 'watchman', 'janitor',
'policeman', 'waiter']
#Duncan's prestige data from car
exog = [[1.0, 62.0, 86.0], [1.0, 72.0, 76.0], [1.0, 75.0, 92.0],
[1.0, 55.0, 90.0], [1.0, 64.0, 86.0], [1.0, 21.0, 84.0],
[1.0, 64.0, 93.0], [1.0, 80.0, 100.0], [1.0, 67.0, 87.0],
[1.0, 72.0, 86.0], [1.0, 42.0, 74.0], [1.0, 76.0, 98.0],
[1.0, 76.0, 97.0], [1.0, 41.0, 84.0], [1.0, 48.0, 91.0],
[1.0, 76.0, 34.0], [1.0, 53.0, 45.0], [1.0, 60.0, 56.0],
[1.0, 42.0, 44.0], [1.0, 78.0, 82.0], [1.0, 29.0, 72.0],
[1.0, 48.0, 55.0], [1.0, 55.0, 71.0], [1.0, 29.0, 50.0],
[1.0, 21.0, 23.0], [1.0, 47.0, 39.0], [1.0, 81.0, 28.0],
[1.0, 36.0, 32.0], [1.0, 22.0, 22.0], [1.0, 44.0, 25.0],
[1.0, 15.0, 29.0], [1.0, 7.0, 7.0], [1.0, 42.0, 26.0],
[1.0, 9.0, 19.0], [1.0, 21.0, 15.0], [1.0, 21.0, 20.0],
[1.0, 16.0, 26.0], [1.0, 16.0, 28.0], [1.0, 9.0, 17.0],
[1.0, 14.0, 22.0], [1.0, 12.0, 30.0], [1.0, 17.0, 25.0],
[1.0, 7.0, 20.0], [1.0, 34.0, 47.0], [1.0, 8.0, 32.0]]
endog = [ 82., 83., 90., 76., 90., 87., 93., 90., 52., 88., 57.,
89., 97., 59., 73., 38., 76., 81., 45., 92., 39., 34.,
41., 16., 33., 53., 67., 57., 26., 29., 10., 15., 19.,
10., 13., 24., 20., 7., 3., 16., 6., 11., 8., 41.,
10.]
ndarray_mod = OLS(endog, exog).fit()
rstudent = [3.1345185839, -2.3970223990, 2.0438046359, -1.9309187757,
1.8870465798, -1.7604905300, -1.7040324156, 1.6024285876,
-1.4332485037, -1.1044851583, 1.0688582315, 1.0185271840,
-0.9024219332, -0.9023876471, -0.8830953936, 0.8265782334,
0.8089220547, 0.7682770197, 0.7319491074, -0.6665962829,
0.5227352794, -0.5135016547, 0.5083881518, 0.4999224372,
-0.4980818221, -0.4759717075, -0.4293565820, -0.4114056499,
-0.3779540862, 0.3556874030, 0.3409200462, 0.3062248646,
0.3038999429, -0.3030815773, -0.1873387893, 0.1738050251,
0.1424246593, -0.1292266025, 0.1272066463, -0.0798902878,
0.0788467222, 0.0722556991, 0.0505098280, 0.0233215136,
0.0007112055]
unadj_p = [0.003177202, 0.021170298, 0.047432955, 0.060427645, 0.066248120,
0.085783008, 0.095943909, 0.116738318, 0.159368890, 0.275822623,
0.291386358, 0.314400295, 0.372104049, 0.372122040, 0.382333561,
0.413260793, 0.423229432, 0.446725370, 0.468363101, 0.508764039,
0.603971990, 0.610356737, 0.613905871, 0.619802317, 0.621087703,
0.636621083, 0.669911674, 0.682917818, 0.707414459, 0.723898263,
0.734904667, 0.760983108, 0.762741124, 0.763360242, 0.852319039,
0.862874018, 0.887442197, 0.897810225, 0.899398691, 0.936713197,
0.937538115, 0.942749758, 0.959961394, 0.981506948, 0.999435989]
bonf_p = [0.1429741, 0.9526634, 2.1344830, 2.7192440, 2.9811654, 3.8602354,
4.3174759, 5.2532243, 7.1716001, 12.4120180, 13.1123861, 14.1480133,
16.7446822, 16.7454918, 17.2050103, 18.5967357, 19.0453245,
20.1026416, 21.0763395, 22.8943818, 27.1787396, 27.4660532,
27.6257642, 27.8911043, 27.9489466, 28.6479487, 30.1460253,
30.7313018, 31.8336506, 32.5754218, 33.0707100, 34.2442399,
34.3233506, 34.3512109, 38.3543568, 38.8293308, 39.9348989,
40.4014601, 40.4729411, 42.1520939, 42.1892152, 42.4237391,
43.1982627, 44.1678127, 44.9746195]
bonf_p = np.array(bonf_p)
bonf_p[bonf_p > 1] = 1
sorted_labels = ["minister", "reporter", "contractor", "insurance.agent",
"machinist", "store.clerk", "conductor", "factory.owner",
"mail.carrier", "streetcar.motorman", "carpenter", "coal.miner",
"bartender", "bookkeeper", "soda.clerk", "chemist", "RR.engineer",
"professor", "electrician", "gas.stn.attendant", "auto.repairman",
"watchman", "banker", "machine.operator", "dentist", "waiter",
"shoe.shiner", "welfare.worker", "plumber", "physician", "pilot",
"engineer", "accountant", "lawyer", "undertaker", "barber",
"store.manager", "truck.driver", "cook", "janitor", "policeman",
"architect", "teacher", "taxi.driver", "author"]
res2 = np.c_[rstudent, unadj_p, bonf_p]
res = oi.outlier_test(ndarray_mod, method='b', labels=labels, order=True)
np.testing.assert_almost_equal(res.values, res2, 7)
np.testing.assert_equal(res.index.tolist(), sorted_labels) # pylint: disable-msg=E1103
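    # Quick sanity check on the expected values above (a sketch, not part of the R
    # reference output): the Bonferroni-adjusted p-value is simply the unadjusted
    # p-value multiplied by the number of observations (45) and clipped at 1, e.g.
    # 0.003177202 * 45 ~= 0.1429741, and in general
    # np.allclose(np.minimum(np.array(unadj_p) * 45, 1), bonf_p) holds.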
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x'], exit=False)
#t = TestDiagnosticG()
#t.test_basic()
#t.test_hac()
#t.test_acorr_breush_godfrey()
#t.test_acorr_ljung_box()
#t.test_het_goldfeldquandt()
#t.test_het_breush_pagan()
#t.test_het_white()
#t.test_compare_lr()
#t.test_compare_nonnested()
#t.test_influence()
##################################################
'''
J test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Estimate Std. Error t value Pr(>|t|)
M1 + fitted(M2) 1.591505670785873 0.7384552861695823 2.15518 0.0323546 *
M2 + fitted(M1) 1.305687653016899 0.4808385176653064 2.71544 0.0072039 **
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> fm3 = lm(ginv ~ ggdp + tbilrate)
> ct = coxtest(fm, fm3)
> ct
Cox test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Estimate Std. Error z value Pr(>|z|)
fitted(M1) ~ M2 -0.782030488930356 0.599696502782265 -1.30404 0.19222
fitted(M2) ~ M1 -2.248817107408537 0.392656854330139 -5.72718 1.0211e-08 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> et = encomptest(fm, fm3)
> et
Encompassing test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Model E: ginv ~ ggdp + lint + tbilrate
Res.Df Df F Pr(>F)
M1 vs. ME 198 -1 4.64481 0.0323546 *
M2 vs. ME 198 -1 7.37361 0.0072039 **
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> fm4 = lm(realinv ~ realgdp + realint, data=d)
> fm5 = lm(log(realinv) ~ realgdp + realint, data=d)
> pet = petest(fm4, fm5)
> pet
PE test
Model 1: realinv ~ realgdp + realint
Model 2: log(realinv) ~ realgdp + realint
Estimate Std. Error t value
M1 + log(fit(M1))-fit(M2) -229.281878354594596 44.5087822087058598 -5.15139
M2 + fit(M1)-exp(fit(M2)) 0.000634664704814 0.0000462387010349 13.72583
Pr(>|t|)
M1 + log(fit(M1))-fit(M2) 6.2013e-07 ***
M2 + fit(M1)-exp(fit(M2)) < 2.22e-16 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
'''
| bsd-3-clause |
dsquareindia/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 55 | 7386 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
        # Compute AUC with regard to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
dopplershift/MetPy | src/metpy/plots/declarative.py | 1 | 65191 | # Copyright (c) 2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Declarative plotting tools."""
import contextlib
import copy
from datetime import datetime, timedelta
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from traitlets import (Any, Bool, Float, HasTraits, Instance, Int, List, observe, TraitError,
Tuple, Unicode, Union, validate)
from . import ctables
from . import wx_symbols
from .cartopy_utils import import_cartopy
from .station_plot import StationPlot
from ..calc import reduce_point_density
from ..package_tools import Exporter
from ..units import units
ccrs = import_cartopy()
exporter = Exporter(globals())
_areas = {
'105': (-129.3, -22.37, 17.52, 53.78),
'local': (-92., -64., 28.5, 48.5),
'wvaac': (120.86, -15.07, -53.6, 89.74),
'tropsfc': (-100., -55., 8., 33.),
'epacsfc': (-155., -75., -20., 33.),
'ofagx': (-100., -80., 20., 35.),
'ahsf': (-105., -30., -5., 35.),
'ehsf': (-145., -75., -5., 35.),
'shsf': (-125., -75., -20., 5.),
'tropful': (-160., 0., -20., 50.),
'tropatl': (-115., 10., 0., 40.),
'subtrop': (-90., -20., 20., 60.),
'troppac': (-165., -80., -25., 45.),
'gulf': (-105., -70., 10., 40.),
'carib': (-100., -50., 0., 40.),
'sthepac': (-170., -70., -60., 0.),
'opcahsf': (-102., -20., 0., 45.),
'opcphsf': (175., -70., -28., 45.),
'wwe': (-106., -50., 18., 54.),
'world': (-24., -24., -90., 90.),
'nwwrd1': (-180., 180., -90., 90.),
'nwwrd2': (0., 0., -90., 90.),
'afna': (-135.02, -23.04, 10.43, 40.31),
'awna': (-141.03, -18.58, 7.84, 35.62),
'medr': (-178., -25., -15., 5.),
'pacsfc': (129., -95., -5., 18.),
'saudi': (4.6, 92.5, -13.2, 60.3),
'natlmed': (-30., 70., 0., 65.),
'ncna': (-135.5, -19.25, 8., 37.7),
'ncna2': (-133.5, -20.5, 10., 42.),
'hpcsfc': (-124., -26., 15., 53.),
'atlhur': (-96., -6., 4., 3.),
'nam': (-134., 3., -4., 39.),
'sam': (-120., -20., -60., 20.),
'samps': (-148., -36., -28., 12.),
'eur': (-16., 80., 24., 52.),
'afnh': (-155.19, 18.76, -6.8, -3.58),
'awnh': (-158.94, 15.35, -11.55, -8.98),
'wwwus': (-127.7, -59., 19.8, 56.6),
'ccfp': (-130., -65., 22., 52.),
'llvl': (-119.6, -59.5, 19.9, 44.5),
'llvl2': (-125., -32.5, 5., 46.),
'llvl_e': (-89., -59.5, 23.5, 44.5),
'llvl_c': (-102.4, -81.25, 23.8, 51.6),
'llvl_w': (-119.8, -106.5, 19.75, 52.8),
'ak_artc': (163.7, -65.3, 17.5, 52.6),
'fxpswna': (-80.5, 135., -1., 79.),
'fxpsnna': (-80.5, 54., -1., 25.5),
'fxpsna': (-72.6, 31.4, -3.6, 31.),
'natl_ps': (-80.5, 54., -1., 25.5),
'fxpsena': (-45., 54., 11., 25.5),
'fxpsnp': (155.5, -106.5, 22.5, 47.),
'npac_ps': (155.5, -106.5, 22.5, 47.),
'fxpsus': (-120., -59., 20., 44.5),
'fxmrwrd': (58., 58., -70., 70.),
'fxmrwr2': (-131., -131., -70., 70.),
'nwmrwrd': (70., 70., -70., 70.),
'wrld_mr': (58., 58., -70., 70.),
'fxmr110': (-180., -110., -20., 50.5),
'fxmr180': (110., -180., -20., 50.5),
'fxmrswp': (97.5, -147.5, -36., 45.5),
'fxmrus': (-162.5, -37.5, -28., 51.2),
'fxmrea': (-40., 20., -20., 54.2),
'fxmrjp': (100., -160., 0., 45.),
'icao_a': (-137.4, -12.6, -54., 67.),
'icao_b': (-52.5, -16., -62.5, 77.5),
'icao_b1': (-125., 40., -45.5, 62.7),
'icao_c': (-35., 70., -45., 75.),
'icao_d': (-15., 132., -27., 63.),
'icao_e': (25., 180., -54., 40.),
'icao_f': (100., -110., -52.7, 50.),
'icao_g': (34.8, 157.2, -0.8, 13.7),
'icao_h': (-79.1, 56.7, 1.6, 25.2),
'icao_i': (166.24, -60.62, -6.74, 33.32),
'icao_j': (106.8, -101.1, -27.6, 0.8),
'icao_k': (3.3, 129.1, -11.1, 6.7),
'icao_m': (100., -110., -10., 70.),
'icao_eu': (-21.6, 68.4, 21.4, 58.7),
'icao_me': (17., 70., 10., 44.),
'icao_as': (53., 108., 00., 36.),
'icao_na': (-54.1, 60.3, 17.2, 50.7),
'nhem': (-135., 45., -15., -15.),
'nhem_ps': (-135., 45., -15., -15.),
'nhem180': (135., -45., -15., -15.),
'nhem155': (160., -20., -15., -15.),
'nhem165': (150., -30., -15., -15.),
'nh45_ps': (-90., 90., -15., -15.),
'nhem0': (-45., 135., -15., -15.),
'shem_ps': (88., -92., 30., 30.),
'hfo_gu': (160., -130., -30., 40.),
'natl': (-110., 20.1, 15., 70.),
'watl': (-84., -38., 25., 46.),
'tatl': (-90., -15., -10., 35.),
'npac': (102., -110., -12., 60.),
'spac': (102., -70., -60., 20.),
'tpac': (-165., -75., -10., 40.),
'epac': (-134., -110., 12., 75.),
'wpac': (130., -120., 0., 63.),
'mpac': (128., -108., 15., 71.95),
'opcsfp': (128.89, -105.3, 3.37, 16.77),
'opcsfa': (-55.5, 75., -8.5, 52.6),
'opchur': (-99., -15., 1., 50.05),
'us': (-119., -56., 19., 47.),
'spcus': (-116.4, -63.9, 22.1, 47.2),
'afus': (-119.04, -63.44, 23.1, 44.63),
'ncus': (-124.2, -40.98, 17.89, 47.39),
'nwus': (-118., -55.5, 17., 46.5),
'awips': (-127., -59., 20., 50.),
'bwus': (-124.6, -46.7, 13.1, 43.1),
'usa': (-118., -62., 22.8, 45.),
'usnps': (-118., -62., 18., 51.),
'uslcc': (-118., -62., 20., 51.),
'uswn': (-129., -45., 17., 53.),
'ussf': (-123.5, -44.5, 13., 32.1),
'ussp': (-126., -49., 13., 54.),
'whlf': (-123.8, -85.9, 22.9, 50.2),
'chlf': (-111., -79., 27.5, 50.5),
'centus': (-105.4, -77., 24.7, 47.6),
'ehlf': (-96.2, -62.7, 22., 49.),
'mehlf': (-89.9, -66.6, 23.8, 49.1),
'bosfa': (-87.5, -63.5, 34.5, 50.5),
'miafa': (-88., -72., 23., 39.),
'chifa': (-108., -75., 34., 50.),
'dfwfa': (-106.5, -80.5, 22., 40.),
'slcfa': (-126., -98., 29.5, 50.5),
'sfofa': (-129., -111., 30., 50.),
'g8us': (-116., -58., 19., 56.),
'wsig': (155., -115., 18., 58.),
'esig': (-80., -30., 25., 51.),
'eg8': (-79., -13., 24., 52.),
'west': (-125., -90., 25., 55.),
'cent': (-107.4, -75.3, 24.3, 49.7),
'east': (-100.55, -65.42, 24.57, 47.2),
'nwse': (-126., -102., 38.25, 50.25),
'swse': (-126., -100., 28.25, 40.25),
'ncse': (-108., -84., 38.25, 50.25),
'scse': (-108.9, -84., 24., 40.25),
'nese': (-89., -64., 37.25, 47.25),
'sese': (-90., -66., 28.25, 40.25),
'afwh': (170.7, 15.4, -48.6, 69.4),
'afeh': (-9.3, -164.6, -48.6, 69.4),
'afpc': (80.7, -74.6, -48.6, 69.4),
'ak': (-179., -116.4, 49., 69.),
'ak2': (-180., -106., 42., 73.),
'nwak': (-180., -110., 50., 60.),
'al': (-95., -79., 27., 38.),
'ar': (-100.75, -84.75, 29.5, 40.5),
'ca': (-127.75, -111.75, 31.5, 42.5),
'co': (-114., -98., 33.5, 44.5),
'ct': (-81.25, -65.25, 36., 47.),
'dc': (-85., -69., 33.35, 44.35),
'de': (-83.75, -67.75, 33.25, 44.25),
'fl': (-90., -74., 23., 34.),
'ga': (-92., -76., 27.5, 38.5),
'hi': (-161.5, -152.5, 17., 23.),
'nwxhi': (-166., -148., 14., 26.),
'ia': (-102., -86., 36.5, 47.5),
'id': (-123., -107., 39.25, 50.25),
'il': (-97.75, -81.75, 34.5, 45.5),
'in': (-94.5, -78.5, 34.5, 45.5),
'ks': (-106.5, -90.5, 33.25, 44.25),
'ky': (-93., -77., 31.75, 42.75),
'la': (-100.75, -84.75, 25.75, 36.75),
'ma': (-80.25, -64.25, 36.75, 47.75),
'md': (-85.25, -69.25, 33.75, 44.75),
'me': (-77.75, -61.75, 39.5, 50.5),
'mi': (-93., -77., 37.75, 48.75),
'mn': (-102., -86., 40.5, 51.5),
'mo': (-101., -85., 33., 44.),
'ms': (-98., -82., 27., 38.),
'mt': (-117., -101., 41.5, 52.5),
'nc': (-87.25, -71.25, 30., 41.),
'nd': (-107.5, -91.5, 42.25, 53.25),
'ne': (-107.5, -91.5, 36.25, 47.25),
'nh': (-79.5, -63.5, 38.25, 49.25),
'nj': (-82.5, -66.5, 34.75, 45.75),
'nm': (-114.25, -98.25, 29., 40.),
'nv': (-125., -109., 34., 45.),
'ny': (-84., -68., 37.25, 48.25),
'oh': (-91., -75., 34.5, 45.5),
'ok': (-105.25, -89.25, 30.25, 41.25),
'or': (-128., -112., 38.75, 49.75),
'pa': (-86., -70., 35.5, 46.5),
'ri': (-79.75, -63.75, 36., 47.),
'sc': (-89., -73., 28.5, 39.5),
'sd': (-107.5, -91.5, 39., 50.),
'tn': (-95., -79., 30., 41.),
'tx': (-107., -91., 25.4, 36.5),
'ut': (-119., -103., 34., 45.),
'va': (-86.5, -70.5, 32.25, 43.25),
'vt': (-80.75, -64.75, 38.25, 49.25),
'wi': (-98., -82., 38.5, 49.5),
'wv': (-89., -73., 33., 44.),
'wy': (-116., -100., 37.75, 48.75),
'az': (-119., -103., 29., 40.),
'wa': (-128., -112., 41.75, 52.75),
'abrfc': (-108., -88., 30., 42.),
'ab10': (-106.53, -90.28, 31.69, 40.01),
'cbrfc': (-117., -103., 28., 46.),
'cb10': (-115.69, -104.41, 29.47, 44.71),
'lmrfc': (-100., -77., 26., 40.),
'lm10': (-97.17, -80.07, 28.09, 38.02),
'marfc': (-83.5, -70., 35.5, 44.),
'ma10': (-81.27, -72.73, 36.68, 43.1),
'mbrfc': (-116., -86., 33., 53.),
'mb10': (-112.8, -89.33, 35.49, 50.72),
'ncrfc': (-108., -76., 34., 53.),
'nc10': (-104.75, -80.05, 35.88, 50.6),
'nerfc': (-84., -61., 39., 49.),
'ne10': (-80.11, -64.02, 40.95, 47.62),
'nwrfc': (-128., -105., 35., 55.),
'nw10': (-125.85, -109.99, 38.41, 54.46),
'ohrfc': (-92., -75., 34., 44.),
'oh10': (-90.05, -77.32, 35.2, 42.9),
'serfc': (-94., -70., 22., 40.),
'se10': (-90.6, -73.94, 24.12, 37.91),
'wgrfc': (-112., -88., 21., 42.),
'wg10': (-108.82, -92.38, 23.99, 39.18),
'nwcn': (-133.5, -10.5, 32., 56.),
'cn': (-120.4, -14., 37.9, 58.6),
'ab': (-119.6, -108.2, 48.6, 60.4),
'bc': (-134.5, -109., 47.2, 60.7),
'mb': (-102.4, -86.1, 48.3, 60.2),
'nb': (-75.7, -57.6, 42.7, 49.6),
'nf': (-68., -47., 45., 62.),
'ns': (-67., -59., 43., 47.5),
'nt': (-131.8, -33.3, 57.3, 67.8),
'on': (-94.5, -68.2, 41.9, 55.),
'pe': (-64.6, -61.7, 45.8, 47.1),
'qb': (-80., -49.2, 44.1, 60.9),
'sa': (-111.2, -97.8, 48.5, 60.3),
'yt': (-142., -117., 59., 70.5),
'ag': (-80., -53., -56., -20.),
'ah': (60., 77., 27., 40.),
'afrca': (-25., 59.4, -36., 41.),
'ai': (-14.3, -14.1, -8., -7.8),
'alba': (18., 23., 39., 43.),
'alge': (-9., 12., 15., 38.),
'an': (10., 25., -20., -5.),
'antl': (-70., -58., 11., 19.),
'antg': (-86., -65., 17., 25.),
'atg': (-62., -61.6, 16.9, 17.75),
'au': (101., 148., -45., -6.5),
'azor': (-27.6, -23., 36., 41.),
'ba': (-80.5, -72.5, 22.5, 28.5),
'be': (-64.9, -64.5, 32.2, 32.6),
'bel': (2.5, 6.5, 49.4, 51.6),
'bf': (113., 116., 4., 5.5),
'bfa': (-6., 3., 9., 15.1),
'bh': (-89.3, -88.1, 15.7, 18.5),
'bi': (29., 30.9, -4.6, -2.2),
'bj': (0., 5., 6., 12.6),
'bn': (50., 51., 25.5, 27.1),
'bo': (-72., -50., -24., -8.),
'bots': (19., 29.6, -27., -17.),
'br': (-62.5, -56.5, 12.45, 13.85),
'bt': (71.25, 72.6, -7.5, -5.),
'bu': (22., 30., 40., 45.),
'bv': (3., 4., -55., -54.),
'bw': (87., 93., 20.8, 27.),
'by': (19., 33., 51., 60.),
'bz': (-75., -30., -35., 5.),
'cais': (-172., -171., -3., -2.),
'nwcar': (-120., -50., -15., 35.),
'cari': (-103., -53., 3., 36.),
'cb': (13., 25., 7., 24.),
'ce': (14., 29., 2., 11.5),
'cg': (10., 20., -6., 5.),
'ch': (-80., -66., -56., -15.),
'ci': (85., 145., 14., 48.5),
'cm': (7.5, 17.1, 1., 14.),
'colm': (-81., -65., -5., 14.),
'cr': (-19., -13., 27., 30.),
'cs': (-86.5, -81.5, 8.2, 11.6),
'cu': (-85., -74., 19., 24.),
'cv': (-26., -22., 14., 18.),
'cy': (32., 35., 34., 36.),
'cz': (8.9, 22.9, 47.4, 52.4),
'dj': (41.5, 44.1, 10.5, 13.1),
'dl': (4.8, 16.8, 47., 55.),
'dn': (8., 11., 54., 58.6),
'do': (-61.6, -61.2, 15.2, 15.8),
'dr': (-72.2, -68., 17.5, 20.2),
'eg': (24., 37., 21., 33.),
'eq': (-85., -74., -7., 3.),
'er': (50., 57., 22., 26.6),
'es': (-90.3, -87.5, 13., 14.6),
'et': (33., 49., 2., 19.),
'fa': (-8., -6., 61., 63.),
'fg': (-55., -49., 1., 7.),
'fi': (20.9, 35.1, 59., 70.6),
'fj': (176., -179., 16., 19.),
'fk': (-61.3, -57.5, -53., -51.),
'fn': (0., 17., 11., 24.),
'fr': (-5., 11., 41., 51.5),
'gb': (-17.1, -13.5, 13., 14.6),
'gc': (-82.8, -77.6, 17.9, 21.1),
'gh': (-4.5, 1.5, 4., 12.),
'gi': (-8., -4., 35., 38.),
'gl': (-56.7, 14., 58.3, 79.7),
'glp': (-64.2, -59.8, 14.8, 19.2),
'gm': (144.5, 145.1, 13., 14.),
'gn': (2., 16., 3.5, 15.5),
'go': (8., 14.5, -4.6, 3.),
'gr': (20., 27.6, 34., 42.),
'gu': (-95.6, -85., 10.5, 21.1),
'gw': (-17.5, -13.5, 10.8, 12.8),
'gy': (-62., -55., 0., 10.),
'ha': (-75., -71., 18., 20.),
'he': (-6.1, -5.5, -16.3, -15.5),
'hk': (113.5, 114.7, 22., 23.),
'ho': (-90., -83., 13., 16.6),
'hu': (16., 23., 45.5, 49.1),
'ic': (43., 45., -13.2, -11.),
'icel': (-24.1, -11.5, 63., 67.5),
'ie': (-11.1, -4.5, 50., 55.6),
'inda': (67., 92., 4.2, 36.),
'indo': (95., 141., -8., 6.),
'iq': (38., 50., 29., 38.),
'ir': (44., 65., 25., 40.),
'is': (34., 37., 29., 34.),
'iv': (-9., -2., 4., 11.),
'iw': (34.8, 35.6, 31.2, 32.6),
'iy': (6.6, 20.6, 35.6, 47.2),
'jd': (34., 39.6, 29., 33.6),
'jm': (-80., -76., 16., 19.),
'jp': (123., 155., 24., 47.),
'ka': (131., 155., 1., 9.6),
'kash': (74., 78., 32., 35.),
'kb': (172., 177., -3., 3.2),
'khm': (102., 108., 10., 15.),
'ki': (105.2, 106.2, -11., -10.),
'kn': (32.5, 42.1, -6., 6.),
'kna': (-62.9, -62.4, 17., 17.5),
'ko': (124., 131.5, 33., 43.5),
'ku': (-168., -155., -24.1, -6.1),
'kw': (46.5, 48.5, 28.5, 30.5),
'laos': (100., 108., 13.5, 23.1),
'lb': (34.5, 37.1, 33., 35.),
'lc': (60.9, 61.3, 13.25, 14.45),
'li': (-12., -7., 4., 9.),
'ln': (-162.1, -154.9, -4.2, 6.),
'ls': (27., 29.6, -30.6, -28.),
'lt': (9.3, 9.9, 47., 47.6),
'lux': (5.6, 6.6, 49.35, 50.25),
'ly': (8., 26., 19., 35.),
'maar': (-63.9, -62.3, 17., 18.6),
'made': (-17.3, -16.5, 32.6, 33.),
'mala': (100., 119.6, 1., 8.),
'mali': (-12.5, 6., 8.5, 25.5),
'maur': (57.2, 57.8, -20.7, -19.9),
'maut': (-17.1, -4.5, 14.5, 28.1),
'mc': (-13., -1., 25., 36.),
'mg': (43., 50.6, -25.6, -12.),
'mh': (160., 172., 4.5, 12.1),
'ml': (14.3, 14.7, 35.8, 36.),
'mmr': (92., 102., 7.5, 28.5),
'mong': (87.5, 123.1, 38.5, 52.6),
'mr': (-61.2, -60.8, 14.3, 15.1),
'mu': (113., 114., 22., 23.),
'mv': (70.1, 76.1, -6., 10.),
'mw': (32.5, 36.1, -17., -9.),
'mx': (-119., -83., 13., 34.),
'my': (142.5, 148.5, 9., 25.),
'mz': (29., 41., -26.5, -9.5),
'nama': (11., 25., -29.5, -16.5),
'ncal': (158., 172., -23., -18.),
'ng': (130., 152., -11., 0.),
'ni': (2., 14.6, 3., 14.),
'nk': (-88., -83., 10.5, 15.1),
'nl': (3.5, 7.5, 50.5, 54.1),
'no': (3., 35., 57., 71.5),
'np': (80., 89., 25., 31.),
'nw': (166.4, 167.4, -1., 0.),
'nz': (165., 179., -48., -33.),
'om': (52., 60., 16., 25.6),
'os': (9., 18., 46., 50.),
'pf': (-154., -134., -28., -8.),
'ph': (116., 127., 4., 21.),
'pi': (-177.5, -167.5, -9., 1.),
'pk': (60., 78., 23., 37.),
'pl': (14., 25., 48.5, 55.),
'pm': (-83., -77., 7., 10.),
'po': (-10., -4., 36.5, 42.5),
'pr': (-82., -68., -20., 5.),
'pt': (-130.6, -129.6, -25.56, -24.56),
'pu': (-67.5, -65.5, 17.5, 18.5),
'py': (-65., -54., -32., -17.),
'qg': (7., 12., -2., 3.),
'qt': (50., 52., 24., 27.),
'ra': (60., -165., 25., 55.),
're': (55., 56., -21.5, -20.5),
'riro': (-18., -12., 17.5, 27.5),
'ro': (19., 31., 42.5, 48.5),
'rw': (29., 31., -3., -1.),
'saud': (34.5, 56.1, 15., 32.6),
'sb': (79., 83., 5., 10.),
'seyc': (55., 56., -5., -4.),
'sg': (-18., -10., 12., 17.),
'si': (39.5, 52.1, -4.5, 13.5),
'sk': (109.5, 119.3, 1., 7.),
'sl': (-13.6, -10.2, 6.9, 10.1),
'sm': (-59., -53., 1., 6.),
'sn': (10., 25., 55., 69.6),
'so': (156., 167., -12., -6.),
'sp': (-10., 6., 35., 44.),
'sr': (103., 105., 1., 2.),
'su': (21.5, 38.5, 3.5, 23.5),
'sv': (30.5, 33.1, -27.5, -25.3),
'sw': (5.9, 10.5, 45.8, 48.),
'sy': (35., 42.6, 32., 37.6),
'tanz': (29., 40.6, -13., 0.),
'td': (-62.1, -60.5, 10., 11.6),
'tg': (-0.5, 2.5, 5., 12.),
'th': (97., 106., 5., 21.),
'ti': (-71.6, -70.6, 21., 22.),
'tk': (-173., -171., -11.5, -7.5),
'to': (-178.5, -170.5, -22., -15.),
'tp': (6., 7.6, 0., 2.),
'ts': (7., 13., 30., 38.),
'tu': (25., 48., 34.1, 42.1),
'tv': (176., 180., -11., -5.),
'tw': (120., 122., 21.9, 25.3),
'ug': (29., 35., -3.5, 5.5),
'uk': (-11., 5., 49., 60.),
'ur': (24., 41., 44., 55.),
'uy': (-60., -52., -35.5, -29.5),
'vanu': (167., 170., -21., -13.),
'vi': (-65.5, -64., 16.6, 19.6),
'vk': (13.8, 25.8, 46.75, 50.75),
'vn': (-75., -60., -2., 14.),
'vs': (102., 110., 8., 24.),
'wk': (166.1, 167.1, 18.8, 19.8),
'ye': (42.5, 54.1, 12.5, 19.1),
'yg': (13.5, 24.6, 40., 47.),
'za': (16., 34., -36., -22.),
'zb': (21., 35., -20., -7.),
'zm': (170.5, 173.5, -15., -13.),
'zr': (12., 31.6, -14., 6.),
'zw': (25., 34., -22.9, -15.5)
}
def lookup_projection(projection_code):
"""Get a Cartopy projection based on a short abbreviation."""
import cartopy.crs as ccrs
projections = {'lcc': ccrs.LambertConformal(central_latitude=40, central_longitude=-100,
standard_parallels=[30, 60]),
'ps': ccrs.NorthPolarStereo(central_longitude=-100),
'mer': ccrs.Mercator()}
return projections[projection_code]
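# A minimal usage sketch: the short code is a plain dictionary key, so, for example,
# lookup_projection('mer') returns a cartopy.crs.Mercator instance, while an unknown
# abbreviation such as lookup_projection('utm') raises KeyError.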
def lookup_map_feature(feature_name):
"""Get a Cartopy map feature based on a name."""
import cartopy.feature as cfeature
from . import cartopy_utils
name = feature_name.upper()
try:
feat = getattr(cfeature, name)
scaler = cfeature.AdaptiveScaler('110m', (('50m', 50), ('10m', 15)))
except AttributeError:
feat = getattr(cartopy_utils, name)
scaler = cfeature.AdaptiveScaler('20m', (('5m', 5), ('500k', 1)))
return feat.with_scale(scaler)
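# A minimal usage sketch: the name is upper-cased and looked up on cartopy.feature first,
# falling back to MetPy's cartopy_utils, and the result is wrapped in an AdaptiveScaler so
# its resolution follows the map extent. For example, lookup_map_feature('coastline')
# resolves to cartopy.feature.COASTLINE, while lookup_map_feature('uscounties') comes from
# the MetPy-provided features.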
class Panel(HasTraits):
"""Draw one or more plots."""
@exporter.export
class PanelContainer(HasTraits):
"""Collects panels and set complete figure related settings (e.g., size)."""
size = Union([Tuple(Union([Int(), Float()]), Union([Int(), Float()])),
Instance(type(None))], default_value=None)
size.__doc__ = """This trait takes a tuple of (width, height) to set the size of the
figure.
This trait defaults to None and will assume the default `matplotlib.pyplot.figure` size.
"""
panels = List(Instance(Panel))
panels.__doc__ = """A list of panels to plot on the figure.
This trait must contain at least one panel to plot on the figure."""
@property
def panel(self):
"""Provide simple access for a single panel."""
return self.panels[0]
@panel.setter
def panel(self, val):
self.panels = [val]
@observe('panels')
def _panels_changed(self, change):
for panel in change.new:
panel.parent = self
panel.observe(self.refresh, names=('_need_redraw'))
@property
def figure(self):
"""Provide access to the underlying figure object."""
if not hasattr(self, '_fig'):
self._fig = plt.figure(figsize=self.size)
return self._fig
def refresh(self, _):
"""Refresh the rendering of all panels."""
# First make sure everything is properly constructed
self.draw()
# Trigger the graphics refresh
self.figure.canvas.draw()
# Flush out interactive events--only ok on Agg for newer matplotlib
with contextlib.suppress(NotImplementedError):
self.figure.canvas.flush_events()
def draw(self):
"""Draw the collection of panels."""
for panel in self.panels:
with panel.hold_trait_notifications():
panel.draw()
def save(self, *args, **kwargs):
"""Save the constructed graphic as an image file.
        This method takes a string for the saved file name and additionally accepts the same
        arguments and keyword arguments that `matplotlib.pyplot.savefig` does.
"""
self.draw()
self.figure.savefig(*args, **kwargs)
def show(self):
"""Show the constructed graphic on the screen."""
self.draw()
plt.show()
def copy(self):
"""Return a copy of the panel container."""
return copy.copy(self)
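# A minimal usage sketch (hypothetical values) of driving a PanelContainer: attach one or
# more panels, then render or save the figure, e.g.
#     pc = PanelContainer()
#     pc.size = (10, 8)
#     pc.panels = [MapPanel()]
#     pc.show()   # or pc.save('map.png') to write an image via matplotlib's savefig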
@exporter.export
class MapPanel(Panel):
"""Set figure related elements for an individual panel.
Parameters that need to be set include collecting all plotting types
(e.g., contours, wind barbs, etc.) that are desired to be in a given panel.
Additionally, traits can be set to plot map related features (e.g., coastlines, borders),
projection, graphics area, and title.
"""
parent = Instance(PanelContainer, allow_none=True)
layout = Tuple(Int(), Int(), Int(), default_value=(1, 1, 1))
layout.__doc__ = """A tuple that contains the description (nrows, ncols, index) of the
panel position; default value is (1, 1, 1).
This trait is set to describe the panel position and the default is for a single panel. For
example, a four-panel plot will have two rows and two columns with the tuple setting for
the upper-left panel as (2, 2, 1), upper-right as (2, 2, 2), lower-left as (2, 2, 3), and
lower-right as (2, 2, 4). For more details see the documentation for
`matplotlib.figure.Figure.add_subplot`.
"""
plots = List(Any())
plots.__doc__ = """A list of handles that represent the plots (e.g., `ContourPlot`,
`FilledContourPlot`, `ImagePlot`) to put on a given panel.
This trait collects the different plots, including contours and images, that are intended
for a given panel.
"""
_need_redraw = Bool(default_value=True)
area = Union([Unicode(), Tuple(Float(), Float(), Float(), Float())], allow_none=True,
default_value=None)
area.__doc__ = """A tuple or string value that indicates the graphical area of the plot.
The tuple value corresponds to longitude/latitude box based on the projection of the map
with the format (west-most longitude, east-most longitude, south-most latitude,
north-most latitude). This tuple defines a box from the lower-left to the upper-right
corner.
This trait can also be set with a string value associated with the named geographic regions
    within MetPy. The tuples associated with the names are based on a PlateCarree projection.
For a CONUS region, the following strings can be used: 'us', 'spcus', 'ncus', and 'afus'.
For regional plots, US postal state abbreviations can be used, such as 'co', 'ny', 'ca',
et cetera. Providing a '+' or '-' suffix to the string value will zoom in or out,
respectively. Providing multiple '+' or '-' characters will zoom in or out further.
"""
projection = Union([Unicode(), Instance('cartopy.crs.Projection')], default_value='data')
projection.__doc__ = """A string for a pre-defined projection or a Cartopy projection
object.
There are three pre-defined projections that can be called with a short name:
Lambert conformal conic ('lcc'), Mercator ('mer'), or polar-stereographic ('ps').
Additionally, this trait can be set to a Cartopy projection object.
"""
layers = List(Union([Unicode(), Instance('cartopy.feature.Feature')]),
default_value=['coastline'])
layers.__doc__ = """A list of strings for a pre-defined feature layer or a Cartopy Feature object.
Like the projection, there are a couple of pre-defined feature layers that can be called
using a short name. The pre-defined layers are: 'coastline', 'states', 'borders', 'lakes',
'land', 'ocean', 'rivers', 'usstates', and 'uscounties'. Additionally, this can accept
Cartopy Feature objects.
"""
title = Unicode()
title.__doc__ = """A string to set a title for the figure.
This trait sets a user-defined title that will plot at the top center of the figure.
"""
title_fontsize = Union([Int(), Float(), Unicode()], allow_none=True, default_value=None)
    title_fontsize.__doc__ = """An integer, float, or string value for the font size of the title of the
figure.
This trait sets the font size for the title that will plot at the top center of the figure.
Accepts size in points or relative size. Allowed relative sizes are those of Matplotlib:
'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'.
"""
@validate('area')
def _valid_area(self, proposal):
"""Check that proposed string or tuple is valid and turn string into a tuple extent."""
area = proposal['value']
# Parse string, check that string is valid, and determine extent based on string
if isinstance(area, str):
match = re.match(r'(\w+)([-+]*)$', area)
if match is None:
raise TraitError(f'"{area}" is not a valid string area.')
region, modifier = match.groups()
region = region.lower()
if region == 'global':
extent = 'global'
elif region in _areas:
extent = _areas[region]
zoom = modifier.count('+') - modifier.count('-')
extent = self._zoom_extent(extent, zoom)
else:
raise TraitError(f'"{area}" is not a valid string area.')
# Otherwise, assume area is a tuple and check that latitudes/longitudes are valid
else:
west_lon, east_lon, south_lat, north_lat = area
valid_west = -180 <= west_lon <= 180
valid_east = -180 <= east_lon <= 180
valid_south = -90 <= south_lat <= 90
valid_north = -90 <= north_lat <= 90
if not (valid_west and valid_east and valid_south and valid_north):
raise TraitError(f'"{area}" is not a valid string area.')
extent = area
return extent
@observe('plots')
def _plots_changed(self, change):
"""Handle when our collection of plots changes."""
for plot in change.new:
plot.parent = self
plot.observe(self.refresh, names=('_need_redraw'))
self._need_redraw = True
@observe('parent')
def _parent_changed(self, _):
"""Handle when the parent is changed."""
self.ax = None
@property
def _proj_obj(self):
"""Return the projection as a Cartopy object.
Handles looking up a string for the projection, or if the projection
is set to ``'data'`` looks at the data for the projection.
"""
if isinstance(self.projection, str):
if self.projection == 'data':
if isinstance(self.plots[0].griddata, tuple):
return self.plots[0].griddata[0].metpy.cartopy_crs
else:
return self.plots[0].griddata.metpy.cartopy_crs
else:
return lookup_projection(self.projection)
else:
return self.projection
@property
def _layer_features(self):
"""Iterate over all map features and return as Cartopy objects.
Handle converting names of maps to auto-scaling map features.
"""
for item in self.layers:
if isinstance(item, str):
feat = lookup_map_feature(item)
else:
feat = item
yield feat
@observe('area')
def _set_need_redraw(self, _):
"""Watch traits and set the need redraw flag as necessary."""
self._need_redraw = True
@staticmethod
def _zoom_extent(extent, zoom):
"""Calculate new bounds for zooming in or out of a given extent.
``extent`` is given as a tuple with four numeric values, in the same format as the
``area`` trait.
If ``zoom`` = 0, the extent will not be changed from what was provided to the method
If ``zoom`` > 0, the returned extent will be smaller (zoomed in)
If ``zoom`` < 0, the returned extent will be larger (zoomed out)
"""
west_lon, east_lon, south_lat, north_lat = extent
        # Turn number of pluses and minuses into a number that can scale the latitudes and
# longitudes of our extent
zoom_multiplier = (1 - 2**-zoom) / 2
# Calculate bounds for new, zoomed extent
new_north_lat = north_lat + (south_lat - north_lat) * zoom_multiplier
new_south_lat = south_lat - (south_lat - north_lat) * zoom_multiplier
new_east_lon = east_lon + (west_lon - east_lon) * zoom_multiplier
new_west_lon = west_lon - (west_lon - east_lon) * zoom_multiplier
return (new_west_lon, new_east_lon, new_south_lat, new_north_lat)
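    # Worked example of the zoom arithmetic above: for extent (-110., -70., 20., 50.) and
    # zoom=1 (one '+'), zoom_multiplier = (1 - 2**-1) / 2 = 0.25, so every bound moves a
    # quarter of the way toward the opposite bound, giving (-100., -80., 27.5, 42.5).
    # A negative zoom makes the multiplier negative and widens the extent instead.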
@property
def ax(self):
"""Get the :class:`matplotlib.axes.Axes` to draw on.
Creates a new instance if necessary.
"""
# If we haven't actually made an instance yet, make one with the right size and
# map projection.
if getattr(self, '_ax', None) is None:
self._ax = self.parent.figure.add_subplot(*self.layout, projection=self._proj_obj)
return self._ax
@ax.setter
def ax(self, val):
"""Set the :class:`matplotlib.axes.Axes` to draw on.
Clears existing state as necessary.
"""
if getattr(self, '_ax', None) is not None:
self._ax.cla()
self._ax = val
def refresh(self, changed):
"""Refresh the drawing if necessary."""
self._need_redraw = changed.new
def draw(self):
"""Draw the panel."""
# Only need to run if we've actually changed.
if self._need_redraw:
# Set the extent as appropriate based on the area. One special case for 'global'.
if self.area == 'global':
self.ax.set_global()
elif self.area is not None:
self.ax.set_extent(self.area, ccrs.PlateCarree())
# Draw all of the plots.
for p in self.plots:
with p.hold_trait_notifications():
p.draw()
# Add all of the maps
for feat in self._layer_features:
self.ax.add_feature(feat)
# Use the set title or generate one.
title = self.title or ',\n'.join(plot.name for plot in self.plots)
self.ax.set_title(title, fontsize=self.title_fontsize)
self._need_redraw = False
def __copy__(self):
"""Return a copy of this MapPanel."""
# Create new, blank instance of MapPanel
cls = self.__class__
obj = cls.__new__(cls)
# Copy each attribute from current MapPanel to new MapPanel
for name in self.trait_names():
# The 'plots' attribute is a list.
# A copy must be made for each plot in the list.
if name == 'plots':
obj.plots = [copy.copy(plot) for plot in self.plots]
else:
setattr(obj, name, getattr(self, name))
return obj
def copy(self):
"""Return a copy of the panel."""
return copy.copy(self)
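# A minimal usage sketch (hypothetical values) tying the MapPanel traits together:
#     panel = MapPanel()
#     panel.layout = (1, 1, 1)
#     panel.projection = 'lcc'
#     panel.area = 'co+'                # named region, '+' suffix zooms in
#     panel.layers = ['coastline', 'borders', 'states']
#     panel.title = 'Example map'
#     panel.plots = [...]               # e.g. a ContourPlot or BarbPlot defined below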
@exporter.export
class Plots2D(HasTraits):
"""The highest level class related to plotting 2D data.
This class collects all common methods no matter whether plotting a scalar variable or
vector. Primary settings common to all types of 2D plots are time and level.
"""
parent = Instance(Panel)
_need_redraw = Bool(default_value=True)
level = Union([Int(allow_none=True, default_value=None), Instance(units.Quantity)])
level.__doc__ = """The level of the field to be plotted.
This is a value with units to choose the desired plot level. For example, selecting the
850-hPa level, set this parameter to ``850 * units.hPa``
"""
time = Instance(datetime, allow_none=True)
time.__doc__ = """Set the valid time to be plotted as a datetime object.
If a forecast hour is to be plotted the time should be set to the valid future time, which
can be done using the `~datetime.datetime` and `~datetime.timedelta` objects
from the Python standard library.
"""
plot_units = Unicode(allow_none=True, default_value=None)
plot_units.__doc__ = """The desired units to plot the field in.
Setting this attribute will convert the units of the field variable to the given units for
plotting using the MetPy Units module.
"""
scale = Float(default_value=1e0)
scale.__doc__ = """Scale the field to be plotted by the value given.
This attribute will scale the field by multiplying by the scale. For example, to
scale vorticity to be whole values for contouring you could set the scale to 1e5, such that
the data values will be scaled by 10^5.
"""
@property
def _cmap_obj(self):
"""Return the colormap object.
        Handles converting the name of the colormap to an object from matplotlib or metpy.
"""
try:
return ctables.registry.get_colortable(self.colormap)
except KeyError:
return plt.get_cmap(self.colormap)
@property
def _norm_obj(self):
"""Return the normalization object.
Converts the tuple image range to a matplotlib normalization instance.
"""
return plt.Normalize(*self.image_range)
def clear(self):
"""Clear the plot.
Resets all internal state and sets need for redraw.
"""
if getattr(self, 'handle', None) is not None:
if getattr(self.handle, 'collections', None) is not None:
self.clear_collections()
else:
self.clear_handle()
self._need_redraw = True
def clear_handle(self):
"""Clear the handle to the plot instance."""
self.handle.remove()
self.handle = None
def clear_collections(self):
"""Clear the handle collections to the plot instance."""
for col in self.handle.collections:
col.remove()
self.handle = None
@observe('parent')
def _parent_changed(self, _):
"""Handle setting the parent object for the plot."""
self.clear()
@observe('level', 'time')
def _update_data(self, _=None):
"""Handle updating the internal cache of data.
Responds to changes in various subsetting parameters.
"""
self._griddata = None
self.clear()
    # Can't be a Traitlet because trait notifications don't work with arrays (the
    # notification never happens).
@property
def data(self):
"""Xarray dataset that contains the field to be plotted."""
return self._data
@data.setter
def data(self, val):
self._data = val
self._update_data()
@property
def name(self):
"""Generate a name for the plot."""
if isinstance(self.field, tuple):
ret = ''
ret += ' and '.join(f for f in self.field)
else:
ret = self.field
if self.level is not None:
ret += f'@{self.level:d}'
return ret
def copy(self):
"""Return a copy of the plot."""
return copy.copy(self)
@exporter.export
class PlotScalar(Plots2D):
"""Defines the common elements of 2D scalar plots for single scalar value fields.
Most of the other traits here are for one or more of the specific plots. Currently this
    allows too many options for `ContourPlot` since it does not use image_range, for
example. Similar issues for `ImagePlot` and `FilledContourPlot`.
"""
field = Unicode()
field.__doc__ = """Name of the field to be plotted.
This is the name of the variable from the dataset that is to be plotted. An example,
from a model grid file that uses the THREDDS convention for naming would be
`Geopotential_height_isobaric` or `Temperature_isobaric`. For GOES-16/17 satellite data it
might be `Sectorized_CMI`. To check for the variables available within a dataset, list the
variables with the following command assuming the dataset was read using xarray as `ds`,
`list(ds)`
"""
@observe('field')
def _update_data(self, _=None):
"""Handle updating the internal cache of data.
Responds to changes in various subsetting parameters.
"""
self._griddata = None
self.clear()
@property
def griddata(self):
"""Return the internal cached data."""
if getattr(self, '_griddata', None) is None:
if self.field:
data = self.data.metpy.parse_cf(self.field)
elif not hasattr(self.data.metpy, 'x'):
# Handles the case where we have a dataset but no specified field
raise ValueError('field attribute has not been set.')
else:
data = self.data
subset = {'method': 'nearest'}
if self.level is not None:
subset[data.metpy.vertical.name] = self.level
if self.time is not None:
subset[data.metpy.time.name] = self.time
data_subset = data.metpy.sel(**subset).squeeze()
if self.plot_units is not None:
data_subset = data_subset.metpy.convert_units(self.plot_units)
self._griddata = data_subset * self.scale
return self._griddata
@property
def plotdata(self):
"""Return the data for plotting.
The data array, x coordinates, and y coordinates.
"""
x = self.griddata.metpy.x
y = self.griddata.metpy.y
return x, y, self.griddata
def draw(self):
"""Draw the plot."""
if self._need_redraw:
if getattr(self, 'handle', None) is None:
self._build()
if getattr(self, 'colorbar', None) is not None:
cbar = self.parent.ax.figure.colorbar(
self.handle, orientation=self.colorbar, pad=0, aspect=50)
cbar.ax.tick_params(labelsize=self.colorbar_fontsize)
self._need_redraw = False
class ContourTraits(HasTraits):
"""Represents common contour traits."""
contours = Union([List(Float()), Int()], default_value=25)
contours.__doc__ = """A list of values to contour or an integer number of contour levels.
This parameter sets contour or colorfill values for a plot. Values can be entered either
as a list of values or as an integer with the number of contours to be plotted (as per
matplotlib documentation). A list can be generated by using square brackets or creating
a numpy 1D array and converting it to a list with the `~numpy.ndarray.tolist` method.
"""
clabels = Bool(default_value=False)
clabels.__doc__ = """A boolean (True/False) on whether to plot contour labels.
To plot contour labels set this trait to ``True``, the default value is ``False``.
"""
label_fontsize = Union([Int(), Float(), Unicode()], allow_none=True, default_value=None)
label_fontsize.__doc__ = """An integer, float, or string value to set the font size of labels for contours.
This trait sets the font size for labels that will plot along contour lines. Accepts
size in points or relative size. Allowed relative sizes are those of Matplotlib:
'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'.
"""
class ColorfillTraits(HasTraits):
"""Represent common colorfill traits."""
colormap = Unicode(allow_none=True, default_value=None)
    colormap.__doc__ = """The string name for a Matplotlib or MetPy colormap.
For example, the Blue-Purple colormap from Matplotlib can be accessed using 'BuPu'.
"""
image_range = Union([Tuple(Int(allow_none=True), Int(allow_none=True)),
Instance(plt.Normalize)], default_value=(None, None))
image_range.__doc__ = """A tuple of min and max values that represent the range of values
to color the rasterized image.
The min and max values entered as a tuple will be converted to a
`matplotlib.colors.Normalize` instance for plotting.
"""
colorbar = Unicode(default_value=None, allow_none=True)
colorbar.__doc__ = """A string (horizontal/vertical) on whether to add a colorbar to the plot.
To add a colorbar associated with the plot, set the trait to ``horizontal`` or
    ``vertical``, specifying the orientation of the produced colorbar. The default value is
``None``.
"""
colorbar_fontsize = Union([Int(), Float(), Unicode()], allow_none=True, default_value=None)
colorbar_fontsize.__doc__ = """An integer, float, or string value to set the font size of
labels for the colorbar.
This trait sets the font size of labels for the colorbar. Accepts size in points or
relative size. Allowed relative sizes are those of Matplotlib: 'xx-small', 'x-small',
'small', 'medium', 'large', 'x-large', 'xx-large'.
"""
@exporter.export
class ImagePlot(PlotScalar, ColorfillTraits):
"""Make raster image using `~matplotlib.pyplot.imshow` for satellite or colored image."""
@observe('colormap', 'image_range')
def _set_need_redraw(self, _):
"""Handle changes to attributes that just need a simple redraw."""
if hasattr(self, 'handle'):
self.handle.set_cmap(self._cmap_obj)
self.handle.set_norm(self._norm_obj)
self._need_redraw = True
@observe('colorbar')
def _set_need_rebuild(self, _):
"""Handle changes to attributes that need to regenerate everything."""
# Because matplotlib doesn't let you just change these properties, we need
        # to trigger a clear and re-call of imshow()
self.clear()
@property
def plotdata(self):
"""Return the data for plotting.
The data array, x coordinates, and y coordinates.
"""
x = self.griddata.metpy.x
y = self.griddata.metpy.y
# At least currently imshow with cartopy does not like this
if 'degree' in x.units:
x = x.data
x[x > 180] -= 360
return x, y, self.griddata
def _build(self):
"""Build the plot by calling any plotting methods as necessary."""
x, y, imdata = self.plotdata
# We use min/max for y and manually figure out origin to try to avoid upside down
# images created by images where y[0] > y[-1]
extents = (x[0], x[-1], y.min(), y.max())
origin = 'upper' if y[0] > y[-1] else 'lower'
self.handle = self.parent.ax.imshow(imdata, extent=extents, origin=origin,
cmap=self._cmap_obj, norm=self._norm_obj,
transform=imdata.metpy.cartopy_crs)
@exporter.export
class ContourPlot(PlotScalar, ContourTraits):
"""Make contour plots by defining specific traits."""
linecolor = Unicode('black')
linecolor.__doc__ = """A string value to set the color of plotted contours; default is
black.
This trait can be set to any Matplotlib color
(https://matplotlib.org/3.1.0/gallery/color/named_colors.html)
"""
linewidth = Int(2)
linewidth.__doc__ = """An integer value to set the width of plotted contours; default value
is 2.
This trait changes the thickness of contour lines with a higher value plotting a thicker
line.
"""
linestyle = Unicode('solid', allow_none=True)
linestyle.__doc__ = """A string value to set the linestyle (e.g., dashed); default is
solid.
The valid string values are those of Matplotlib which are solid, dashed, dotted, and
dashdot.
"""
@observe('contours', 'linecolor', 'linewidth', 'linestyle', 'clabels', 'label_fontsize')
def _set_need_rebuild(self, _):
"""Handle changes to attributes that need to regenerate everything."""
# Because matplotlib doesn't let you just change these properties, we need
# to trigger a clear and re-call of contour()
self.clear()
def _build(self):
"""Build the plot by calling any plotting methods as necessary."""
x, y, imdata = self.plotdata
self.handle = self.parent.ax.contour(x, y, imdata, self.contours,
colors=self.linecolor, linewidths=self.linewidth,
linestyles=self.linestyle,
transform=imdata.metpy.cartopy_crs)
if self.clabels:
self.handle.clabel(inline=1, fmt='%.0f', inline_spacing=8,
use_clabeltext=True, fontsize=self.label_fontsize)
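# A minimal usage sketch (hypothetical dataset and field names): point the plot at an
# xarray dataset, select the field/level, and hand it to a MapPanel via its plots list.
#     cp = ContourPlot()
#     cp.data = xr.open_dataset('gfs.nc')        # hypothetical file, xarray assumed as xr
#     cp.field = 'Geopotential_height_isobaric'
#     cp.level = 500 * units.hPa
#     cp.contours = list(range(0, 6000, 60))
#     cp.clabels = True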
@exporter.export
class FilledContourPlot(PlotScalar, ColorfillTraits, ContourTraits):
"""Make color-filled contours plots by defining appropriate traits."""
@observe('contours', 'colorbar', 'colormap')
def _set_need_rebuild(self, _):
"""Handle changes to attributes that need to regenerate everything."""
# Because matplotlib doesn't let you just change these properties, we need
# to trigger a clear and re-call of contour()
self.clear()
def _build(self):
"""Build the plot by calling any plotting methods as necessary."""
x, y, imdata = self.plotdata
self.handle = self.parent.ax.contourf(x, y, imdata, self.contours,
cmap=self._cmap_obj, norm=self._norm_obj,
transform=imdata.metpy.cartopy_crs)
@exporter.export
class PlotVector(Plots2D):
"""Defines common elements for 2D vector plots.
This class collects common elements including the field trait, which is a tuple argument
accepting two strings, for plotting 2D vector fields.
"""
field = Tuple(Unicode(), Unicode())
field.__doc__ = """A tuple containing the two components of the vector field from the
dataset in the form (east-west component, north-south component).
For a wind barb plot each component of the wind must be specified and should be of the form
(u-wind, v-wind).
"""
pivot = Unicode('middle')
pivot.__doc__ = """A string setting the pivot point of the vector. Default value is
'middle'.
    This trait takes the values of the keyword argument from `matplotlib.pyplot.barbs`:
'tip' or 'middle'.
"""
skip = Tuple(Int(), Int(), default_value=(1, 1))
skip.__doc__ = """A tuple of integers to indicate the number of grid points to skip between
plotting vectors. Default is (1, 1).
This trait is to be used to reduce the number of vectors plotted in the (east-west,
north-south) components. The two values can be set to the same or different integer values
depending on what is desired.
"""
earth_relative = Bool(default_value=True)
earth_relative.__doc__ = """A boolean value to indicate whether the vector to be plotted
is earth- or grid-relative. Default value is `True`, indicating that vectors are
earth-relative.
Common gridded meteorological datasets including GFS and NARR output contain wind
components that are earth-relative. The primary exception is NAM output with wind
components that are grid-relative. For any grid-relative vectors set this trait to `False`.
"""
color = Unicode(default_value='black')
color.__doc__ = """A string value that controls the color of the vectors. Default value is
black.
This trait can be set to any named color from
    `Matplotlib's Colors <https://matplotlib.org/3.1.0/gallery/color/named_colors.html>`
"""
@observe('field')
def _update_data(self, _=None):
"""Handle updating the internal cache of data.
Responds to changes in various subsetting parameters.
"""
self._griddata_u = None
self._griddata_v = None
self.clear()
@property
def griddata(self):
"""Return the internal cached data."""
if getattr(self, '_griddata_u', None) is None:
if self.field[0]:
u = self.data.metpy.parse_cf(self.field[0])
v = self.data.metpy.parse_cf(self.field[1])
else:
raise ValueError('field attribute not set correctly')
subset = {'method': 'nearest'}
if self.level is not None:
subset[u.metpy.vertical.name] = self.level
if self.time is not None:
subset[u.metpy.time.name] = self.time
data_subset_u = u.metpy.sel(**subset).squeeze()
data_subset_v = v.metpy.sel(**subset).squeeze()
if self.plot_units is not None:
data_subset_u = data_subset_u.metpy.convert_units(self.plot_units)
data_subset_v = data_subset_v.metpy.convert_units(self.plot_units)
self._griddata_u = data_subset_u
self._griddata_v = data_subset_v
return (self._griddata_u, self._griddata_v)
@property
def plotdata(self):
"""Return the data for plotting.
The data array, x coordinates, and y coordinates.
"""
x = self.griddata[0].metpy.x
y = self.griddata[0].metpy.y
if self.earth_relative:
x, y, _ = ccrs.PlateCarree().transform_points(self.griddata[0].metpy.cartopy_crs,
*np.meshgrid(x, y)).T
x = x.T
y = y.T
else:
if 'degree' in x.units:
x, y, _ = self.griddata[0].metpy.cartopy_crs.transform_points(
ccrs.PlateCarree(), *np.meshgrid(x, y)).T
x = x.T
y = y.T
if x.ndim == 1:
xx, yy = np.meshgrid(x, y)
else:
xx, yy = x, y
return xx, yy, self.griddata[0], self.griddata[1]
def draw(self):
"""Draw the plot."""
if self._need_redraw:
if getattr(self, 'handle', None) is None:
self._build()
self._need_redraw = False
@exporter.export
class BarbPlot(PlotVector):
"""Make plots of wind barbs on a map with traits to refine the look of plotted elements."""
barblength = Float(default_value=7)
barblength.__doc__ = """A float value that changes the length of the wind barbs. Default
value is 7.
This trait corresponds to the keyword length in `matplotlib.pyplot.barbs`.
"""
@observe('barblength', 'pivot', 'skip', 'earth_relative', 'color')
def _set_need_rebuild(self, _):
"""Handle changes to attributes that need to regenerate everything."""
# Because matplotlib doesn't let you just change these properties, we need
        # to trigger a clear and re-call of barbs()
self.clear()
def _build(self):
"""Build the plot by calling needed plotting methods as necessary."""
x, y, u, v = self.plotdata
if self.earth_relative:
transform = ccrs.PlateCarree()
else:
transform = u.metpy.cartopy_crs
wind_slice = (slice(None, None, self.skip[0]), slice(None, None, self.skip[1]))
self.handle = self.parent.ax.barbs(
x[wind_slice], y[wind_slice],
u.values[wind_slice], v.values[wind_slice],
color=self.color, pivot=self.pivot, length=self.barblength,
transform=transform)
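# A minimal usage sketch (hypothetical variable names): the wind components are given as a
# (u, v) tuple and thinned with skip before plotting.
#     bp = BarbPlot()
#     bp.data = xr.open_dataset('gfs.nc')        # hypothetical file, xarray assumed as xr
#     bp.field = ('u-component_of_wind_isobaric', 'v-component_of_wind_isobaric')
#     bp.level = 500 * units.hPa
#     bp.skip = (3, 3)
#     bp.earth_relative = True                   # GFS winds are earth-relative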
@exporter.export
class PlotObs(HasTraits):
"""The highest level class related to plotting observed surface and upperair data.
This class collects all common methods no matter whether plotting a upper-level or
surface data using station plots.
List of Traits:
* level
* time
* fields
* locations (optional)
* time_window (optional)
* formats (optional)
* colors (optional)
* plot_units (optional)
* vector_field (optional)
* vector_field_color (optional)
* vector_field_length (optional)
* vector_plot_units (optional)
* reduce_points (optional)
* fontsize (optional)
"""
parent = Instance(Panel)
_need_redraw = Bool(default_value=True)
level = Union([Int(allow_none=True), Instance(units.Quantity)], default_value=None)
level.__doc__ = """The level of the field to be plotted.
This is a value with units to choose the desired plot level. For example, selecting the
    850-hPa level, set this parameter to ``850 * units.hPa``. For surface data, this parameter
must be set to `None`.
"""
time = Instance(datetime, allow_none=True)
time.__doc__ = """Set the valid time to be plotted as a datetime object.
If a forecast hour is to be plotted the time should be set to the valid future time, which
can be done using the `~datetime.datetime` and `~datetime.timedelta` objects
from the Python standard library.
"""
time_window = Instance(timedelta, default_value=timedelta(minutes=0), allow_none=True)
time_window.__doc__ = """Set a range to look for data to plot as a timedelta object.
If this parameter is set, it will subset the data provided to be within the time and plus
or minus the range value given. If there is more than one observation from a given station
then it will keep only the most recent one for plotting purposes. Default value is to have
no range. (optional)
"""
fields = List(Unicode())
fields.__doc__ = """Name of the scalar or symbol fields to be plotted.
List of parameters to be plotted around station plot (e.g., temperature, dewpoint, skyc).
"""
locations = List(default_value=['C'])
locations.__doc__ = """List of strings for scalar or symbol field plotting locations.
    List of parameter locations for plotting parameters around the station plot (e.g.,
NW, NE, SW, SE, W, C). (optional)
"""
formats = List(default_value=[None])
formats.__doc__ = """List of the scalar, symbol, and text field data formats. (optional)
    List of scalar parameter formatters or mapping values (if symbol) for plotting text and/or
symbols around the station plot (e.g., for pressure variable
```lambda v: format(10 * v, '.0f')[-3:]```).
For symbol mapping the following options are available to be put in as a string:
current_weather, sky_cover, low_clouds, mid_clouds, high_clouds, and pressure_tendency.
For plotting text, use the format setting of 'text'.
"""
colors = List(Unicode(), default_value=['black'])
colors.__doc__ = """List of the scalar and symbol field colors.
List of strings that represent the colors to be used for the variable being plotted.
(optional)
"""
vector_field = List(default_value=[None], allow_none=True)
vector_field.__doc__ = """List of the vector field to be plotted.
    List of vector components to be combined and plotted from the center of the station plot
(e.g., wind components). (optional)
"""
vector_field_color = Unicode('black', allow_none=True)
vector_field_color.__doc__ = """String color name to plot the vector. (optional)"""
vector_field_length = Int(default_value=None, allow_none=True)
vector_field_length.__doc__ = """Integer value to set the length of the plotted vector.
(optional)
"""
reduce_points = Float(default_value=0)
reduce_points.__doc__ = """Float to reduce number of points plotted. (optional)"""
plot_units = List(default_value=[None], allow_none=True)
plot_units.__doc__ = """A list of the desired units to plot the fields in.
Setting this attribute will convert the units of the field variable to the given units for
plotting using the MetPy Units module, provided that units are attached to the DataFrame.
"""
vector_plot_units = Unicode(default_value=None, allow_none=True)
vector_plot_units.__doc__ = """The desired units to plot the vector field in.
Setting this attribute will convert the units of the field variable to the given units for
plotting using the MetPy Units module, provided that units are attached to the DataFrame.
"""
fontsize = Int(10)
fontsize.__doc__ = """An integer value to set the font size of station plots. Default
is 10 pt."""
def clear(self):
"""Clear the plot.
Resets all internal state and sets need for redraw.
"""
if getattr(self, 'handle', None) is not None:
self.handle.ax.cla()
self.handle = None
self._need_redraw = True
@observe('parent')
def _parent_changed(self, _):
"""Handle setting the parent object for the plot."""
self.clear()
@observe('fields', 'level', 'time', 'vector_field', 'time_window')
def _update_data(self, _=None):
"""Handle updating the internal cache of data.
Responds to changes in various subsetting parameters.
"""
self._obsdata = None
self.clear()
    # Can't be a Traitlet because trait notifications don't work with arrays
    # (the notification never happens).
@property
def data(self):
"""Pandas dataframe that contains the fields to be plotted."""
return self._data
@data.setter
def data(self, val):
self._data = val
self._update_data()
@property
def name(self):
"""Generate a name for the plot."""
ret = ''
ret += ' and '.join(f for f in self.fields)
if self.level is not None:
ret += f'@{self.level:d}'
return ret
@property
def obsdata(self):
"""Return the internal cached data."""
if getattr(self, '_obsdata', None) is None:
# Use a copy of data so we retain all of the original data passed in unmodified
data = self.data
# Subset for a particular level if given
if self.level is not None:
mag = getattr(self.level, 'magnitude', self.level)
data = data[data.pressure == mag]
# Subset for our particular time
if self.time is not None:
# If data are not currently indexed by time, we need to do so choosing one of
# the columns we're looking for
if not isinstance(data.index, pd.DatetimeIndex):
time_vars = ['valid', 'time', 'valid_time', 'date_time', 'date']
dim_times = [time_var for time_var in time_vars if
time_var in list(self.data)]
if not dim_times:
raise AttributeError(
'Time variable not found. Valid variable names are:'
f'{time_vars}')
data = data.set_index(dim_times[0])
if not isinstance(data.index, pd.DatetimeIndex):
# Convert our column of interest to a datetime
data = data.reset_index()
time_index = pd.to_datetime(data[dim_times[0]])
data = data.set_index(time_index)
# Works around the fact that traitlets 4.3 insists on sending us None by
# default because timedelta(0) is Falsey.
window = timedelta(minutes=0) if self.time_window is None else self.time_window
# Indexes need to be properly sorted for the slicing below to work; the
                # error you get if that's not the case is really convoluted, which is why
# we don't rely on users doing it.
data = data.sort_index()
data = data[self.time - window:self.time + window]
# Look for the station column
stn_vars = ['station', 'stn', 'station_id', 'stid']
dim_stns = [stn_var for stn_var in stn_vars if stn_var in list(self.data)]
if not dim_stns:
raise AttributeError('Station variable not found. Valid variable names are: '
f'{stn_vars}')
else:
dim_stn = dim_stns[0]
# Make sure we only use one observation per station
self._obsdata = data.groupby(dim_stn).tail(1)
return self._obsdata
@property
def plotdata(self):
"""Return the data for plotting.
The data arrays, x coordinates, and y coordinates.
"""
plot_data = {}
for dim_name in list(self.obsdata):
if dim_name.find('lat') != -1:
lat = self.obsdata[dim_name]
elif dim_name.find('lon') != -1:
lon = self.obsdata[dim_name]
else:
plot_data[dim_name] = self.obsdata[dim_name]
return lon.values, lat.values, plot_data
def draw(self):
"""Draw the plot."""
if self._need_redraw:
if getattr(self, 'handle', None) is None:
self._build()
self._need_redraw = False
@observe('colors', 'formats', 'locations', 'reduce_points', 'vector_field_color')
def _set_need_rebuild(self, _):
"""Handle changes to attributes that need to regenerate everything."""
# Because matplotlib doesn't let you just change these properties, we need
        # to trigger a clear and a full rebuild of the station plots
self.clear()
def _build(self):
"""Build the plot by calling needed plotting methods as necessary."""
lon, lat, data = self.plotdata
# Use the cartopy map projection to transform station locations to the map and
# then refine the number of stations plotted by setting a radius
if self.parent._proj_obj == ccrs.PlateCarree():
scale = 1.
else:
scale = 100000.
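        # reduce_point_density expects a radius in the projection's coordinates, so
        # reduce_points is effectively in degrees for PlateCarree and in multiples
        # of 100 km for projected (metre-based) coordinates.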
point_locs = self.parent._proj_obj.transform_points(ccrs.PlateCarree(), lon, lat)
subset = reduce_point_density(point_locs, self.reduce_points * scale)
self.handle = StationPlot(self.parent.ax, lon[subset], lat[subset], clip_on=True,
transform=ccrs.PlateCarree(), fontsize=self.fontsize)
for i, ob_type in enumerate(self.fields):
field_kwargs = {}
if len(self.locations) > 1:
location = self.locations[i]
else:
location = self.locations[0]
if len(self.colors) > 1:
field_kwargs['color'] = self.colors[i]
else:
field_kwargs['color'] = self.colors[0]
if len(self.formats) > 1:
field_kwargs['formatter'] = self.formats[i]
else:
field_kwargs['formatter'] = self.formats[0]
if len(self.plot_units) > 1:
field_kwargs['plot_units'] = self.plot_units[i]
else:
field_kwargs['plot_units'] = self.plot_units[0]
if hasattr(self.data, 'units') and (field_kwargs['plot_units'] is not None):
parameter = units.Quantity(data[ob_type][subset].values,
self.data.units[ob_type])
else:
parameter = data[ob_type][subset]
if field_kwargs['formatter'] is not None:
mapper = getattr(wx_symbols, str(field_kwargs['formatter']), None)
if mapper is not None:
field_kwargs.pop('formatter')
self.handle.plot_symbol(location, parameter, mapper, **field_kwargs)
else:
if self.formats[i] == 'text':
self.handle.plot_text(location, parameter, color=field_kwargs['color'])
else:
self.handle.plot_parameter(location, parameter, **field_kwargs)
else:
field_kwargs.pop('formatter')
self.handle.plot_parameter(location, parameter, **field_kwargs)
if self.vector_field[0] is not None:
vector_kwargs = {}
vector_kwargs['color'] = self.vector_field_color
vector_kwargs['plot_units'] = self.vector_plot_units
if hasattr(self.data, 'units') and (vector_kwargs['plot_units'] is not None):
u = units.Quantity(data[self.vector_field[0]][subset].values,
self.data.units[self.vector_field[0]])
v = units.Quantity(data[self.vector_field[1]][subset].values,
self.data.units[self.vector_field[1]])
else:
vector_kwargs.pop('plot_units')
u = data[self.vector_field[0]][subset]
v = data[self.vector_field[1]][subset]
if self.vector_field_length is not None:
vector_kwargs['length'] = self.vector_field_length
self.handle.plot_barb(u, v, **vector_kwargs)
def copy(self):
"""Return a copy of the plot."""
return copy.copy(self)
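# Example usage sketch (hypothetical data file and field names; PlotObs is
# normally attached to a MapPanel inside a PanelContainer from this same
# declarative interface):
#
#     import pandas as pd
#     df = pd.read_csv('surface_obs.csv')
#     obs = PlotObs()
#     obs.data = df
#     obs.time = datetime(2020, 1, 1, 12)
#     obs.level = None                      # surface data
#     obs.fields = ['air_temperature', 'dew_point_temperature']
#     obs.locations = ['NW', 'SW']
#     obs.vector_field = ['eastward_wind', 'northward_wind']
#     obs.reduce_points = 1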
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.12/_downloads/plot_temporal_whitening.py | 9 | 1849 | """
================================
Temporal whitening with AR model
================================
This script shows how to fit an AR model to data and use it
to temporally whiten the signals.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import fit_iir_model_raw
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_ecg-proj.fif'
raw = mne.io.read_raw_fif(raw_fname)
proj = mne.read_proj(proj_fname)
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
# Set up pick list: Gradiometers - bad channels
picks = mne.pick_types(raw.info, meg='grad', exclude='bads')
order = 5 # define model order
picks = picks[:5]
# Estimate AR models on raw data
b, a = fit_iir_model_raw(raw, order=order, picks=picks, tmin=60, tmax=180)
d, times = raw[0, int(1e4):int(2e4)]  # look at one channel from now on (integer sample indices)
d = d.ravel() # make flat vector
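# Convolving the signal with the AR coefficients `a` applies the inverse
# (whitening) filter: `innovation` below is the temporally whitened signal.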
innovation = signal.convolve(d, a, 'valid')
d_ = signal.lfilter(b, a, innovation) # regenerate the signal
d_ = np.r_[d_[0] * np.ones(order), d_] # dummy samples to keep signal length
###############################################################################
# Plot the different time series and PSDs
plt.close('all')
plt.figure()
plt.plot(d[:100], label='signal')
plt.plot(d_[:100], label='regenerated signal')
plt.legend()
plt.figure()
plt.psd(d, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(innovation, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(d_, Fs=raw.info['sfreq'], NFFT=2048, linestyle='--')
plt.legend(('Signal', 'Innovation', 'Regenerated signal'))
plt.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.13/_downloads/plot_temporal_whitening.py | 9 | 1849 | """
================================
Temporal whitening with AR model
================================
This script shows how to fit an AR model to data and use it
to temporally whiten the signals.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import fit_iir_model_raw
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_ecg-proj.fif'
raw = mne.io.read_raw_fif(raw_fname)
proj = mne.read_proj(proj_fname)
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
# Set up pick list: Gradiometers - bad channels
picks = mne.pick_types(raw.info, meg='grad', exclude='bads')
order = 5 # define model order
picks = picks[:5]
# Estimate AR models on raw data
b, a = fit_iir_model_raw(raw, order=order, picks=picks, tmin=60, tmax=180)
d, times = raw[0, int(1e4):int(2e4)]  # look at one channel from now on (integer sample indices)
d = d.ravel() # make flat vector
innovation = signal.convolve(d, a, 'valid')
d_ = signal.lfilter(b, a, innovation) # regenerate the signal
d_ = np.r_[d_[0] * np.ones(order), d_] # dummy samples to keep signal length
###############################################################################
# Plot the different time series and PSDs
plt.close('all')
plt.figure()
plt.plot(d[:100], label='signal')
plt.plot(d_[:100], label='regenerated signal')
plt.legend()
plt.figure()
plt.psd(d, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(innovation, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(d_, Fs=raw.info['sfreq'], NFFT=2048, linestyle='--')
plt.legend(('Signal', 'Innovation', 'Regenerated signal'))
plt.show()
| bsd-3-clause |
LennonLab/Micro-Encounter | fig-scripts/OLD-fig-scripts/NewFig2.py | 1 | 10871 | from __future__ import division
import matplotlib.pyplot as plt
import random
from random import shuffle
import pandas as pd
import numpy as np
import os
import sys
import statsmodels.formula.api as smf
from statsmodels.stats.outliers_influence import summary_table
mydir = os.path.expanduser('~/GitHub/Micro-Encounter')
sys.path.append(mydir+'/tools')
mydir2 = os.path.expanduser("~/")
dat = pd.read_csv(mydir + '/results/simulated_data/SimData.csv')
# DataFrame.convert_objects was removed from pandas; assuming all columns are
# numeric-like, coerce them explicitly instead.
dat = dat.apply(pd.to_numeric, errors='coerce').dropna()
color1 = 'm'
color2 = 'steelblue'
color3 = 'goldenrod'
#-------------------------DATA FILTERS------------------------------------------
dat = dat[dat['ResourceComplexityLevel'] == 2]
dat = dat[dat['TrophicComplexityLevel'] == 1]
dat['DormFreq'] = np.log10(dat['MeanDormFreq'])
dat = dat[np.isfinite(dat['DormFreq'])]
dat['Encounters'] = np.log10(dat['MeanEncounter'])
dat = dat[np.isfinite(dat['Encounters'])]
dat['Production'] = np.log10(dat['MeanIndProduction'])
dat = dat[np.isfinite(dat['Production'])]
dat['TotalAbundance'] = np.log10(dat['MeanTotalAbundance'])
dat = dat[np.isfinite(dat['TotalAbundance'])]
dat['ActiveAbundance'] = np.log10(dat['MeanTotalAbundance'] * (1 - dat['MeanDormFreq']))
dat = dat[np.isfinite(dat['ActiveAbundance'])]
#-------------------------------------------------------------------------------
#### plot figure ###############################################################
fs = 8 # fontsize
fig = plt.figure()
dat1 = dat[dat['SpatialComplexityLevel'] == 1]
dat2 = dat[dat['SpatialComplexityLevel'] == 2]
dat3 = dat[dat['SpatialComplexityLevel'] == 3]
label1 = 'White noise'
label2 = 'Aggregated w/ Random walks'
label3 = 'Aggregated w/ chemotaxis'
Xs = []
Ys = []
colors = []
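# Points from the three spatial-complexity levels are pooled and later drawn in a
# shuffled order so that no single group is plotted entirely on top of the others.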
#### PLOT 1 #################################################################
fig.add_subplot(2, 2, 1)
xlab = 'Average encounters, '+'$log$'+r'$_{10}$'
ylab = '% Dormancy, '+'$log$'+r'$_{10}$'
width = 1
f = smf.ols('DormFreq ~ Encounters', dat1).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
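# Columns 4:6 of summary_table's data array hold the lower/upper bounds of the
# 95% confidence interval for the mean prediction (hence alpha=0.05 above).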
x = dat1['Encounters'].tolist()
y = dat1['DormFreq'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color1]*len(x))
plt.plot(x, ylow, color = color1, alpha = 0.9, lw=width, label=label1)
plt.plot(x, yupp, color = color1, alpha = 0.9, lw=width)
f = smf.ols('DormFreq ~ Encounters', dat2).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
x = dat2['Encounters'].tolist()
y = dat2['DormFreq'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color2]*len(x))
plt.plot(x, ylow, color = color2, alpha = 0.9, lw=width, label=label2)
plt.plot(x, yupp, color = color2, alpha = 0.9, lw=width)
f = smf.ols('DormFreq ~ Encounters', dat3).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
x = dat3['Encounters'].tolist()
y = dat3['DormFreq'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color3]*len(x))
plt.plot(x, ylow, color = color3, alpha = 0.9, lw=width, label=label3)
plt.plot(x, yupp, color = color3, alpha = 0.9, lw=width)
indices = list(range(len(Xs)))  # list() so that shuffle() also works under Python 3
shuffle(indices)
for i in indices:
plt.scatter(Xs[i], Ys[i], color = colors[i], alpha = 0.6 , s = 5, linewidths = 0.0)
plt.ylabel(ylab, fontsize=fs+5)
plt.xlabel(xlab, fontsize=fs+5)
plt.ylim(-1.0, 0.1)
plt.tick_params(axis='both', which='major', labelsize=fs)
plt.legend(bbox_to_anchor=(-0.04, 1.05, 2.48, .2), loc=10, ncol=3, mode="expand",prop={'size':fs})
#### PLOT 2 ################################
fig.add_subplot(2, 2, 2)
Xs = []
Ys = []
colors = []
xlab = 'Average encounters, '+'$log$'+r'$_{10}$'
ylab = 'Individual production, '+'$log$'+r'$_{10}$'
f = smf.ols('Production ~ Encounters', dat1).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
x = dat1['Encounters'].tolist()
y = dat1['Production'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color1]*len(x))
plt.scatter(x, y, color = color1, alpha = 0.3 , s = 5, linewidths = 0.0)
plt.plot(x, ylow, color = color1, alpha = 0.9, lw=width, label=label1)
plt.plot(x, yupp, color = color1, alpha = 0.9, lw=width)
f = smf.ols('Production ~ Encounters', dat2).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
x = dat2['Encounters'].tolist()
y = dat2['Production'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color2]*len(x))
plt.scatter(x, y, color = color2, alpha = 0.3 , s = 5, linewidths = 0.0)
plt.plot(x, ylow, color = color2, alpha = 0.9, lw=width, label=label2)
plt.plot(x, yupp, color = color2, alpha = 0.9, lw=width)
f = smf.ols('Production ~ Encounters', dat3).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
x = dat3['Encounters'].tolist()
y = dat3['Production'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color3]*len(x))
plt.scatter(x, y, color = color3, alpha = 0.3 , s = 5, linewidths = 0.0)
plt.plot(x, ylow, color = color3, alpha = 0.9, lw=width, label=label3)
plt.plot(x, yupp, color = color3, alpha = 0.9, lw=width)
indices = list(range(len(Xs)))  # list() so that shuffle() also works under Python 3
shuffle(indices)
for i in indices:
plt.scatter(Xs[i], Ys[i], color = colors[i], alpha = 0.6 , s = 5, linewidths = 0.0)
plt.ylabel(ylab, fontsize=fs+5)
plt.xlabel(xlab, fontsize=fs+5)
plt.ylim(-2.0, 2.0)
#plt.xlim(0.1, 300)
plt.tick_params(axis='both', which='major', labelsize=fs)
#### PLOT 3 #################################################################
fig.add_subplot(2, 2, 3)
Xs = []
Ys = []
colors = []
xlab = 'Average encounters, '+'$log$'+r'$_{10}$'
ylab = 'Total abundance, '+'$log$'+r'$_{10}$'
f = smf.ols('TotalAbundance ~ Encounters', dat1).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
x = dat1['Encounters'].tolist()
y = dat1['TotalAbundance'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color1]*len(x))
plt.scatter(x, y, color = color1, alpha = 0.3 , s = 5, linewidths = 0.0)
plt.plot(x, ylow, color = color1, alpha = 0.9, lw=width, label=label1)
plt.plot(x, yupp, color = color1, alpha = 0.9, lw=width)
f = smf.ols('TotalAbundance ~ Encounters', dat2).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
x = dat2['Encounters'].tolist()
y = dat2['TotalAbundance'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color2]*len(x))
plt.scatter(x, y, color = color2, alpha = 0.3 , s = 5, linewidths = 0.0)
plt.plot(x, ylow, color = color2, alpha = 0.9, lw=width, label=label2)
plt.plot(x, yupp, color = color2, alpha = 0.9, lw=width)
f = smf.ols('TotalAbundance ~ Encounters', dat3).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
x = dat3['Encounters'].tolist()
y = dat3['TotalAbundance'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color3]*len(x))
plt.scatter(x, y, color = color3, alpha = 0.3 , s = 5, linewidths = 0.0)
plt.plot(x, ylow, color = color3, alpha = 0.9, lw=width, label=label3)
plt.plot(x, yupp, color = color3, alpha = 0.9, lw=width)
indices = list(range(len(Xs)))  # list() so that shuffle() also works under Python 3
shuffle(indices)
for i in indices:
plt.scatter(Xs[i], Ys[i], color = colors[i], alpha = 0.6 , s = 5, linewidths = 0.0)
plt.ylabel(ylab, fontsize=fs+5)
plt.xlabel(xlab, fontsize=fs+5)
#plt.xlim(0.15, 300)
plt.ylim(0.5, 3.1)
plt.tick_params(axis='both', which='major', labelsize=fs)
#### PLOT 4 #################################################################
fig.add_subplot(2, 2, 4)
Xs = []
Ys = []
colors = []
xlab = 'Average encounters, '+'$log$'+r'$_{10}$'
ylab = 'Active abundance, '+'$log$'+r'$_{10}$'
f = smf.ols('ActiveAbundance ~ Encounters', dat1).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
x = dat1['Encounters'].tolist()
y = dat1['ActiveAbundance'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color1]*len(x))
plt.scatter(x, y, color = color1, alpha = 0.3 , s = 5, linewidths = 0.0)
plt.plot(x, ylow, color = color1, alpha = 0.9, lw=width, label=label1)
plt.plot(x, yupp, color = color1, alpha = 0.9, lw=width)
f = smf.ols('ActiveAbundance ~ Encounters', dat2).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
x = dat2['Encounters'].tolist()
y = dat2['ActiveAbundance'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color2]*len(x))
plt.scatter(x, y, color = color2, alpha = 0.3 , s = 5, linewidths = 0.0)
plt.plot(x, ylow, color = color2, alpha = 0.9, lw=width, label=label2)
plt.plot(x, yupp, color = color2, alpha = 0.9, lw=width)
f = smf.ols('ActiveAbundance ~ Encounters', dat3).fit()
st, data, ss2 = summary_table(f, alpha=0.05)
mean_ci_low, mean_ci_upp = data[:,4:6].T
x = dat3['Encounters'].tolist()
y = dat3['ActiveAbundance'].tolist()
ylow = mean_ci_low.tolist()
yupp = mean_ci_upp.tolist()
x, y, yupp, ylow = zip(*sorted(zip(x, y, yupp, ylow)))
Xs.extend(x)
Ys.extend(y)
colors.extend([color3]*len(x))
plt.scatter(x, y, color = color3, alpha = 0.3 , s = 5, linewidths = 0.0)
plt.plot(x, ylow, color = color3, alpha = 0.9, lw=width, label=label3)
plt.plot(x, yupp, color = color3, alpha = 0.9, lw=width)
indices = list(range(len(Xs)))  # list() so that shuffle() also works under Python 3
shuffle(indices)
for i in indices:
plt.scatter(Xs[i], Ys[i], color = colors[i], alpha = 0.6 , s = 5, linewidths = 0.0)
plt.ylabel(ylab, fontsize=fs+5)
plt.xlabel(xlab, fontsize=fs+5)
#plt.xlim(0.15, 1000)
plt.ylim(-0.5, 3.1)
plt.tick_params(axis='both', which='major', labelsize=fs)
#### Final Format and Save #####################################################
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.savefig(mydir + '/results/figures/Fig2-Spatial_RC2-TC1.png', dpi=600, bbox_inches = "tight")
#plt.show()
| gpl-3.0 |
chengsoonong/crowdastro | crowdastro/active_learning/sampler.py | 1 | 2545 | """Active learning sampler base class.
Pool-based. Binary class labels.
Matthew Alger
The Australian National University
2016
"""
import numpy
import sklearn.metrics
from crowdastro.crowd.util import balanced_accuracy
class Sampler(object):
"""Pool-based active learning base class."""
def __init__(self, pool, labels, Classifier, classifier_params=None):
"""
pool: (n_samples, n_features) array of partially labelled data points.
labels: (n_samples,) masked array of binary labels.
classifier: Binary classifier class implementing a sklearn interface.
classifier_params: Parameters to pass to Classifier. Default None.
"""
self.pool = pool
self.labels = labels
self.Classifier = Classifier
self.classifier_params = classifier_params or {}
self.train()
def sample_index(self):
"""Finds index of the unlabelled point to sample."""
raise NotImplementedError()
def sample_indices(self, n):
"""Finds indices of the top n unlabelled points to sample."""
raise NotImplementedError()
def add_label(self, index, label, retrain=True):
"""Adds a label from an oracle.
index: Index of data point to label.
label: Label from the oracle.
"""
self.labels[index] = label
if retrain:
self.retrain()
def add_labels(self, indices, labels, retrain=True):
"""Adds labels from an oracle.
indices: Indices of data points to label.
labels: Labels from the oracle.
"""
for index, label in zip(indices, labels):
self.add_label(index, label, retrain=False)
if retrain:
self.retrain()
def train(self):
"""Trains the classifier."""
self.classifier = self.Classifier(**self.classifier_params)
self.classifier.fit(self.pool[~self.labels.mask],
self.labels[~self.labels.mask])
def retrain(self):
"""Retrains the classifier."""
# TODO(MatthewJA): Not sure if we should use warm starts here, so for
# now I won't.
self.train()
def score(self, test_xs, test_ts):
"""Finds cross-entropy error on test data."""
        # log_loss needs predicted probabilities rather than hard class labels.
        return sklearn.metrics.log_loss(
            test_ts,
            self.classifier.predict_proba(test_xs)[:, 1])
def ba(self, test_xs, test_ts):
"""Finds balanced accuracy on test data."""
return balanced_accuracy(test_ts, self.classifier.predict(test_xs))
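# A minimal example subclass (not part of the original module): uncertainty
# sampling, which queries the unlabelled point whose predicted probability is
# closest to 0.5. It assumes the wrapped classifier implements predict_proba
# and that `labels.mask` is a boolean array (True where a point is unlabelled).
class UncertaintySampler(Sampler):
    """Queries the unlabelled points the classifier is least certain about."""
    def sample_index(self):
        """Finds index of the most uncertain unlabelled point."""
        return int(self.sample_indices(1)[0])
    def sample_indices(self, n):
        """Finds indices of the top n most uncertain unlabelled points."""
        probs = self.classifier.predict_proba(self.pool)[:, 1]
        uncertainty = numpy.abs(probs - 0.5)
        # Exclude points that already have labels.
        uncertainty[~self.labels.mask] = numpy.inf
        return numpy.argsort(uncertainty)[:n]
# Usage sketch (oracle and data are assumed to exist):
#   sampler = UncertaintySampler(pool, labels, sklearn.linear_model.LogisticRegression)
#   index = sampler.sample_index()
#   sampler.add_label(index, oracle(index))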
| mit |
andretadeu/jhu-immuno | code/logRegrPropImportance.py | 1 | 2462 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 22 13:49:37 2015
@author: brian
"""
import pandas as pd
props = pd.read_csv('../data/peptide_9_props.csv')
immun = pd.read_excel('../input/journal.pcbi.1003266.s001-2.XLS')
# understanding the apply method
immun['length'] = immun.Peptide.apply(len)
immun = immun[immun.length ==9]
both = pd.merge(props, immun, left_on='PEPTIDE', right_on='Peptide')
train = both
train['y']=train.Immunogenicity.map({'non-immunogenic':0, 'immunogenic':1})
# dum2=pd.get_dummies(train.MHC)
# pd.concat(dum2,train)
train = train.drop(['Immunogenicity','PEPTIDE','Peptide','Species','MHC','Unnamed: 4897'],axis=1)
X = train.drop(['y'],axis=1)
y = train.y
# TASK 3: split the data into training and testing sets
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
# TASK 4: fit a logistic regression model and examine the coefficients
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression(C=1e1, penalty='l1', solver='liblinear')  # liblinear supports the L1 penalty
logreg.fit(X_train, y_train)
propImp =zip(X.columns, logreg.coef_[0])
pd.DataFrame(propImp).to_clipboard()
from sklearn import metrics
# predict and calculate AUC
y_pred_prob = logreg.predict_proba(X_test)[:, 1]
y_pred_class = logreg.predict(X_test)
print(metrics.roc_auc_score(y_test, y_pred_prob))
# 0.858192133098 with Tdf vectorizer and stop_words='english',ngram_range=(1, 2), max_features=1000
# 0.846367492831 adding 'OL_GENERAL','INSITE_PAGE','BROWSER_OS','BROWSER2'
# 0.846423953016 ngram_range=(1,3)
# 0.810 max_feature 100
# 0.8399 rang=(1,4) max features 2000
print(metrics.accuracy_score(y_test, y_pred_class))
# 74.7% with CountVectorizer()
# 65%
# 76% with TfidfVectorizer(stop_words='english',ngram_range=(1, 2), max_features=1000)
confusion = metrics.confusion_matrix(y_test, y_pred_class)
print(confusion)
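# sklearn's confusion_matrix puts true labels on the rows and predictions on the
# columns, so confusion[1,1] is the true positives and confusion[1,0] the false negatives.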
sensitivity =float(confusion[1,1])/(confusion[1,0] + confusion[1,1])
print('sensitivity is %f' % sensitivity)
# sensitivity is 50, 56% with CountVectorizer()
#Q why is sensitivity .09 with tdf if
specificity =float(confusion[0,0])/(confusion[0,0] + confusion[0,1])
print('specificity is %f' % specificity)
import matplotlib.pyplot as plt
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_prob)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.plot(fpr, tpr)
| mit |
galactics/space-api | doc/source/_static/hohmann.py | 2 | 2636 |
"""Example of Hohmann transfer
The orbit we are starting with is a TLE of the ISS. The amplitude of the maneuver is greatly
exaggerated compared to the ISS's real capability, but this makes the transfer easy to visualise.
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from beyond.io.tle import Tle
from beyond.dates import timedelta
from beyond.propagators.keplernum import KeplerNum
from beyond.env.solarsystem import get_body
from beyond.orbits.man import ImpulsiveMan
from beyond.propagators.listeners import ApsideListener, find_event
orb = Tle("""ISS (ZARYA)
1 25544U 98067A 18124.55610684 .00001524 00000-0 30197-4 0 9997
2 25544 51.6421 236.2139 0003381 47.8509 47.6767 15.54198229111731""").orbit()
start = orb.date
stop = timedelta(minutes=300)
step = timedelta(seconds=60)
# Changing the propagator to Keplerian, as SGP4 is not able to perform maneuvers
orb.propagator = KeplerNum(step, bodies=get_body("Earth"))
# Search for the next perigee
perigee = find_event(orb.iter(stop=stop, listeners=ApsideListener()), 'Periapsis')
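# For reference, an ideal two-impulse Hohmann transfer between circular orbits of
# radii r1 < r2 around a body with gravitational parameter mu costs
#   dv1 = sqrt(mu / r1) * (sqrt(2 * r2 / (r1 + r2)) - 1)   (first burn, at r1)
#   dv2 = sqrt(mu / r2) * (1 - sqrt(2 * r1 / (r1 + r2)))   (second burn, at r2)
# The 280 m/s and 270 m/s burns below are chosen for visual effect, as noted in the
# module docstring, rather than computed from these formulas.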
man1 = ImpulsiveMan(perigee.date, (280, 0, 0), frame="TNW")
orb.maneuvers = [man1]
dates1, alt1 = [], []
# Search for the next apogee after the first maneuver
apogee = find_event(orb.iter(start=perigee.date - step * 10, stop=stop, listeners=ApsideListener()), 'Apoapsis')
# apogee = find_event(orb.iter(stop=stop, listeners=ApsideListener()), 'Apoapsis', offset=1)
# Adding the second maneuver to the orbit
man2 = ImpulsiveMan(apogee.date, (270, 0, 0), frame="TNW")
orb.maneuvers.append(man2)
print(man1.date)
print(man2.date)
# Propagation through the two maneuvers
ephem = orb.ephem(start=start, stop=stop, step=step)
# graphs
plt.figure()
data = np.array(ephem)
dates = [x.date for x in ephem]
# Altitude in km
alt = (np.linalg.norm(data[:, :3], axis=1) - orb.frame.center.body.r) / 1000
events_dates = [perigee.date, apogee.date]
events_alt = (np.linalg.norm([perigee[:3], apogee[:3]], axis=1) - orb.frame.center.body.r) / 1000
plt.plot(dates, alt)
plt.plot([events_dates[0]], [events_alt[0]], 'ro', label="perigee")
plt.plot([events_dates[1]], [events_alt[1]], 'ko', label="apogee")
plt.ylabel("altitude (km)")
plt.legend()
plt.grid(linestyle=':', alpha=0.4)
plt.tight_layout()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')  # plt.gca(projection=...) is no longer supported by matplotlib
ax.view_init(elev=52, azim=140)
x, y, z = zip(perigee[:3], apogee[:3])
plt.plot(data[:, 0], data[:, 1], data[:, 2])
plt.plot([perigee[0]], [perigee[1]], [perigee[2]], 'ro')
plt.plot([apogee[0]], [apogee[1]], [apogee[2]], 'ko')
if "no-display" not in sys.argv:
plt.show()
| gpl-3.0 |