repo_name | path | copies | size | content | license
---|---|---|---|---|---
vladimiroff/humble-media | humblemedia/causes/tests.py | 1 | 2783 | import os
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase, client
from .models import Cause
class CauseTest(TestCase):
def setUp(self):
self.client = client.Client()
self.user = User.objects.create_user('panda', '[email protected]', 'lovebamboo')
self.bad_user = User.objects.create_user('redpanda', '[email protected]', 'redhearts')
self.valid_record = {
'title': 'Save the pandas',
'description': 'I want to save them all',
'creator': self.user,
'target': 50,
'is_published': True,
'tags': ['big', 'white', 'black', 'fluffy']
}
def test_logged_user_add_cause(self):
self.client.login(username='panda', password='lovebamboo')
before_add = Cause.objects.count()
response = self.client.post('/causes/add/', self.valid_record)
after_add = Cause.objects.count()
self.assertEqual(before_add + 1, after_add)
def test_not_logged_user_add_cause(self):
before_add = Cause.objects.count()
response = self.client.post('/causes/add/', self.valid_record)
after_add = Cause.objects.count()
self.assertEqual(before_add, after_add)
def test_admin_edit_cause(self):
self.client.login(username='panda', password='lovebamboo')
self.client.post('/causes/add/', self.valid_record)
cause = Cause.objects.filter(title='Save the pandas')[0]
self.valid_record['description'] += " now"
response = self.client.post(
'/causes/{}/edit/'.format(cause.pk),
self.valid_record, follow=True)
edit_project = Cause.objects.filter(title='Save the pandas')[0]
self.assertEqual(edit_project.description, 'I want to save them all now')
def test_not_admin_edit_cause(self):
self.client.login(username='redpanda', password='redhearts')
self.client.post('/causes/add/', self.valid_record)
cause = Cause.objects.filter(title='Save the pandas')[0]
response = self.client.post('/causes/{}/edit/'.format(cause.pk), self.valid_record)
edit_project = Cause.objects.filter(title='Save the pandas')[0]
self.assertEqual(edit_project.description, 'I want to save them all')
def test_admin_delete_cause(self):
self.client.login(username='panda', password='lovebamboo')
self.client.post('/causes/add/', self.valid_record)
cause = Cause.objects.filter(title='Save the pandas')[0]
before_delete = Cause.objects.count()
response = self.client.post('/causes/{}/delete/'.format(cause.pk))
after_delete = Cause.objects.count()
self.assertEqual(before_delete - 1, after_delete)
| mit |
trichter/sito | bin/codacorr.py | 1 | 5149 | #!/usr/bin/env python
# by TR
#from glob import glob
#from miic.core.stretch_mod import stretch_mat_creation, velocity_change_estimete, time_windows_creation
#from sito import util
#from sito.stream import Stream, read
#from sito.trace import Trace
#from sito.util.imaging import getDataWindow
#from sito.util.main import daygen, streamyeargen2, streamdaygen, timegen, streamtimegen, \
# yeargen
#from sito.xcorr import xcorrf, timeNorm
#import logging
#import matplotlib.pyplot as plt
#import numpy as np
#import os.path
#import warnings
#from obspy.core.util.decorator import deprecated
#import itertools
import logging
from sito import Stream, read
from obspy.core.event import readEvents
from sito.data import IPOC
from obspy.core.util.attribdict import AttribDict
import numpy as np
import os.path
import glob
from progressbar import ProgressBar
log = logging.getLogger(__name__)
data = IPOC()
def get_event_id(expr):
if '/' in expr:
expr = expr.split('/', 1)[1]
expr = expr.replace('NLL.', '').replace('Origin#', '')
return expr
def cut_events(in_, out):
print 'read events...'
catalog = readEvents(in_, 'QUAKEML')
print 'cut events...'
for event in ProgressBar()(catalog):
oid = get_event_id(event.origins[0].resource_id.getQuakeMLURI())
ori = event.origins[0]
etime = ori.time
#print 'Select', event
st = Stream()
for arrival in ori.arrivals:
arrival.pick_id.convertIDToQuakeMLURI()
pick = arrival.pick_id.getReferredObject()
if not pick:
print 'FAIL to get pick from arrival'
continue
ptime = pick.time
seed_id = pick.waveform_id.getSEEDString()
try:
st1 = Stream(data.client.getWaveform(*(seed_id.split('.') + [ptime - 50, ptime + 250])))
except Exception as ex:
print '%s for %s' % (ex, seed_id)
continue
st1.merge()
#print 'load %s %s %.1f' % (seed_id, pick.phase_hint, ptime - etime)
st1[0].stats['event'] = AttribDict(
id=event.resource_id.resource_id,
origin_id=oid,
etime=etime, ptime=ptime,
lat=ori.latitude, lon=ori.longitude,
depth=ori.depth, rms=ori.quality.standard_error,
mag=event.magnitudes[0].mag)
st += st1
st.write(out % oid, 'Q')
def acorr(in_, out, tw, filter_):
print 'acorr events'
for fname in ProgressBar()(glob.glob(in_)):
st1 = read(fname)
st1.setHI('filter', '')
st1.filter2(*filter_)
for tr in st1:
etime = tr.stats.event.etime
ptime = tr.stats.event.ptime
stime = tr.stats.starttime
start_corr = etime + 2.2 * (ptime - etime)
end_corr = start_corr + tw
data_before = tr.slice(stime + 10, ptime - 10).data
rms_before = np.sqrt(np.sum(data_before ** 2) / len(data_before))
data_after = tr.slice(start_corr, end_corr).data
rms_after = np.sqrt(np.sum(data_after ** 2) / len(data_after))
#if rms_after < rms_before * 1.5:
# continue
tr.stats.event.rms_ratio = rms_after / rms_before
tr.trim(start_corr, end_corr)
tr.timeNorm('runningmean', 20)
tr.taper(p=5. / (end_corr - start_corr))
tr.addZeros(tw)
tr.acorr(shift=tw)
ofname = os.path.splitext(os.path.basename(fname))[0]
st1.write(out % ofname, 'Q')
def stack(in_, out):
print 'loading files for stacking...'
st_sum = Stream()
st1 = read(in_)
print 'stack traces...'
for station in ProgressBar()('PB01 PB02 PB03 PB04 PB05 PB06 PB07 PB08 PB09 PB10 PB11 PB12 PB13 PB14 PB15 HMBCX MNMCX PATCX PSGCX'.split()):
#st2 = st1.select(station=station, expr='st.event.rms_ratio > 1.5 and st.sampling_rate>40 and -25<st.event.lat<-18 and -67.8<st.event.lon<-66.0')
st2 = st1.select(station=station, expr='st.event.rms_ratio > 1.5 and st.sampling_rate>40')
if len(st2) == 0:
print 'No traces for station %s' % station
continue
tr = st2.simpleStack()
tr.stats.label = station
st_sum += tr
st_sum.write(out, 'Q')
if __name__ == '__main__':
EVENTS = '/home/richter/Data/picks/seiscomp_quakeml/filter/R?_mag>3.xml'
OUT = '/home/richter/Data/IPOC/local_events/2011_mag>3/%s'
#cut_events(EVENTS, OUT)
IN = '/home/richter/Data/IPOC/local_events/2011_mag>3/*.QHD'
OUT = '/home/richter/Data/IPOC/local_events/acorr/2011_mag>3_2Hz/%s'
#acorr(IN, OUT, 50, (2, None))
IN = '/home/richter/Data/IPOC/local_events/acorr/2011_mag>3_2Hz/*.QHD'
OUT = '/home/richter/Data/IPOC/local_events/acorr/2011_mag>3_2Hz_stacked'
stack(IN, OUT)
#plotting:
#ms_s.plot_(annotate=True, start=45, figtitle='loacal coda acorr >2Hz', plotinfo=['count'])
| mit |
wei-Z/Python-Machine-Learning | self_practice/Chapter 5 Principal Component Analysis.py | 1 | 6721 | # Chapter 5 Compressing Data via Dimensionality Reduction
# Principal Components Analysis
# Total and explained variance
'''
First, we will start by loading the Wine dataset that we have been working with
in Chapter 4'''
import pandas as pd
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
'''
Next, we will process the Wine data into separate training and test sets using 70
percent and 30 percent of the data, respectively, and standardize it to unit variance.'''
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=0.3, random_state=0)
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
'''
We will use the linalg.eig function from Numpy to obtain the eigenpairs of the Wine
covariance matrix'''
import numpy as np
cov_mat = np.cov(X_train_std.T)
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)
print '\nEigenvalues \n%s' % eigen_vals
'''
Using the numpy.cov function, we computed the covariance matrix of the
standardized training dataset. Using the linalg.eig function, we performed the
eigendecomposition that yielded a vector (eigen_vals) consisting of 13 eigenvalues
and the corresponding eigenvectors stored as columns in a 13x13 -dimensional
matrix (eigen_vecs).
'''
'''
Since we want to reduce the dimensionality of our dataset by compressing it onto
a new feature subspace, we only select the subset of the eigenvectors (principal
components) that contains most of the information (variance). Since the eigenvalues
define the magnitude of the eigenvectors, we have to sort the eigenvalues by
decreasing magnitude; we are interested in the top k eigenvectors based on the
values of their corresponding eigenvalues.
But before we collect those k most informative eigenvectors, let's plot the variance
explained ratios of the eigenvalues
'''
'''
Using the NumPy cumsum function, we can then calculate the cumulative sum of
explained variances, which we will plot via matplotlib's step function: '''
tot = sum(eigen_vals)
var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)]
cum_var_exp = np.cumsum(var_exp)
import matplotlib.pyplot as plt
plt.bar(range(1, 14), var_exp, alpha=0.5, align='center', label='individual explained variance')
plt.step(range(1,14), cum_var_exp, where='mid', label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
plt.show()
'''
The resulting plot indicates that the first principal component alone accounts for
40 percent of the variance. Also, we can see that the first two principal components
combined explain almost 60 percent of the variance in the data:
'''
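# A quick numeric check of the statement above (added for illustration): the
# cumulative ratios stored in cum_var_exp give the fractions quoted in the
# text for the first one and the first two principal components.
print('Cumulative explained variance of the first two components: %s'
      % (cum_var_exp[:2],))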
# Feature transformation
# We start by sorting the eigenpairs by decreasing order of the eigenvalues:
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i]) for i in range(len(eigen_vals))]
eigen_pairs.sort(reverse=True)
w = np.hstack((eigen_pairs[0][1][:, np.newaxis], # eigen_pairs[0][1].shape =(13,)
eigen_pairs[1][1][:, np.newaxis])) # eigen_pairs[0][1][:, np.newaxis].shape= (13, 1)
print 'Matrix W: \n', w
'''
By executing the preceding code, we have created a 13x2-dimensional projection
matrix w from the top two eigenvectors. Using the projection matrix, we can now
transform a sample x (represented as 1x13-dimensional row vector) onto the PCA
subspace, obtaining x', now a two-dimensional sample vector consisting of two new
features: x' = xW
'''
X_train_std[0].dot(w) # 1x13 13x2 = 1x2
# similarly, we can transform the entire 124 x 13-dimensional training dataset onto the two principal components
# by calculating the matrix dot product:
X_train_pca = X_train_std.dot(w) # 124x13 13x2 = 124x2
'''
Lastly, let's visualize the transformed Wine training set, now stored as an
124 x 2-dimensional matrix, in a two-dimensional scatterplot:
'''
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
    plt.scatter(X_train_pca[y_train==l, 0], X_train_pca[y_train==l, 1], c=c, label=l, marker=m)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
# Principal component analysis in scikit-learn
from Plot_Decision_Regions import plot_decision_regions
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
pca = PCA(n_components=2) # Number of components to keep. if n_components is not set all components are kept
lr = LogisticRegression()
X_train_pca = pca.fit_transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
lr.fit(X_train_pca, y_train)
plot_decision_regions(X_train_pca, y_train, classifier=lr)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend(loc='lower left')
plt.show()
'''
Let's plot the decision regions of the logistic regression on the transformed test
dataset to see if it can separate the classes well.
'''
plot_decision_regions(X_test_pca, y_test, classifier=lr)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend(loc='lower left')
plt.show()
'''
If we are interested in the explained variance ratios of the different principal
components, we can simply initialize the PCA class with the n_components parameter
set to None, so all principal components are kept and the explained variance ratio can
then be accessed via the explained_variance_ratio_ attribute.
'''
pca = PCA(n_components=None)
X_train_pca = pca.fit_transform(X_train_std)
pca.explained_variance_ratio_
'''
From Wikipedia:
The eigendecomposition of a symmetric positive semidefinite (PSD) matrix yields an
orthogonal basis of eigenvectors, each of which has a nonnegative eigenvalue. The
orthogonal decomposition of a PSD matrix is used in multivariate analysis, where the
sample covariance matrices are PSD. This orthogonal decomposition is called principal
components analysis (PCA) in statistics. PCA studies linear relations among variables.
PCA is performed on the covariance matrix or the correlation matrix (in which each
variable is scaled to have its sample variance equal to one). For the covariance or
correlation matrix, the eigenvectors correspond to principal components and the
eigenvalues to the variance explained by the principal components. Principal
component analysis of the correlation matrix provides an orthonormal eigen-basis for
the space of the observed data: In this basis, the largest eigenvalues correspond to
the principal components that are associated with most of the covariability among a
number of observed data.
'''
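# A minimal numerical check of the quoted statement (added for illustration):
# the covariance matrix of the standardized training data is symmetric PSD, so
# an eigendecomposition with np.linalg.eigh yields non-negative eigenvalues and
# an orthonormal set of eigenvectors.
evals_check, evecs_check = np.linalg.eigh(cov_mat)
print('all eigenvalues non-negative: %s' % np.all(evals_check > -1e-10))
print('eigenvectors orthonormal: %s'
      % np.allclose(evecs_check.T.dot(evecs_check), np.eye(cov_mat.shape[0])))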
| mit |
soulmachine/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 15 | 2130 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 4))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i+1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree %d" % degrees[i])
plt.show()
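# A small numeric complement to the plots (added for illustration): compare the
# training error with the error on freshly drawn samples from the same noisy
# true function. The degree-15 model fits the training points far better than
# the fresh ones, which is the overfitting described in the docstring above.
X_fresh = np.sort(np.random.rand(n_samples))
y_fresh = true_fun(X_fresh) + np.random.randn(n_samples) * 0.1
for degree in degrees:
    model = Pipeline([("polynomial_features",
                       PolynomialFeatures(degree=degree, include_bias=False)),
                      ("linear_regression", LinearRegression())])
    model.fit(X[:, np.newaxis], y)
    train_mse = np.mean((model.predict(X[:, np.newaxis]) - y) ** 2)
    fresh_mse = np.mean((model.predict(X_fresh[:, np.newaxis]) - y_fresh) ** 2)
    print("Degree %2d: train MSE %.4f, fresh-sample MSE %.4f"
          % (degree, train_mse, fresh_mse))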
| bsd-3-clause |
russel1237/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
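# A minimal, self-contained sketch of the same chunked partial_fit pattern on
# synthetic data (added for illustration; the array and names below are
# arbitrary and not part of the original example):
rng_demo = np.random.RandomState(42)
X_demo = rng_demo.rand(1000, 16)
online_km = MiniBatchKMeans(n_clusters=4, random_state=rng_demo)
for start in range(0, X_demo.shape[0], 100):
    online_km.partial_fit(X_demo[start:start + 100])
print('demo: learned %d cluster centers of dimension %d'
      % online_km.cluster_centers_.shape)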
| bsd-3-clause |
RachitKansal/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
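# A quick check of the description above (added for illustration): the isotonic
# fit is non-decreasing by construction, and the training errors of the two
# fits can be compared directly.
assert np.all(np.diff(y_) >= 0)
print("Training MSE - isotonic: %.1f, linear: %.1f"
      % (np.mean((y - y_) ** 2),
         np.mean((y - lr.predict(x[:, np.newaxis])) ** 2)))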
| bsd-3-clause |
spillai/procgraph | src/procgraph_mpl/plot_generic.py | 1 | 2727 | import warnings
from numpy.ma.testutils import assert_equal
__all__ = ['PlotGeneric']
class PlotGeneric(object):
'''
Produces images using matplotlib. Good for custom animation.
'''
def __init__(self, width, height, transparent, tight, keep=False):
self.width = width
self.height = height
self.transparent = transparent
self.tight = tight
self.keep = keep
self.figure = None
self.warned = False
def init_figure(self):
# TODO: remove from here
from . import pylab, pylab2rgb
pylab.rc('xtick', labelsize=8)
pylab.rc('ytick', labelsize=8)
''' Creates figure object and axes '''
self.figure = pylab.figure(frameon=False,
figsize=(self.width / 100.0,
self.height / 100.0))
self.axes = pylab.axes()
self.figure.add_axes(self.axes)
pylab.draw_if_interactive = lambda: None
pylab.figure(self.figure.number)
def get_rgb(self, function):
""" function(pylab) """
if self.figure is None:
self.init_figure()
from . import pylab, pylab2rgb
pylab.figure(self.figure.number)
function(pylab)
# http://matplotlib.sourceforge.net/users/tight_layout_guide.html
try:
pylab.tight_layout()
except Exception as e:
msg = ('Could not call tight_layout(); available only on '
'Matplotlib >=1.1 (%s)' % e)
if not self.warned:
warnings.warn(msg)
self.warned = True
# There is a bug that makes the image smaller than desired
# if tight is True
pixel_data = pylab2rgb(transparent=self.transparent, tight=self.tight)
from procgraph_images import image_pad # need here otherwise circular
# So we check and compensate
shape = pixel_data.shape[0:2]
shape_expected = (self.height, self.width)
if shape != shape_expected:
msg = ('pylab2rgb() returned size %s instead of %s.' %
(shape, shape_expected))
msg += ' I will pad the image with white.'
warnings.warn(msg)
pixel_data = image_pad(pixel_data, shape_expected,
bgcolor=[1, 1, 1])
assert_equal(pixel_data.shape[0:2], shape_expected)
if not self.keep:
pylab.close(self.figure.number)
self.figure = None
else:
pass
# pylab.cla()
# pylab.clf()
return pixel_data
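# A minimal usage sketch (added for illustration): because this module uses
# relative imports, run it as part of the package (e.g.
# 'python -m procgraph_mpl.plot_generic') or import PlotGeneric from there.
# get_rgb() expects a callback that receives the pylab module and draws into
# the current figure.
if __name__ == '__main__':
    def _demo_plot(pylab):
        pylab.plot([0, 1, 2], [0, 1, 4], 'k-')
        pylab.title('PlotGeneric demo')

    demo = PlotGeneric(width=320, height=240, transparent=False, tight=False)
    rgb = demo.get_rgb(_demo_plot)
    print('produced an image array with shape %s' % (rgb.shape,))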
| lgpl-3.0 |
jungla/ICOM-fluidity-toolbox | Detectors/offline_advection/plot_Richardson_2D_const_both_fast.py | 1 | 9057 | #!~/python
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
import myfun
import numpy as np
import lagrangian_stats
import csv
import advect_functions
# read offline
print 'reading particles'
exp = 'm_25_1b'
label = 'm_25_1b'
filename2D_BW = 'RD_2D_m_25_2b_particles_big.csv'
filename2D_B = 'RD_2D_m_25_1b_particles_big.csv'
tt_BW = 500 # IC + 24-48 included
tt_B = 500 # IC + 24-48 included
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = 1.*np.cumsum(dl)
depths = [5, 10, 15]
depthid = [1, 2, 3]
nl = len(depths)
RD_2D_BW = [] #np.zeros((tt_BW,nl))
time2D_BW = []
RD_2D_B = [] #np.zeros((tt_B,nl))
time2D_B = []
with open(filename2D_B, 'r') as csvfile:
spamreader = csv.reader(csvfile)
spamreader.next()
for row in spamreader:
time2D_B.append(row[0])
RD_2D_B.append(row[1:])
with open(filename2D_BW, 'r') as csvfile:
spamreader = csv.reader(csvfile)
spamreader.next()
for row in spamreader:
time2D_BW.append(row[0])
RD_2D_BW.append(row[1:])
time2D_B = np.asarray(time2D_B).astype(float)
RD_2D_B = np.asarray(RD_2D_B).astype(float)
time2D_BW = np.asarray(time2D_BW).astype(float)
RD_2D_BW = np.asarray(RD_2D_BW).astype(float)
time = time2D_BW[:-1]
# cut particles to time of interest
timeD = np.asarray(range(0,3*86400,1440))
vtime = time - time[0]
# read 3D eps and get eps at particle's location
drateD_BW = np.zeros((len(timeD),len(Zlist)))
drateD_B = np.zeros((len(timeD),len(Zlist)))
for t in range(len(timeD)):
# print 'read drate', t
with open('../../2D/U/drate_3day/z/drate_m_25_2b_particles_'+str(t)+'_z.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
drateD_BW[t,:] = row[:]
for t in range(len(timeD)):
# print 'read drate', t
with open('../../2D/U/drate_3day/z/drate_m_25_1b_particles_'+str(t)+'_z.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
drateD_B[t,:] = row[:]
# test drate
#plt.contourf(timeD/86400.,Zlist,np.log10(np.rot90(drateD_B)),30)
#plt.colorbar()
#plt.savefig('./plot/'+label+'/drate_B_'+label+'.eps')
#plt.close()
#
#plt.contourf((timeD)/3600.+48,Zlist,np.log10(np.rot90(drateD_BW)),30)
#plt.colorbar()
#plt.savefig('./plot/'+label+'/drate_BW_'+label+'.eps')
#print './plot/'+label+'/drate_BW_'+label+'.eps'
#plt.close()
# normalized RD
fig = plt.figure(figsize=(8, 5))
R2D5_B, = plt.plot(np.log10(time2D_B[:]/3600.),np.log10(RD_2D_B[:,0]/time2D_B[:]**3),'b',linewidth=1)
R2D10_B, = plt.plot(np.log10(time2D_B[:]/3600.),np.log10(RD_2D_B[:,1]/time2D_B[:]**3),'b--',linewidth=1)
R2D15_B, = plt.plot(np.log10(time2D_B[:]/3600.),np.log10(RD_2D_B[:,2]/time2D_B[:]**3),'b-.',linewidth=1)
R2D5_BW, = plt.plot(np.log10(time2D_BW[:]/3600.),np.log10(RD_2D_BW[:,0]/time2D_BW[:]**3),'k',linewidth=1)
R2D10_BW, = plt.plot(np.log10(time2D_BW[:]/3600.),np.log10(RD_2D_BW[:,1]/time2D_BW[:]**3),'k--',linewidth=1)
R2D15_BW, = plt.plot(np.log10(time2D_BW[:]/3600.),np.log10(RD_2D_BW[:,2]/time2D_BW[:]**3),'k-.',linewidth=1)
#B
#intm = 0.3*86400; intM = 1.5*86400; interval = (vtime > intm) * (vtime < intM)
#R2D5_B, = plt.plot(time2D_B[interval],RD_2D_B[interval,0]/time2D_B[interval]**3,'b',linewidth=3.5)
#
#intm = 0.4*86400; intM = 3*86400; interval = (vtime > intm) * (vtime < intM)
#R2D10_B, = plt.plot(time2D_B[interval],RD_2D_B[interval,1]/time2D_B[interval]**3,'b--',linewidth=3.5)
#
#intm = 0.5*86400; intM = 3*86400; interval = (vtime > intm) * (vtime < intM)
#R2D15_B, = plt.plot(time2D_B[interval],RD_2D_B[interval,2]/time2D_B[interval]**3,'b-.',linewidth=3.5)
intm = 0.3*86400; intM = 2.5*86400; interval = (vtime > intm) * (vtime < intM)
R2D5_BW, = plt.plot(np.log10(time2D_BW[interval]/3600.),np.log10(RD_2D_BW[interval,0]/time2D_BW[interval]**3),'k',linewidth=3.5)
intm = 0.4*86400; intM = 3*86400; interval = (vtime > intm) * (vtime < intM)
R2D10_BW, = plt.plot(np.log10(time2D_BW[interval]/3600.),np.log10(RD_2D_BW[interval,1]/time2D_BW[interval]**3),'k--',linewidth=3.5)
intm = 0.45*86400; intM = 4*86400; interval = (vtime > intm) * (vtime < intM)
R2D15_BW, = plt.plot(np.log10(time2D_BW[interval]/3600.),np.log10(RD_2D_BW[interval,2]/time2D_BW[interval]**3),'k-.',linewidth=3.5)
#plt.legend((R2D5_BW,R2D10_BW,R2D15_BW,R2D5_B,R2D10_B,R2D15_B),('$BW25_m$ 5m','$BW25_m$ 10m','$BW25_m$ 15m','$B25_m$ 5m','$B25_m$ 10m','$B25_m$ 15m'), loc=1,fontsize=16,ncol=2)
plt.legend((R2D5_BW,R2D10_BW,R2D15_BW),('5m','10m','15m'), loc=1,fontsize=16,ncol=3)
plt.xlabel('Time $[hr]$',fontsize=20)
plt.ylabel('$log(\sigma^2_D t^{-3})$ ',fontsize=20)
plt.yticks(fontsize=16)
plt.ylim()
#ind = [0.,12.,24.,36.,48.,60.,72.,84.,96.,108.,120.,132.,144.,156.,168.,180.,192.]
ind = np.asarray([0.,12.,24.,48.,96.,192.])
#ind = np.linspace(0,24*8,7)
ind[0] = 1440/3600.
vind = np.log10(ind);# vind[0]=np.log10(1440/3600.)
plt.xticks(vind,['48.4','60','72','96','144','240'],fontsize=16)
plt.tight_layout()
plt.savefig('./plot/'+label+'/RDt3_2_BW_'+label+'.eps')
print './plot/'+label+'/RDt3_2_BW_'+label+'.eps'
plt.close()
# Rich 2D-3D
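# Note (added): the quantity formed below, sigma_D^2 t^-3 / epsilon, is the
# dimensionless prefactor of the Richardson t^3 regime of relative dispersion
# (sigma_D^2 ~ g eps t^3), so an approximately constant value over the fitted
# window is what identifies that regime.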
#fig = plt.figure(figsize=(8, 5))
fig, ax1 = plt.subplots(figsize=(8, 5))
#intm = 0.3*86400; intM = 1.5*86400; interval = (vtime > intm) * (vtime < intM)
#Rich = RD_2D_B[interval,0]/time2D_B[interval]**3/(drateD_B[interval,depths[0]])
#print '2D 5m: mean', np.mean(Rich), 'std', np.std(Rich)
#R2D5_B, = plt.plot(time2D_B[interval]/3600+48.,Rich,'b.',linewidth=2)
#
#intm = 0.4*86400; intM = 3*86400; interval = (vtime > intm) * (vtime < intM)
#Rich = RD_2D_B[interval,1]/time2D_B[interval]**3/(drateD_B[interval,depths[1]])
#print '2D 10m: mean', np.mean(Rich), 'std', np.std(Rich)
#R2D10_B, = plt.plot(time2D_B[interval]/3600+48.,Rich,'b--',linewidth=2)
#
#intm = 0.5*86400; intM = 3*86400; interval = (vtime > intm) * (vtime < intM)
#Rich = RD_2D_B[interval,2]/time2D_B[interval]**3/(drateD_B[interval,depths[2]])
#print '2D 15m: mean', np.mean(Rich), 'std', np.std(Rich)
#R2D15_B, = plt.plot(time2D_B[interval]/3600+48.,Rich,'b',linewidth=2)
#
# BW
intm = 0.3*86400; intM = 2.5*86400; interval = (vtime > intm) * (vtime < intM)
Rich = RD_2D_BW[interval,0]/time2D_BW[interval]**3/(drateD_BW[interval,depths[0]])
print 'Rich 2D 5m: mean', np.mean(Rich), 'std', np.std(Rich)
print 'Drate 2D 5m: mean', np.mean(drateD_BW[interval,depths[0]]), 'std', np.std(drateD_BW[interval,depths[0]])
R2D5_BW, = ax1.plot(time2D_BW[interval]/3600+48.,Rich,'r',linewidth=2)
intm = 0.4*86400; intM = 3*86400; interval = (vtime > intm) * (vtime < intM)
Rich = RD_2D_BW[interval,1]/time2D_BW[interval]**3/(drateD_BW[interval,depths[1]])
print 'Rich 2D 10m: mean', np.mean(Rich), 'std', np.std(Rich)
print 'Drate 2D 10m: mean', np.mean(drateD_BW[interval,depths[1]]), 'std', np.std(drateD_BW[interval,depths[1]])
R2D10_BW, = ax1.plot(time2D_BW[interval]/3600.+48,Rich,'r--',linewidth=2)
intm = 0.45*86400; intM = 3*86400; interval = (vtime > intm) * (vtime < intM)
Rich = RD_2D_BW[interval,2]/time2D_BW[interval]**3/(drateD_BW[interval,depths[2]])
print 'Rich 2D 15m: mean', np.mean(Rich), 'std', np.std(Rich)
print 'Drate 2D 15m: mean', np.mean(drateD_BW[interval,depths[2]]), 'std', np.std(drateD_BW[interval,depths[2]])
R2D15_BW, = ax1.plot(time2D_BW[interval]/3600.+48,Rich,'r-.',linewidth=2)
#for tic in plt.xaxis.get_minor_ticks():
# tic.tick1On = tic.tick2On = False
#plt.legend((R2D1,R3D1,R2D5,R3D5,R2D17,R3D17),('2D 5m','3D 5m','2D 10m','3D 10m','2D 15m','3D 15m'),loc=3,fontsize=16,ncol=3)
#plt.legend((R2D5_BW,R2D10_BW,R2D15_BW,R2D5_B,R2D10_B,R2D15_B),('$BW25_m$ 5m','$BW25_m$ 10m','$BW25_m$ 15m','$B25_m$ 5m','$B25_m$ 10m','$B25_m$ 15m'),loc=2,fontsize=16,ncol=2)
dummy5, = ax1.plot([],[],'k',linewidth=2)
dummy10, = ax1.plot([],[],'k--',linewidth=2)
dummy15, = ax1.plot([],[],'k-.',linewidth=2)
ax1.legend((dummy5,dummy10,dummy15),('5m','10m','15m'),loc=1,fontsize=14,ncol=3)
#import matplotlib.lines as mlines
#l5 = mlines.Line2D([], [],'-',color='black', label='5m')
#l10 = mlines.Line2D([], [],'--',color='black', label='10m')
#l15 = mlines.Line2D([], [],'-.',color='black', label='15m')
#ax1.legend(handles=[l5,l10,l15],loc=1,fontsize=16,ncol=3)
ax1.set_xlabel('Time $[hr]$',fontsize=20)
ax1.set_ylabel('$\sigma^2_D t^{-3} \epsilon^{-1}$ ',fontsize=20,color='r')
for tl in ax1.get_yticklabels():
tl.set_color('r')
#plt.ylim(0.02,0.18)
#plt.yticks(fontsize=16)
ind = np.linspace(48,24*3+48,13)
ind[0] = 52
ax2 = ax1.twinx()
DR5, = ax2.plot(drateD_BW[:,depths[0]],'b',linewidth=2)
DR10, = ax2.plot(drateD_BW[:,depths[1]],'b--',linewidth=2)
DR15, = ax2.plot(drateD_BW[:,depths[2]],'b-.',linewidth=2)
#ax2.legend((DR5,DR10,DR15),('5m','10m','15m'),loc=1,fontsize=14,ncol=3)
ax2.set_ylabel('$\epsilon$ ',fontsize=20,color='b')
for tl in ax2.get_yticklabels():
tl.set_color('b')
plt.xlim(ind[0],ind[-1])
plt.xticks(ind,['','54','60','66','72','78','84','90','96','102','108','114','120'],fontsize=16)
plt.tight_layout()
plt.savefig('./plot/'+label+'/Rich_2_BW_'+label+'.eps')
print './plot/'+label+'/Rich_2_BW_'+label+'.eps'
plt.close()
| gpl-2.0 |
combust-ml/mleap | python/tests/pyspark/feature/math_unary_test.py | 2 | 4359 | import math
import os
import shutil
import tempfile
import unittest
import mleap.pyspark # noqa
from mleap.pyspark.spark_support import SimpleSparkSerializer # noqa
import pandas as pd
from pandas.testing import assert_frame_equal
from pyspark.ml import Pipeline
from pyspark.sql.types import FloatType
from pyspark.sql.types import StructType
from pyspark.sql.types import StructField
from mleap.pyspark.feature.math_unary import MathUnary
from mleap.pyspark.feature.math_unary import UnaryOperation
from tests.pyspark.lib.spark_session import spark_session
INPUT_SCHEMA = StructType([
StructField('f1', FloatType()),
])
class MathUnaryTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.spark = spark_session()
@classmethod
def tearDownClass(cls):
cls.spark.stop()
def setUp(self):
self.input = self.spark.createDataFrame([
(
float(i),
)
for i in range(1, 10)
], INPUT_SCHEMA)
self.expected_sin = pd.DataFrame(
[(
math.sin(i),
)
for i in range(1, 10)],
columns=['sin(f1)'],
)
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_sin_math_unary(self):
sin_transformer = MathUnary(
operation=UnaryOperation.Sin,
inputCol="f1",
outputCol="sin(f1)",
)
result = sin_transformer.transform(self.input).toPandas()[['sin(f1)']]
assert_frame_equal(self.expected_sin, result)
def test_math_unary_pipeline(self):
sin_transformer = MathUnary(
operation=UnaryOperation.Sin,
inputCol="f1",
outputCol="sin(f1)",
)
exp_transformer = MathUnary(
operation=UnaryOperation.Exp,
inputCol="sin(f1)",
outputCol="exp(sin(f1))",
)
expected = pd.DataFrame(
[(
math.exp(math.sin(i)),
)
for i in range(1, 10)],
columns=['exp(sin(f1))'],
)
pipeline = Pipeline(
stages=[sin_transformer, exp_transformer]
)
pipeline_model = pipeline.fit(self.input)
result = pipeline_model.transform(self.input).toPandas()[['exp(sin(f1))']]
assert_frame_equal(expected, result)
def test_can_instantiate_all_math_unary(self):
for unary_operation in UnaryOperation:
transformer = MathUnary(
operation=unary_operation,
inputCol="f1",
outputCol="operation",
)
def test_serialize_deserialize_math_unary(self):
sin_transformer = MathUnary(
operation=UnaryOperation.Sin,
inputCol="f1",
outputCol="sin(f1)",
)
file_path = '{}{}'.format('jar:file:', os.path.join(self.tmp_dir, 'math_unary.zip'))
sin_transformer.serializeToBundle(file_path, self.input)
deserialized_math_unary = SimpleSparkSerializer().deserializeFromBundle(file_path)
result = deserialized_math_unary.transform(self.input).toPandas()[['sin(f1)']]
assert_frame_equal(self.expected_sin, result)
def test_serialize_deserialize_pipeline(self):
sin_transformer = MathUnary(
operation=UnaryOperation.Sin,
inputCol="f1",
outputCol="sin(f1)",
)
exp_transformer = MathUnary(
operation=UnaryOperation.Exp,
inputCol="sin(f1)",
outputCol="exp(sin(f1))",
)
expected = pd.DataFrame(
[(
math.exp(math.sin(i)),
)
for i in range(1, 10)],
columns=['exp(sin(f1))'],
)
pipeline = Pipeline(
stages=[sin_transformer, exp_transformer]
)
pipeline_model = pipeline.fit(self.input)
file_path = '{}{}'.format('jar:file:', os.path.join(self.tmp_dir, 'math_unary_pipeline.zip'))
pipeline_model.serializeToBundle(file_path, self.input)
deserialized_pipeline = SimpleSparkSerializer().deserializeFromBundle(file_path)
result = pipeline_model.transform(self.input).toPandas()[['exp(sin(f1))']]
assert_frame_equal(expected, result)
| apache-2.0 |
jls713/jfactors | flattened/process_data.py | 1 | 2570 | ## Produce Table 5 of SEG (2016)
## ============================================================================
import numpy as np
import pandas as pd
import sys
import os.path
sys.path.append('/home/jls/work/data/jfactors/spherical/')
from J_D_table import posh_latex_names
## ============================================================================
def load_files(name):
''' Load in three sample files for dwarf <name> '''
name='triaxial_results/'+name
if os.path.isfile(name+'_nop') and os.path.isfile(name+'_ma') and os.path.isfile(name+'_sj'):
return np.genfromtxt(name+'_nop'),np.genfromtxt(name+'_ma'),np.genfromtxt(name+'_sj')
else:
return None,None,None
def write(l):
''' Output median and \pm 1\sigma errors for correction factors in ascii
form '''
l = l.T[4]
return r'$%0.2f^{+%0.2f}_{-%0.2f}$'%(np.median(l),np.percentile(l,84.1)-np.median(l),np.median(l)-np.percentile(l,15.9))
def write_ascii(l):
''' Output median and \pm 1\sigma errors for correction factors in latex
form '''
l = l.T[4]
return '%0.2f %0.2f %0.2f '%(np.median(l),np.percentile(l,84.1)-np.median(l),np.median(l)-np.percentile(l,15.9))
## ============================================================================
## 1. Read in data file and write headers to tables
data = pd.read_csv('../data/data.dat',sep=' ')
ff = open('corr_triax_table.dat','w')
ffa = open('corr_triax_table_ascii.dat','w')
ff.write('\\begin{tabular}{lcccc}\n')
ff.write('\\hline\n\\hline\n')
ff.write('Name & Ellipticity & $\mathcal{F}_{\mathrm{J},U}$& $\mathcal{F}_{\mathrm{J},R}$& $\mathcal{F}_{\mathrm{J},T}$\\\\ \n')
ff.write('\\hline\n')
## 2. Loop over dwarfs and compute median and \pm 1 \sigma for correction factors
for i in data.ellip.argsort():
d,e,f=load_files(data.Name[i])
ellip_string='&$%0.2f^{+%0.2f}_{-%0.2f}$&'%(data.ellip[i],data.ellip_e1[i],data.ellip_e2[i])
if(data.ellip_e1[i]!=data.ellip_e1[i]):
ellip_string='&$<%0.2f$&'%(data.ellip[i])
if(d==None):
ff.write(posh_latex_names[data['Name'][i]]+ellip_string+'NaN&NaN&NaN\\\\\n')
ffa.write(data['Name'][i]+' %0.2f %0.2f %0.2f '%(data.ellip[i],data.ellip_e1[i],data.ellip_e2[i])+'NaN '*9+'\n')
else:
ff.write(posh_latex_names[data['Name'][i]]+ellip_string
+write(d)+'&'+write(e)+'&'+write(f)+'\\\\\n')
ffa.write(data['Name'][i]+' %0.2f %0.2f %0.2f '%(data.ellip[i],data.ellip_e1[i],data.ellip_e2[i])+write_ascii(d)+write_ascii(e)+write_ascii(f)+'\n')
ff.write('\\hline\n\\end{tabular}\n')
ff.close()
ffa.close()
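## A quick check of the percentile convention used in write()/write_ascii()
## (added for illustration): for a Gaussian sample, the 15.9th and 84.1st
## percentiles sit roughly one standard deviation either side of the median.
gauss = np.random.randn(200000)
print('84.1st percentile minus median: %.3f (expected ~1)'
      % (np.percentile(gauss, 84.1) - np.median(gauss)))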
## ============================================================================
| mit |
vshtanko/scikit-learn | sklearn/preprocessing/tests/test_data.py | 113 | 38432 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
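    # MaxAbsScaler divides each column by its maximum absolute value, so the
    # third column [0.5, -0.3, 1.5, 0.0] / 1.5 becomes [1/3, -0.2, 1.0, 0.0];
    # the all-zero first column is left unchanged.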
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
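    # (COO, CSC and LIL inputs are converted to CSR internally, so a new
    # object is returned even though copy=False is requested)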
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
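    # The inferred n_values per column are [4, 3, 2] (max value + 1), hence
    # the cumulative feature_indices_ [0, 4, 7, 9]; active_features_ keeps
    # only the dummy columns actually observed during fit.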
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
    # Raise an error if handle_unknown is neither ignore nor error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
makelove/OpenCV-Python-Tutorial | ch37-特征匹配/37.5-FLANN匹配器.py | 1 | 1640 | # -*- coding: utf-8 -*-
# @Time    : 2017/7/13 4:22 PM
# @Author : play4fun
# @File : 37.5-FLANN匹配器.py
# @Software: PyCharm
"""
37.5-FLANN匹配器.py:
FLANN stands for Fast Library for Approximate Nearest Neighbors.
It is a collection of algorithms for nearest-neighbor search on large
datasets and with high-dimensional features, and these algorithms are
already optimized. For large datasets it performs better than BFMatcher.
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
img1 = cv2.imread('../data/box.png', 0) # queryImage
img2 = cv2.imread('../data/box_in_scene.png', 0) # trainImage
# Initiate SIFT detector
# sift = cv2.SIFT()
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50) # or pass empty dictionary
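# FLANN_INDEX_KDTREE selects the KD-tree index, the recommended choice for
# float descriptors such as SIFT; 'checks' is how many times the trees are
# recursively traversed per query -- higher values trade speed for accuracy.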
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# Need to draw only good matches, so create a mask
matchesMask = [[0, 0] for i in range(len(matches))]
# ratio test as per Lowe's paper
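# keep a match only when its distance is below 0.7 times the distance of the
# second-best candidate, which discards ambiguous matches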
for i, (m, n) in enumerate(matches):
if m.distance < 0.7 * n.distance:
matchesMask[i] = [1, 0]
draw_params = dict(matchColor=(0, 255, 0),
singlePointColor=(255, 0, 0),
matchesMask=matchesMask,
flags=0)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)
plt.imshow(img3, ), plt.show()
| mit |
ak681443/mana-deep | evaluation/allmods/new_train/FindBestMatch1.py | 1 | 5115 |
# coding: utf-8
# In[1]:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras.callbacks import TensorBoard
from keras.models import model_from_json
from keras.models import load_model
from keras import regularizers
from os import listdir
from os.path import isfile, join
import numpy as np
from matplotlib import pyplot as plt
import cv2
import scipy.misc
from scipy import spatial
from PIL import Image
import heapq
import sys
# In[2]:
th = int(sys.argv[1])
v = int(sys.argv[2])
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
masks = np.zeros((224,224))
#for filen1 in files1:
# img1 = cv2.imread(mypath1+filen1)
# img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
# img1[img1<th] = v
# img1[img1>=th] = 0
# masks = masks + img1
#masks = masks / v
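# accumulate, pixel by pixel, how many binarized training sketches are "on";
# heavily shared pixels (masks > 60) are zeroed out below to suppress strokes
# common to many sketches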
for filen1 in files1:
img = cv2.imread(mypath1+filen1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = np.invert(img)
img = img/np.float32(np.max(img))
img[img>0.50] = v
img[img!=v] = 0
img = cv2.resize(img, (224,224))
masks = masks + img
masks = masks / v
#masks = np.zeros(masks.shape)
#img1[masks>20] = 0
#print np.average(masks)
#plt.imshow(img1)
# In[3]:
input_img = Input(shape=(224, 224,1))
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same', input_shape=(224,224,1))(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same', activity_regularizer=regularizers.activity_l1(10e-5))(x)
encoded = MaxPooling2D((2, 2), border_mode='same')(x)
model = Model(input_img, encoded)
model.compile(loss='binary_crossentropy', optimizer='adagrad', verbose=0)
# In[4]:
model.load_weights(sys.argv[3], by_name=True)
# In[5]:
def push_pqueue(queue, priority, value):
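    # maintain a bounded min-heap of (score, filename) pairs: once it holds
    # more than 20 entries, heappushpop evicts the current lowest score so
    # only the best matches survive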
if len(queue)>20:
heapq.heappushpop(queue, (priority, value))
else:
heapq.heappush(queue, (priority, value))
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_new/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1 = np.invert(img1)
img1 = img1 / np.float32(np.max(img1))
#img1[img1<th] = v
img1[img1>0.50] = v
img1[img1!=v] = 0
img1[masks>60] = 0
#img1[img1>=th] = 0
X_test.append(np.array([img1]))
X_test = np.array(X_test).astype('float32')#/ float(np.max(X))
X_test = np.reshape(X_test, (len(X_test), 224, 224, 1))
X_test_pred = model.predict(X_test, verbose=0)
# In[8]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_new/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_train = []
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1 = np.invert(img1)
img1 = img1/np.float32(np.max(img1))
img1[img1>0.50] = v
    img1[img1!=v] = 0
img1[masks>60] = 0
X_train.append(np.array([img1]))
X_train = np.array(X_train).astype('float32')#/ float(np.max(X))
X_train = np.reshape(X_train, (len(X_train), 224, 224, 1))
X_train_pred = model.predict(X_train, verbose=0)
# In[9]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_new/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
top10_correct = 0
top20_correct = 0
top5_correct = 0
top1_correct = 0
for i in np.arange(0, len(files1)):
filen1 = files1[i]
pred = X_test_pred[i]
mypath = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_new/'
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
masks = np.zeros((224,224))
max_confidence = 0.0
max_file = None
pqueue = []
for j in np.arange(0, len(files)):
filen = files[j]
tpred = X_train_pred[j]
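        # cosine similarity between the channel-summed encodings of the
        # query sketch and this training sketch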
score = 1 - spatial.distance.cosine(tpred.sum(axis=2).flatten(), pred.sum(axis=2).flatten())
push_pqueue(pqueue, score, filen)
if max_confidence < score:
max_confidence = score
max_file = filen
i = 0
for top20 in heapq.nlargest(len(pqueue), pqueue):
i += 1
if top20[1].split('_')[1].split('.')[0] == filen1.split('_')[1].split('.')[0]:
if i==1:
top20_correct+=1
top10_correct+=1
top5_correct+=1
top1_correct+=1
elif i>10:
top20_correct+=1
elif i>5:
top10_correct+=1
top20_correct+=1
elif i>=1:
top10_correct+=1
top20_correct+=1
top5_correct+=1
break
print "\n!@#$", top20_correct/float(len(files1)) , top10_correct/float(len(files1)), top5_correct/float(len(files1)), top1_correct,"\n"
| apache-2.0 |
qifeigit/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and so are the
corresponding Mahalanobis distances. It is better to use a robust estimator
of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking, clustering, ...
For visualization purposes, the cubic root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
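# Illustrative sanity check (not part of the original example): the squared
# Mahalanobis distances returned by the estimator can be reproduced from the
# fitted location and precision, d^2 = (x - mu)' Sigma^{-1} (x - mu).
centered = X - emp_cov.location_
manual_sq_dist = np.einsum('ij,jk,ik->i', centered, emp_cov.get_precision(), centered)
assert np.allclose(manual_sq_dist, emp_cov.mahalanobis(X))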
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
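# the ** (0.33) exponent takes the cube root of the squared Mahalanobis
# distances, the normalizing transform suggested by Wilson and Hilferty
# (see the docstring above)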
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
tarballs-are-good/sympy | sympy/physics/quantum/tensorproduct.py | 6 | 10647 | """Abstract tensor product."""
from sympy import Expr, Add, Mul, Matrix, Pow
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.qexpr import QuantumError, split_commutative_parts
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.matrixutils import (
numpy_ndarray,
scipy_sparse_matrix,
matrix_tensor_product
)
__all__ = [
'TensorProduct',
'tensor_product_simp'
]
#-----------------------------------------------------------------------------
# Tensor product
#-----------------------------------------------------------------------------
class TensorProduct(Expr):
"""The tensor product of two or more arguments.
For matrices, this uses ``matrix_tensor_product`` to compute the
Kronecker or tensor product matrix. For other objects a symbolic
``TensorProduct`` instance is returned. The tensor product is a
non-commutative multiplication that is used primarily with operators
and states in quantum mechanics.
Currently, the tensor product distinguishes between commutative and non-
commutative arguments. Commutative arguments are assumed to be scalars
and are pulled out in front of the ``TensorProduct``. Non-commutative
arguments remain in the resulting ``TensorProduct``.
Parameters
==========
args : tuple
A sequence of the objects to take the tensor product of.
Examples
========
Start with a simple tensor product of sympy matrices::
>>> from sympy import I, Matrix, symbols
>>> from sympy.physics.quantum import TensorProduct
>>> m1 = Matrix([[1,2],[3,4]])
>>> m2 = Matrix([[1,0],[0,1]])
>>> TensorProduct(m1, m2)
[1, 0, 2, 0]
[0, 1, 0, 2]
[3, 0, 4, 0]
[0, 3, 0, 4]
>>> TensorProduct(m2, m1)
[1, 2, 0, 0]
[3, 4, 0, 0]
[0, 0, 1, 2]
[0, 0, 3, 4]
We can also construct tensor products of non-commutative symbols::
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> tp = TensorProduct(A, B)
>>> tp
AxB
We can take the dagger of a tensor product (note the order does NOT
reverse like the dagger of a normal product)::
>>> from sympy.physics.quantum import Dagger
>>> Dagger(tp)
Dagger(A)xDagger(B)
Expand can be used to distribute a tensor product across addition::
>>> C = Symbol('C',commutative=False)
>>> tp = TensorProduct(A+B,C)
>>> tp
(A + B)xC
>>> tp.expand(tensorproduct=True)
AxC + BxC
"""
def __new__(cls, *args, **assumptions):
if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)):
return matrix_tensor_product(*args)
c_part, new_args = cls.flatten(args)
c_part = Mul(*c_part)
if len(new_args) == 0:
return c_part
elif len(new_args) == 1:
return c_part*new_args[0]
else:
tp = Expr.__new__(cls, *new_args, **{'commutative': False})
return c_part*tp
@classmethod
def flatten(cls, args):
# TODO: disallow nested TensorProducts.
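        # Split each argument into its commutative (scalar) coefficients,
        # collected in c_part and pulled out in front, and its non-commutative
        # factor, which stays inside the TensorProduct.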
c_part = []
nc_parts = []
for arg in args:
if isinstance(arg, Mul):
cp, ncp = split_commutative_parts(arg)
ncp = Mul(*ncp)
else:
if arg.is_commutative:
cp = [arg]; ncp = 1
else:
cp = []; ncp = arg
c_part.extend(cp)
nc_parts.append(ncp)
return c_part, nc_parts
def _eval_dagger(self):
return TensorProduct(*[Dagger(i) for i in self.args])
def _sympystr(self, printer, *args):
from sympy.printing.str import sstr
length = len(self.args)
s = ''
for i in range(length):
if isinstance(self.args[i], (Add, Pow, Mul)):
s = s + '('
s = s + sstr(self.args[i])
if isinstance(self.args[i], (Add, Pow, Mul)):
s = s + ')'
if i != length-1:
s = s + 'x'
return s
def _pretty(self, printer, *args):
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print(self.args[i], *args)
if isinstance(self.args[i], (Add, Mul)):
next_pform = prettyForm(
*next_pform.parens(left='(', right=')')
)
pform = prettyForm(*pform.right(next_pform))
if i != length-1:
pform = prettyForm(*pform.right(u'\u2a02' + u' '))
return pform
def _latex(self, printer, *args):
length = len(self.args)
s = ''
for i in range(length):
if isinstance(self.args[i], (Add, Mul)):
s = s + '\\left('
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
s = s + '{' + printer._print(self.args[i], *args) + '}'
if isinstance(self.args[i], (Add, Mul)):
s = s + '\\right)'
if i != length-1:
s = s + '\\otimes '
return s
def doit(self, **hints):
return TensorProduct(*[item.doit(**hints) for item in self.args])
def _eval_expand_tensorproduct(self, **hints):
"""Distribute TensorProducts across addition."""
args = self.args
add_args = []
stop = False
for i in range(len(args)):
if isinstance(args[i], Add):
for aa in args[i].args:
add_args.append(
TensorProduct(
*args[:i]+(aa,)+args[i+1:]
).expand(**hints)
)
stop = True
if stop: break
if add_args:
return Add(*add_args).expand(**hints)
else:
return self
def expand(self, **hints):
tp = TensorProduct(*[item.expand(**hints) for item in self.args])
return Expr.expand(tp, **hints)
def tensor_product_simp_Mul(e):
"""Simplify a Mul with TensorProducts.
    Currently the main use of this is to simplify a ``Mul`` of
    ``TensorProduct``s to a ``TensorProduct`` of ``Mul``s. It currently only
    works for relatively simple cases where the initial ``Mul`` only has
    scalars and raw ``TensorProduct``s, not ``Add``, ``Pow``, or
    ``Commutator``s of ``TensorProduct``s.
Parameters
==========
e : Expr
A ``Mul`` of ``TensorProduct``s to be simplified.
Returns
=======
e : Expr
A ``TensorProduct`` of ``Mul``s.
Examples
========
This is an example of the type of simplification that this function
performs::
>>> from sympy.physics.quantum.tensorproduct import tensor_product_simp_Mul, TensorProduct
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> C = Symbol('C',commutative=False)
>>> D = Symbol('D',commutative=False)
>>> e = TensorProduct(A,B)*TensorProduct(C,D)
>>> e
AxB*CxD
>>> tensor_product_simp_Mul(e)
(A*C)x(B*D)
"""
# TODO: This won't work with Muls that have other composites of
# TensorProducts, like an Add, Pow, Commutator, etc.
# TODO: This only works for the equivalent of single Qbit gates.
if not isinstance(e, Mul):
return e
c_part, nc_part = split_commutative_parts(e)
n_nc = len(nc_part)
if n_nc == 0 or n_nc == 1:
return e
elif e.has(TensorProduct):
current = nc_part[0]
if not isinstance(current, TensorProduct):
raise TypeError('TensorProduct expected, got: %r' % current)
n_terms = len(current.args)
new_args = list(current.args)
for next in nc_part[1:]:
# TODO: check the hilbert spaces of next and current here.
if isinstance(next, TensorProduct):
if n_terms != len(next.args):
raise QuantumError(
'TensorProducts of different lengths: %r and %r' % \
(current, next)
)
for i in range(len(new_args)):
new_args[i] = new_args[i]*next.args[i]
else:
# this won't quite work as we don't want next in the TensorProduct
for i in range(len(new_args)):
new_args[i] = new_args[i]*next
current = next
return Mul(*c_part)*TensorProduct(*new_args)
else:
return e
def tensor_product_simp(e, **hints):
"""Try to simplify and combine TensorProducts.
In general this will try to pull expressions inside of ``TensorProducts``.
It currently only works for relatively simple cases where the products
    have only scalars, raw ``TensorProduct``s, not ``Add``, ``Pow``, or
``Commutator``s of ``TensorProduct``s. It is best to see what it does by
showing examples.
Examples
========
>>> from sympy.physics.quantum import tensor_product_simp
>>> from sympy.physics.quantum import TensorProduct
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> C = Symbol('C',commutative=False)
>>> D = Symbol('D',commutative=False)
First see what happens to products of tensor products::
>>> e = TensorProduct(A,B)*TensorProduct(C,D)
>>> e
AxB*CxD
>>> tensor_product_simp(e)
(A*C)x(B*D)
This is the core logic of this function, and it works inside, powers,
sums, commutators and anticommutators as well::
>>> tensor_product_simp(e**2)
(A*C)x(B*D)**2
"""
if isinstance(e, Add):
return Add(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, Pow):
return tensor_product_simp(e.base)**e.exp
elif isinstance(e, Mul):
return tensor_product_simp_Mul(e)
elif isinstance(e, Commutator):
return Commutator(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, AntiCommutator):
return AntiCommutator(*[tensor_product_simp(arg) for arg in e.args])
else:
return e
| bsd-3-clause |
harisbal/pandas | pandas/core/arrays/interval.py | 3 | 38927 | import textwrap
import numpy as np
from operator import le, lt
from pandas._libs.interval import (Interval, IntervalMixin,
intervals_to_interval_bounds)
from pandas.compat import add_metaclass
from pandas.compat.numpy import function as nv
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.core.dtypes.cast import maybe_convert_platform
from pandas.core.dtypes.common import (is_categorical_dtype, is_float_dtype,
is_integer_dtype, is_interval_dtype,
is_scalar, is_string_dtype,
is_datetime64_any_dtype,
is_timedelta64_dtype, is_interval,
pandas_dtype)
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.core.dtypes.generic import (ABCDatetimeIndex, ABCPeriodIndex,
ABCSeries, ABCIntervalIndex,
ABCInterval)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.indexes.base import Index, ensure_index
from pandas.util._decorators import Appender
from pandas.util._doctools import _WritableDoc
from . import ExtensionArray, Categorical
_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_interval_shared_docs = {}
# TODO(jschendel) remove constructor key when IntervalArray is public (GH22860)
_shared_docs_kwargs = dict(
klass='IntervalArray',
constructor='pd.core.arrays.IntervalArray',
name=''
)
_interval_shared_docs['class'] = """
%(summary)s
.. versionadded:: %(versionadded)s
.. warning::
The indexing behaviors are provisional and may change in
a future version of pandas.
Parameters
----------
data : array-like (1-dimensional)
Array-like containing Interval objects from which to build the
%(klass)s.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both or
neither.
dtype : dtype or None, default None
If None, dtype will be inferred.
.. versionadded:: 0.23.0
copy : bool, default False
Copy the input data.
%(name)s\
verify_integrity : bool, default True
Verify that the %(klass)s is valid.
Attributes
----------
left
right
closed
mid
length
values
is_non_overlapping_monotonic
Methods
-------
from_arrays
from_tuples
from_breaks
set_closed
%(extra_methods)s\
%(examples)s\
Notes
    -----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/advanced.html#intervalindex>`_
for more.
See Also
--------
Index : The base pandas Index type
Interval : A bounded slice-like interval; the elements of an %(klass)s
interval_range : Function to create a fixed frequency IntervalIndex
cut : Bin values into discrete Intervals
qcut : Bin values into equal-sized Intervals based on rank or sample quantiles
"""
# TODO(jschendel) use a more direct call in Examples when made public (GH22860)
@Appender(_interval_shared_docs['class'] % dict(
klass="IntervalArray",
summary="Pandas array for interval data that are closed on the same side.",
versionadded="0.24.0",
name='',
extra_methods='',
examples=textwrap.dedent("""\
Examples
--------
A new ``IntervalArray`` can be constructed directly from an array-like of
``Interval`` objects:
>>> pd.core.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
IntervalArray([(0, 1], (1, 5]],
closed='right',
dtype='interval[int64]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalArray.from_arrays`,
:meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.
"""),
))
@add_metaclass(_WritableDoc)
class IntervalArray(IntervalMixin, ExtensionArray):
dtype = IntervalDtype()
ndim = 1
can_hold_na = True
_na_value = _fill_value = np.nan
def __new__(cls, data, closed=None, dtype=None, copy=False,
verify_integrity=True):
if isinstance(data, ABCSeries) and is_interval_dtype(data):
data = data.values
if isinstance(data, (cls, ABCIntervalIndex)):
left = data.left
right = data.right
closed = closed or data.closed
else:
# don't allow scalars
if is_scalar(data):
msg = ("{}(...) must be called with a collection of some kind,"
" {} was passed")
raise TypeError(msg.format(cls.__name__, data))
# might need to convert empty or purely na data
data = maybe_convert_platform_interval(data)
left, right, infer_closed = intervals_to_interval_bounds(
data, validate_closed=closed is None)
closed = closed or infer_closed
return cls._simple_new(left, right, closed, copy=copy, dtype=dtype,
verify_integrity=verify_integrity)
@classmethod
def _simple_new(cls, left, right, closed=None,
copy=False, dtype=None, verify_integrity=True):
result = IntervalMixin.__new__(cls)
closed = closed or 'right'
left = ensure_index(left, copy=copy)
right = ensure_index(right, copy=copy)
if dtype is not None:
# GH 19262: dtype must be an IntervalDtype to override inferred
dtype = pandas_dtype(dtype)
if not is_interval_dtype(dtype):
msg = 'dtype must be an IntervalDtype, got {dtype}'
raise TypeError(msg.format(dtype=dtype))
elif dtype.subtype is not None:
left = left.astype(dtype.subtype)
right = right.astype(dtype.subtype)
# coerce dtypes to match if needed
if is_float_dtype(left) and is_integer_dtype(right):
right = right.astype(left.dtype)
elif is_float_dtype(right) and is_integer_dtype(left):
left = left.astype(right.dtype)
if type(left) != type(right):
msg = ('must not have differing left [{ltype}] and right '
'[{rtype}] types')
raise ValueError(msg.format(ltype=type(left).__name__,
rtype=type(right).__name__))
elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalArray')
raise TypeError(msg)
elif isinstance(left, ABCPeriodIndex):
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
raise ValueError(msg)
elif (isinstance(left, ABCDatetimeIndex) and
str(left.tz) != str(right.tz)):
msg = ("left and right must have the same time zone, got "
"'{left_tz}' and '{right_tz}'")
raise ValueError(msg.format(left_tz=left.tz, right_tz=right.tz))
result._left = left
result._right = right
result._closed = closed
if verify_integrity:
result._validate()
return result
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_factorized(cls, values, original):
return cls(values, closed=original.closed)
_interval_shared_docs['from_breaks'] = """
Construct an %(klass)s from an array of splits.
Parameters
----------
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : boolean, default False
copy the data
dtype : dtype or None, default None
If None, dtype will be inferred
.. versionadded:: 0.23.0
Examples
--------
>>> pd.%(klass)s.from_breaks([0, 1, 2, 3])
%(klass)s([(0, 1], (1, 2], (2, 3]]
closed='right',
dtype='interval[int64]')
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex
%(klass)s.from_arrays : Construct from a left and right array
%(klass)s.from_tuples : Construct from a sequence of tuples
"""
@classmethod
@Appender(_interval_shared_docs['from_breaks'] % _shared_docs_kwargs)
def from_breaks(cls, breaks, closed='right', copy=False, dtype=None):
breaks = maybe_convert_platform_interval(breaks)
return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy,
dtype=dtype)
_interval_shared_docs['from_arrays'] = """
Construct from two arrays defining the left and right bounds.
Parameters
----------
left : array-like (1-dimensional)
Left bounds for each interval.
right : array-like (1-dimensional)
Right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : boolean, default False
Copy the data.
dtype : dtype, optional
If None, dtype will be inferred.
.. versionadded:: 0.23.0
Returns
-------
%(klass)s
Notes
-----
Each element of `left` must be less than or equal to the `right`
element at the same position. If an element is missing, it must be
missing in both `left` and `right`. A TypeError is raised when
using an unsupported type for `left` or `right`. At the moment,
'category', 'object', and 'string' subtypes are not supported.
Raises
------
ValueError
When a value is missing in only one of `left` or `right`.
When a value in `left` is greater than the corresponding value
in `right`.
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
%(klass)s.from_tuples : Construct an %(klass)s from an
array-like of tuples.
Examples
--------
>>> %(klass)s.from_arrays([0, 1, 2], [1, 2, 3])
%(klass)s([(0, 1], (1, 2], (2, 3]]
closed='right',
dtype='interval[int64]')
"""
@classmethod
@Appender(_interval_shared_docs['from_arrays'] % _shared_docs_kwargs)
def from_arrays(cls, left, right, closed='right', copy=False, dtype=None):
left = maybe_convert_platform_interval(left)
right = maybe_convert_platform_interval(right)
return cls._simple_new(left, right, closed, copy=copy,
dtype=dtype, verify_integrity=True)
_interval_shared_docs['from_intervals'] = """
Construct an %(klass)s from a 1d array of Interval objects
.. deprecated:: 0.23.0
Parameters
----------
data : array-like (1-dimensional)
Array of Interval objects. All intervals must be closed on the same
sides.
copy : boolean, default False
by-default copy the data, this is compat only and ignored
dtype : dtype or None, default None
If None, dtype will be inferred
        .. versionadded:: 0.23.0
Examples
--------
>>> pd.%(klass)s.from_intervals([pd.Interval(0, 1),
... pd.Interval(1, 2)])
%(klass)s([(0, 1], (1, 2]]
closed='right', dtype='interval[int64]')
The generic Index constructor work identically when it infers an array
of all intervals:
>>> pd.Index([pd.Interval(0, 1), pd.Interval(1, 2)])
%(klass)s([(0, 1], (1, 2]]
closed='right', dtype='interval[int64]')
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex
%(klass)s.from_arrays : Construct an %(klass)s from a left and
right array
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits
%(klass)s.from_tuples : Construct an %(klass)s from an
array-like of tuples
"""
_interval_shared_docs['from_tuples'] = """
Construct an %(klass)s from an array-like of tuples
Parameters
----------
data : array-like (1-dimensional)
Array of tuples
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : boolean, default False
by-default copy the data, this is compat only and ignored
dtype : dtype or None, default None
If None, dtype will be inferred
        .. versionadded:: 0.23.0
Examples
--------
>>> pd.%(klass)s.from_tuples([(0, 1), (1, 2)])
%(klass)s([(0, 1], (1, 2]],
closed='right', dtype='interval[int64]')
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex
%(klass)s.from_arrays : Construct an %(klass)s from a left and
right array
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits
"""
@classmethod
@Appender(_interval_shared_docs['from_tuples'] % _shared_docs_kwargs)
def from_tuples(cls, data, closed='right', copy=False, dtype=None):
if len(data):
left, right = [], []
else:
# ensure that empty data keeps input dtype
left = right = data
for d in data:
if isna(d):
lhs = rhs = np.nan
else:
name = cls.__name__
try:
# need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
lhs, rhs = d
except ValueError:
msg = ('{name}.from_tuples requires tuples of '
'length 2, got {tpl}').format(name=name, tpl=d)
raise ValueError(msg)
except TypeError:
msg = ('{name}.from_tuples received an invalid '
'item, {tpl}').format(name=name, tpl=d)
raise TypeError(msg)
left.append(lhs)
right.append(rhs)
return cls.from_arrays(left, right, closed, copy=False,
dtype=dtype)
def _validate(self):
"""Verify that the IntervalArray is valid.
Checks that
* closed is valid
* left and right match lengths
* left and right have the same missing values
* left is always below right
"""
if self.closed not in _VALID_CLOSED:
raise ValueError("invalid option for 'closed': {closed}"
.format(closed=self.closed))
if len(self.left) != len(self.right):
raise ValueError('left and right must have the same length')
left_mask = notna(self.left)
right_mask = notna(self.right)
if not (left_mask == right_mask).all():
raise ValueError('missing values must be missing in the same '
'location both left and right sides')
if not (self.left[left_mask] <= self.right[left_mask]).all():
raise ValueError('left side of interval must be <= right side')
# ---------
# Interface
# ---------
def __iter__(self):
return iter(np.asarray(self))
def __len__(self):
return len(self.left)
def __getitem__(self, value):
left = self.left[value]
right = self.right[value]
# scalar
if not isinstance(left, Index):
if isna(left):
return self._fill_value
return Interval(left, right, self.closed)
return self._shallow_copy(left, right)
def __setitem__(self, key, value):
# na value: need special casing to set directly on numpy arrays
needs_float_conversion = False
if is_scalar(value) and isna(value):
if is_integer_dtype(self.dtype.subtype):
# can't set NaN on a numpy integer array
needs_float_conversion = True
elif is_datetime64_any_dtype(self.dtype.subtype):
# need proper NaT to set directly on the numpy array
value = np.datetime64('NaT')
elif is_timedelta64_dtype(self.dtype.subtype):
# need proper NaT to set directly on the numpy array
value = np.timedelta64('NaT')
value_left, value_right = value, value
# scalar interval
elif is_interval_dtype(value) or isinstance(value, ABCInterval):
self._check_closed_matches(value, name="value")
value_left, value_right = value.left, value.right
else:
# list-like of intervals
try:
array = IntervalArray(value)
value_left, value_right = array.left, array.right
except TypeError:
# wrong type: not interval or NA
msg = "'value' should be an interval type, got {} instead."
raise TypeError(msg.format(type(value)))
# Need to ensure that left and right are updated atomically, so we're
# forced to copy, update the copy, and swap in the new values.
left = self.left.copy(deep=True)
if needs_float_conversion:
left = left.astype('float')
left.values[key] = value_left
self._left = left
right = self.right.copy(deep=True)
if needs_float_conversion:
right = right.astype('float')
right.values[key] = value_right
self._right = right
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should be either Interval objects or NA/NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
(Not implemented yet for IntervalArray)
Method to use for filling holes in reindexed Series
limit : int, default None
(Not implemented yet for IntervalArray)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : IntervalArray with NA/NaN filled
"""
if method is not None:
raise TypeError('Filling by method is not supported for '
'IntervalArray.')
if limit is not None:
raise TypeError('limit is not supported for IntervalArray.')
if not isinstance(value, ABCInterval):
msg = ("'IntervalArray.fillna' only supports filling with a "
"scalar 'pandas.Interval'. Got a '{}' instead."
.format(type(value).__name__))
raise TypeError(msg)
value = getattr(value, '_values', value)
self._check_closed_matches(value, name="value")
left = self.left.fillna(value=value.left)
right = self.right.fillna(value=value.right)
return self._shallow_copy(left, right)
@property
def dtype(self):
return IntervalDtype(self.left.dtype)
def astype(self, dtype, copy=True):
"""
Cast to an ExtensionArray or NumPy array with dtype 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ExtensionArray or ndarray
ExtensionArray or NumPy ndarray with 'dtype' for its dtype.
"""
dtype = pandas_dtype(dtype)
if is_interval_dtype(dtype):
if dtype == self.dtype:
return self.copy() if copy else self
# need to cast to different subtype
try:
new_left = self.left.astype(dtype.subtype)
new_right = self.right.astype(dtype.subtype)
except TypeError:
msg = ('Cannot convert {dtype} to {new_dtype}; subtypes are '
'incompatible')
raise TypeError(msg.format(dtype=self.dtype, new_dtype=dtype))
return self._shallow_copy(new_left, new_right)
elif is_categorical_dtype(dtype):
return Categorical(np.asarray(self))
# TODO: This try/except will be repeated.
try:
return np.asarray(self).astype(dtype, copy=copy)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
@classmethod
def _concat_same_type(cls, to_concat):
"""
Concatenate multiple IntervalArray
Parameters
----------
to_concat : sequence of IntervalArray
Returns
-------
IntervalArray
"""
closed = {interval.closed for interval in to_concat}
if len(closed) != 1:
raise ValueError("Intervals must all be closed on the same side.")
closed = closed.pop()
left = np.concatenate([interval.left for interval in to_concat])
right = np.concatenate([interval.right for interval in to_concat])
return cls._simple_new(left, right, closed=closed, copy=False)
def _shallow_copy(self, left=None, right=None, closed=None):
"""
Return a new IntervalArray with the replacement attributes
Parameters
----------
left : array-like
            Values to be used for the left-side of the intervals.
If None, the existing left and right values will be used.
right : array-like
            Values to be used for the right-side of the intervals.
If None and left is IntervalArray-like, the left and right
of the IntervalArray-like will be used.
closed : {'left', 'right', 'both', 'neither'}, optional
Whether the intervals are closed on the left-side, right-side, both
or neither. If None, the existing closed will be used.
"""
if left is None:
# no values passed
left, right = self.left, self.right
elif right is None:
# only single value passed, could be an IntervalArray
# or array of Intervals
if not isinstance(left, (type(self), ABCIntervalIndex)):
left = type(self)(left)
left, right = left.left, left.right
else:
# both left and right are values
pass
closed = closed or self.closed
return self._simple_new(
left, right, closed=closed, verify_integrity=False)
def copy(self, deep=False):
"""
Return a copy of the array.
Parameters
----------
deep : bool, default False
Also copy the underlying data backing this array.
Returns
-------
IntervalArray
"""
left = self.left.copy(deep=True) if deep else self.left
right = self.right.copy(deep=True) if deep else self.right
closed = self.closed
# TODO: Could skip verify_integrity here.
return type(self).from_arrays(left, right, closed=closed)
def _formatting_values(self):
return np.asarray(self)
def isna(self):
return isna(self.left)
@property
def nbytes(self):
return self.left.nbytes + self.right.nbytes
@property
def size(self):
# Avoid materializing self.values
return self.left.size
@property
def shape(self):
return self.left.shape
def take(self, indices, allow_fill=False, fill_value=None, axis=None,
**kwargs):
"""
Take elements from the IntervalArray.
Parameters
----------
indices : sequence of integers
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
fill_value : Interval or NA, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type, ``self.dtype.na_value``, is used.
For many ExtensionArrays, there will be two representations of
`fill_value`: a user-facing "boxed" scalar, and a low-level
physical NA value. `fill_value` should be the user-facing version,
and the implementation should handle translating that to the
physical version for processing the take if necessary.
axis : any, default None
Present for compat with IntervalIndex; does nothing.
Returns
-------
IntervalArray
Raises
------
IndexError
When the indices are out of bounds for the array.
ValueError
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
"""
from pandas.core.algorithms import take
nv.validate_take(tuple(), kwargs)
fill_left = fill_right = fill_value
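        # an Interval fill value is split into its endpoints below so that the
        # left and right sides can be taken (and filled) independently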
if allow_fill:
if fill_value is None:
fill_left = fill_right = self.left._na_value
elif is_interval(fill_value):
self._check_closed_matches(fill_value, name='fill_value')
fill_left, fill_right = fill_value.left, fill_value.right
elif not is_scalar(fill_value) and notna(fill_value):
msg = ("'IntervalArray.fillna' only supports filling with a "
"'scalar pandas.Interval or NA'. Got a '{}' instead."
.format(type(fill_value).__name__))
raise ValueError(msg)
left_take = take(self.left, indices,
allow_fill=allow_fill, fill_value=fill_left)
right_take = take(self.right, indices,
allow_fill=allow_fill, fill_value=fill_right)
return self._shallow_copy(left_take, right_take)
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each interval.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
        # TODO: implement this in a non-naive way!
from pandas.core.algorithms import value_counts
return value_counts(np.asarray(self), dropna=dropna)
# Formatting
def _format_data(self):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option(
'display.max_seq_items') or n) // 10, 10)
formatter = str
if n == 0:
summary = '[]'
elif n == 1:
first = formatter(self[0])
summary = '[{first}]'.format(first=first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[{first}, {last}]'.format(first=first, last=last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
summary = '[{head} ... {tail}]'.format(
head=', '.join(head), tail=', '.join(tail))
else:
tail = [formatter(x) for x in self]
summary = '[{tail}]'.format(tail=', '.join(tail))
return summary
def __repr__(self):
tpl = textwrap.dedent("""\
{cls}({data},
{lead}closed='{closed}',
{lead}dtype='{dtype}')""")
return tpl.format(cls=self.__class__.__name__,
data=self._format_data(),
lead=' ' * len(self.__class__.__name__) + ' ',
closed=self.closed, dtype=self.dtype)
def _format_space(self):
space = ' ' * (len(self.__class__.__name__) + 1)
return "\n{space}".format(space=space)
@property
def left(self):
"""
Return the left endpoints of each Interval in the IntervalArray as
an Index
"""
return self._left
@property
def right(self):
"""
Return the right endpoints of each Interval in the IntervalArray as
an Index
"""
return self._right
@property
def closed(self):
"""
Whether the intervals are closed on the left-side, right-side, both or
neither
"""
return self._closed
_interval_shared_docs['set_closed'] = """
Return an %(klass)s identical to the current one, but closed on the
specified side
.. versionadded:: 0.24.0
Parameters
----------
closed : {'left', 'right', 'both', 'neither'}
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
new_index : %(klass)s
Examples
--------
>>> index = pd.interval_range(0, 3)
>>> index
%(klass)s([(0, 1], (1, 2], (2, 3]]
closed='right',
dtype='interval[int64]')
>>> index.set_closed('both')
%(klass)s([[0, 1], [1, 2], [2, 3]]
closed='both',
dtype='interval[int64]')
"""
@Appender(_interval_shared_docs['set_closed'] % _shared_docs_kwargs)
def set_closed(self, closed):
if closed not in _VALID_CLOSED:
msg = "invalid option for 'closed': {closed}"
raise ValueError(msg.format(closed=closed))
return self._shallow_copy(closed=closed)
@property
def length(self):
"""
Return an Index with entries denoting the length of each Interval in
the IntervalArray
"""
try:
return self.right - self.left
except TypeError:
# length not defined for some types, e.g. string
msg = ('IntervalArray contains Intervals without defined length, '
'e.g. Intervals with string endpoints')
raise TypeError(msg)
@property
def mid(self):
"""
Return the midpoint of each Interval in the IntervalArray as an Index
"""
try:
return 0.5 * (self.left + self.right)
except TypeError:
# datetime safe version
return self.left + 0.5 * self.length
@property
def is_non_overlapping_monotonic(self):
"""
Return True if the IntervalArray is non-overlapping (no Intervals share
points) and is either monotonic increasing or monotonic decreasing,
else False
"""
# must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
# or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
# we already require left <= right
# strict inequality for closed == 'both'; equality implies overlapping
# at a point when both sides of intervals are included
if self.closed == 'both':
return bool((self.right[:-1] < self.left[1:]).all() or
(self.left[:-1] > self.right[1:]).all())
# non-strict inequality when closed != 'both'; at least one side is
# not included in the intervals, so equality does not imply overlapping
return bool((self.right[:-1] <= self.left[1:]).all() or
(self.left[:-1] >= self.right[1:]).all())
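    # Editor's illustrative sketch (not part of the original pandas source),
    # assuming the `from_breaks`/`from_tuples` constructors from this class:
    #
    #   IntervalArray.from_breaks([0, 1, 2]).is_non_overlapping_monotonic
    #   # -> True: (0, 1] and (1, 2] touch only at an open endpoint
    #   IntervalArray.from_tuples([(0, 2), (1, 3)]).is_non_overlapping_monotonic
    #   # -> False: (0, 2] and (1, 3] share the points in (1, 2]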
# Conversion
def __array__(self, dtype=None):
"""
Return the IntervalArray's data as a numpy array of Interval
objects (with dtype='object')
"""
left = self.left
right = self.right
mask = self.isna()
closed = self._closed
result = np.empty(len(left), dtype=object)
for i in range(len(left)):
if mask[i]:
result[i] = np.nan
else:
result[i] = Interval(left[i], right[i], closed)
return result
_interval_shared_docs['to_tuples'] = """\
Return an %(return_type)s of tuples of the form (left, right)
Parameters
----------
na_tuple : boolean, default True
Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA
value itself if False, ``nan``.
            .. versionadded:: 0.23.0
Returns
-------
tuples: %(return_type)s
%(examples)s\
"""
@Appender(_interval_shared_docs['to_tuples'] % dict(
return_type='ndarray',
examples='',
))
def to_tuples(self, na_tuple=True):
tuples = com.asarray_tuplesafe(zip(self.left, self.right))
if not na_tuple:
# GH 18756
tuples = np.where(~self.isna(), tuples, np.nan)
return tuples
def repeat(self, repeats, **kwargs):
"""
Repeat elements of an IntervalArray.
Returns a new IntervalArray where each element of the current
IntervalArray is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
IntervalArray
Newly created IntervalArray with repeated elements.
See Also
--------
Index.repeat : Equivalent function for Index
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
"""
left_repeat = self.left.repeat(repeats, **kwargs)
right_repeat = self.right.repeat(repeats, **kwargs)
return self._shallow_copy(left=left_repeat, right=right_repeat)
_interval_shared_docs['overlaps'] = """
Check elementwise if an Interval overlaps the values in the %(klass)s.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
.. versionadded:: 0.24.0
Parameters
----------
other : Interval
Interval to check against for an overlap.
Returns
-------
ndarray
Boolean array positionally indicating where an overlap occurs.
Examples
--------
>>> intervals = %(constructor)s.from_tuples([(0, 1), (1, 3), (2, 4)])
>>> intervals
%(klass)s([(0, 1], (1, 3], (2, 4]],
closed='right',
dtype='interval[int64]')
>>> intervals.overlaps(pd.Interval(0.5, 1.5))
array([ True, True, False])
Intervals that share closed endpoints overlap:
>>> intervals.overlaps(pd.Interval(1, 3, closed='left'))
array([ True, True, True])
Intervals that only have an open endpoint in common do not overlap:
>>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
array([False, True, False])
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
"""
@Appender(_interval_shared_docs['overlaps'] % _shared_docs_kwargs)
def overlaps(self, other):
if isinstance(other, (IntervalArray, ABCIntervalIndex)):
raise NotImplementedError
elif not isinstance(other, Interval):
msg = '`other` must be Interval-like, got {other}'
raise TypeError(msg.format(other=type(other).__name__))
# equality is okay if both endpoints are closed (overlap at a point)
op1 = le if (self.closed_left and other.closed_right) else lt
op2 = le if (other.closed_left and self.closed_right) else lt
        # overlaps is equivalent to the negation of the two intervals being disjoint:
        # disjoint = (A.left > B.right) or (B.left > A.right)
        # (simplifying the negation allows this to be done in fewer operations)
return op1(self.left, other.right) & op2(other.left, self.right)
def maybe_convert_platform_interval(values):
"""
Try to do platform conversion, with special casing for IntervalArray.
Wrapper around maybe_convert_platform that alters the default return
dtype in certain cases to be compatible with IntervalArray. For example,
empty lists return with integer dtype instead of object dtype, which is
prohibited for IntervalArray.
Parameters
----------
values : array-like
Returns
-------
array
"""
if isinstance(values, (list, tuple)) and len(values) == 0:
# GH 19016
# empty lists/tuples get object dtype by default, but this is not
# prohibited for IntervalArray, so coerce to integer instead
return np.array([], dtype=np.int64)
elif is_categorical_dtype(values):
values = np.asarray(values)
return maybe_convert_platform(values)
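# Editor's illustrative sketch, not part of the original pandas module.  It
# only exercises the special-casing documented above and assumes numpy is
# importable as ``np`` (as it is elsewhere in this module).
def _example_maybe_convert_platform_interval():  # pragma: no cover
    # An empty list is coerced to an empty int64 array instead of object
    # dtype, so it can back an empty IntervalArray.
    empty = maybe_convert_platform_interval([])
    assert empty.dtype == np.int64
    # Non-empty input goes through the normal platform conversion.
    return maybe_convert_platform_interval([0, 1, 2])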
| bsd-3-clause |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tseries/period.py | 9 | 37251 | # pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
import numpy as np
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
from pandas._period import Period
import pandas._period as period
from pandas._period import (
get_period_field_arr,
_validate_end_alias,
_quarter_to_myear,
)
import pandas.core.common as com
from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box,
_values_from_object, ABCSeries,
is_integer, is_float, is_object_dtype,
is_float_dtype)
from pandas import compat
from pandas.util.decorators import cache_readonly
from pandas.lib import Timestamp, Timedelta
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
from pandas.compat import zip, u
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
return get_period_field_arr(alias, self.values, base)
f.__name__ = name
f.__doc__ = docstring
return property(f)
def _get_ordinals(data, freq):
f = lambda x: Period(x, freq=freq).ordinal
if isinstance(data[0], Period):
return period.extract_ordinals(data, freq)
else:
return lib.map_infer(data, f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
def _period_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self.values, opname)
other_base, _ = _gfc(other.freq)
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise ValueError(msg)
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise ValueError(msg)
result = getattr(self.values, opname)(other.values)
mask = (com.mask_missing(self.values, tslib.iNaT) |
com.mask_missing(other.values, tslib.iNaT))
if mask.any():
result[mask] = nat_result
return result
else:
other = Period(other, freq=self.freq)
func = getattr(self.values, opname)
result = func(other.ordinal)
if other.ordinal == tslib.iNaT:
result.fill(nat_result)
mask = self.values == tslib.iNaT
if mask.any():
result[mask] = nat_result
return result
return wrapper
class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
    Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
dtype : NumPy dtype (default: i8)
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_typ = 'periodindex'
_attributes = ['name','freq']
_datetimelike_ops = ['year','month','day','hour','minute','second',
'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'qyear', 'freq', 'days_in_month', 'daysinmonth']
_is_numeric_dtype = False
freq = None
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, tz=None, **kwargs):
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, kwargs)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=False)
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
field_count = len(fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _from_arraylike(cls, data, freq, tz):
if not isinstance(data, (np.ndarray, PeriodIndex, DatetimeIndex, Int64Index)):
if np.isscalar(data) or isinstance(data, Period):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = com._ensure_int64(data)
if freq is None:
raise ValueError('freq not specified')
data = np.array([Period(x, freq=freq).ordinal for x in data],
dtype=np.int64)
except (TypeError, ValueError):
data = com._ensure_object(data)
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
data = _get_ordinals(data, freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = period.period_asfreq_arr(data.values,
base1, base2, 1)
else:
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
if data.dtype != np.int64:
if np.issubdtype(data.dtype, np.datetime64):
data = dt64arr_to_periodarr(data, freq, tz)
else:
try:
data = com._ensure_int64(data)
except (TypeError, ValueError):
data = com._ensure_object(data)
data = _get_ordinals(data, freq)
return data, freq
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
if not getattr(values,'dtype',None):
values = np.array(values,copy=False)
if is_object_dtype(values):
return PeriodIndex(values, name=name, freq=freq, **kwargs)
result = object.__new__(cls)
result._data = values
result.name = name
if freq is None:
raise ValueError('freq is not specified')
result.freq = Period._maybe_convert_freq(freq)
result._reset_identity()
return result
def _shallow_copy(self, values=None, infer=False, **kwargs):
""" we always want to return a PeriodIndex """
return super(PeriodIndex, self)._shallow_copy(values=values, infer=False, **kwargs)
def _coerce_scalar_to_index(self, item):
"""
        we need to coerce a scalar to something compatible with our index type
Parameters
----------
item : scalar item to coerce
"""
return PeriodIndex([item], **self._get_attributes_dict())
@property
def _na_value(self):
return self._box_func(tslib.iNaT)
def __contains__(self, key):
if not isinstance(key, Period) or key.freq != self.freq:
if isinstance(key, compat.string_types):
try:
self.get_loc(key)
return True
except Exception:
return False
return False
return key.ordinal in self._engine
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
PeriodIndex stores internal data as int dtype
        Replace this with __numpy_ufunc__ in a future version
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if (func is np.add):
return self._add_delta(context[1][1])
elif (func is np.subtract):
return self._add_delta(-context[1][1])
elif isinstance(func, np.ufunc):
if 'M->M' not in func.types:
msg = "ufunc '{0}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
# from here because numpy catches.
raise ValueError(msg.format(func.__name__))
if com.is_bool_dtype(result):
return result
return PeriodIndex(result, freq=self.freq, name=self.name)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
    def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return self._box_func(value)
def _to_embed(self, keep_tz=False):
""" return an array repr of this object, potentially casting to object """
return self.asobject.values
@property
def _formatter_func(self):
return lambda x: "'%s'" % x
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self.values[mask].searchsorted(where_idx.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx.values < self.values[first])] = -1
return result
def _array_values(self):
return self.asobject
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return Index(np.array(list(self), dtype), dtype)
elif dtype == _INT64_DTYPE:
return Index(self.values, dtype)
raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)
def searchsorted(self, key, side='left'):
if isinstance(key, Period):
if key.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, key.freqstr)
raise ValueError(msg)
key = key.ordinal
elif isinstance(key, compat.string_types):
key = Period(key, freq=self.freq).ordinal
return self.values.searchsorted(key, side=side)
@property
def is_all_dates(self):
return True
@property
def is_full(self):
"""
        Returns True if this PeriodIndex has no missing periods from start to end
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
def asfreq(self, freq=None, how='E'):
"""
Convert the PeriodIndex to the specified frequency `freq`.
Parameters
----------
freq : str
a frequency
how : str {'E', 'S'}
'E', 'END', or 'FINISH' for end,
'S', 'START', or 'BEGIN' for start.
Whether the elements should be aligned to the end
            or start within a period. January 31st ('END') vs.
            January 1st ('START') for example.
Returns
-------
new : PeriodIndex with the new frequency
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
<class 'pandas.tseries.period.PeriodIndex'>
[2010, ..., 2015]
Length: 6, Freq: A-DEC
>>> pidx.asfreq('M')
<class 'pandas.tseries.period.PeriodIndex'>
[2010-12, ..., 2015-12]
Length: 6, Freq: M
>>> pidx.asfreq('M', how='S')
<class 'pandas.tseries.period.PeriodIndex'>
[2010-01, ..., 2015-01]
Length: 6, Freq: M
"""
how = _validate_end_alias(how)
freq = frequencies.get_standard_freq(freq)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
asi8 = self.asi8
# mult1 can't be negative or 0
end = how == 'E'
if end:
ordinal = asi8 + mult1 - 1
else:
ordinal = asi8
new_data = period.period_asfreq_arr(ordinal, base1, base2, end)
if self.hasnans:
mask = asi8 == tslib.iNaT
new_data[mask] = tslib.iNaT
return self._simple_new(new_data, self.name, freq=freq)
def to_datetime(self, dayfirst=False):
return self.to_timestamp()
year = _field_accessor('year', 0, "The year of the period")
month = _field_accessor('month', 3, "The month as January=1, December=12")
day = _field_accessor('day', 4, "The days of the period")
hour = _field_accessor('hour', 5, "The hour of the period")
minute = _field_accessor('minute', 6, "The minute of the period")
second = _field_accessor('second', 7, "The second of the period")
weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 10, "The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = day_of_year = _field_accessor('dayofyear', 9, "The ordinal day of the year")
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor('days_in_month', 11, "The number of days in the month")
daysinmonth = days_in_month
def _get_object_array(self):
freq = self.freq
return np.array([ Period._from_ordinal(ordinal=x, freq=freq) for x in self.values], copy=False)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self._get_object_array()
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'int64'):
try:
other = PeriodIndex(other)
except:
return False
return np.array_equal(self.asi8, other.asi8)
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeIndex
Parameters
----------
freq : string or DateOffset, default 'D' for week or longer, 'S'
otherwise
Target frequency
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = frequencies.get_to_timestamp_base(base)
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = period.periodarr_to_dt64arr(new_data.values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
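    # Editor's illustrative sketch (not part of the original pandas source):
    # converting monthly periods to timestamps at either end of each period.
    #
    #   pidx = PeriodIndex(start='2000-01', periods=3, freq='M')
    #   pidx.to_timestamp(how='start')  # 2000-01-01, 2000-02-01, 2000-03-01
    #   pidx.to_timestamp(how='end')    # the last day of each month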
def _maybe_convert_timedelta(self, other):
if isinstance(other, (timedelta, np.timedelta64, offsets.Tick, Timedelta)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if nanos % offset_nanos == 0:
return nanos // offset_nanos
elif isinstance(other, offsets.DateOffset):
freqstr = frequencies.get_standard_freq(other)
base = frequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
elif isinstance(other, np.ndarray):
if com.is_integer_dtype(other):
return other
elif com.is_timedelta64_dtype(other):
offset = frequencies.to_offset(self.freq)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if (nanos % offset_nanos).all() == 0:
return nanos // offset_nanos
msg = "Input has different freq from PeriodIndex(freq={0})"
raise ValueError(msg.format(self.freqstr))
def _add_delta(self, other):
ordinal_delta = self._maybe_convert_timedelta(other)
return self.shift(ordinal_delta)
def shift(self, n):
"""
Specialized shift which produces an PeriodIndex
Parameters
----------
n : int
Periods to shift by
Returns
-------
shifted : PeriodIndex
"""
mask = self.values == tslib.iNaT
values = self.values + n * self.freq.n
values[mask] = tslib.iNaT
return PeriodIndex(data=values, name=self.name, freq=self.freq)
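    # Editor's illustrative sketch (not part of the original pandas source):
    # `shift` moves every period by a whole number of the index's own
    # frequency units.
    #
    #   pidx = PeriodIndex(start='2014Q1', periods=3, freq='Q')  # 2014Q1..2014Q3
    #   pidx.shift(1)   # 2014Q2, 2014Q3, 2014Q4
    #   pidx.shift(-2)  # 2013Q3, 2013Q4, 2014Q1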
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return self.inferred_type
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = _values_from_object(series)
try:
return _maybe_box(self, super(PeriodIndex, self).get_value(s, key), series, key)
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies.Resolution.get_freq_group(reso)
freqn = frequencies.get_freq_group(self.freq)
vals = self.values
# if our data is higher resolution than requested key, slice
if grp < freqn:
iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
if ord2 < vals[0] or ord1 > vals[-1]:
raise KeyError(key)
pos = np.searchsorted(self.values, [ord1, ord2])
key = slice(pos[0], pos[1] + 1)
return series[key]
elif grp == freqn:
key = Period(asdt, freq=self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key), series, key)
else:
raise KeyError(key)
except TypeError:
pass
key = Period(key, self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key), series, key)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
if hasattr(target, 'freq') and target.freq != self.freq:
raise ValueError('target and index have different freq: '
'(%s, %s)' % (target.freq, self.freq))
return Index.get_indexer(self, target, method, limit, tolerance)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
if is_integer(key):
raise
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
key = Period(key, freq=self.freq)
try:
return Index.get_loc(self, key.ordinal, method, tolerance)
except KeyError:
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string or a datetime, cast it to Period.ordinal according to
resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : string / None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, compat.string_types):
try:
_, parsed, reso = parse_time_string(label, self.freq)
bounds = self._parsed_string_to_bounds(reso, parsed)
return bounds[0 if side == 'left' else 1]
except Exception:
raise KeyError(label)
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice',label)
return label
def _parsed_string_to_bounds(self, reso, parsed):
if reso == 'year':
t1 = Period(year=parsed.year, freq='A')
elif reso == 'month':
t1 = Period(year=parsed.year, month=parsed.month, freq='M')
elif reso == 'quarter':
q = (parsed.month - 1) // 3 + 1
t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')
elif reso == 'day':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
freq='D')
elif reso == 'hour':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, freq='H')
elif reso == 'minute':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, freq='T')
elif reso == 'second':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, second=parsed.second,
freq='S')
else:
            raise KeyError(reso)
return (t1.asfreq(self.freq, how='start'),
t1.asfreq(self.freq, how='end'))
def _get_string_slice(self, key):
if not self.is_monotonic:
raise ValueError('Partial indexing only valid for '
'ordered time series')
key, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies.Resolution.get_freq_group(reso)
freqn = frequencies.get_freq_group(self.freq)
if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn:
raise KeyError(key)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
return slice(self.searchsorted(t1.ordinal, side='left'),
self.searchsorted(t2.ordinal, side='right'))
def _convert_tolerance(self, tolerance):
tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance)
return self._maybe_convert_timedelta(tolerance)
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
self._assert_can_do_setop(other)
result = Int64Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
result, lidx, ridx = result
return self._apply_meta(result), lidx, ridx
return self._apply_meta(result)
def _assert_can_do_setop(self, other):
super(PeriodIndex, self)._assert_can_do_setop(other)
if not isinstance(other, PeriodIndex):
raise ValueError('can only call with other PeriodIndex-ed objects')
if self.freq != other.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise ValueError(msg)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
result = self._apply_meta(result)
result.name = name
return result
def _apply_meta(self, rawarr):
if not isinstance(rawarr, PeriodIndex):
rawarr = PeriodIndex(rawarr, freq=self.freq)
return rawarr
def __getitem__(self, key):
getitem = self._data.__getitem__
if np.isscalar(key):
val = getitem(key)
return Period(ordinal=val, freq=self.freq)
else:
if com.is_bool_indexer(key):
key = np.asarray(key)
result = getitem(key)
if result.ndim > 1:
# MPL kludge
# values = np.asarray(list(values), dtype=object)
# return values.reshape(result.shape)
return PeriodIndex(result, name=self.name, freq=self.freq)
return PeriodIndex(result, name=self.name, freq=self.freq)
def _format_native_types(self, na_rep=u('NaT'), date_format=None, **kwargs):
values = np.array(list(self), dtype=object)
mask = isnull(self.values)
values[mask] = na_rep
imask = ~mask
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: u('%s') % dt
values[imask] = np.array([formatter(dt) for dt in values[imask]])
return values
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
indices = com._ensure_platform_int(indices)
taken = self.asi8.take(indices, axis=axis)
return self._simple_new(taken, self.name, freq=self.freq)
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
if isinstance(to_concat[0], PeriodIndex):
if len(set([x.freq for x in to_concat])) > 1:
# box
to_concat = [x.asobject.values for x in to_concat]
else:
cat_values = np.concatenate([x.values for x in to_concat])
return PeriodIndex(cat_values, freq=self.freq, name=name)
to_concat = [x.values if isinstance(x, Index) else x
for x in to_concat]
return Index(com._concat_compat(to_concat), name=name)
def repeat(self, n):
"""
Return a new Index of the values repeated n times.
See also
--------
numpy.ndarray.repeat
"""
# overwrites method from DatetimeIndexOpsMixin
return self._shallow_copy(self.values.repeat(n))
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(PeriodIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
# backcompat
self.freq = Period._maybe_convert_freq(own_state[1])
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(self, state)
self._data = data
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def tz_convert(self, tz):
"""
Convert tz-aware DatetimeIndex from one time zone to another (using pytz/dateutil)
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding UTC time.
Returns
-------
normalized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
def tz_localize(self, tz, infer_dst=False):
"""
Localize tz-naive DatetimeIndex to given time zone (using pytz/dateutil),
or remove timezone from tz-aware DatetimeIndex
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
infer_dst : boolean, default False
Attempt to infer fall dst-transition hours based on order
Returns
-------
localized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
PeriodIndex._add_numeric_methods_disabled()
PeriodIndex._add_logical_methods_disabled()
PeriodIndex._add_datetimelike_methods()
def _get_ordinal_range(start, end, periods, freq, mult=1):
if com._count_not_none(start, end, periods) < 2:
raise ValueError('Must specify 2 of start, end, periods')
if freq is not None:
_, mult = _gfc(freq)
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError('Start and end must have same freq')
if ((is_start_per and start.ordinal == tslib.iNaT) or
(is_end_per and end.ordinal == tslib.iNaT)):
raise ValueError('Start and end must not be NaT')
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError('Could not infer freq from start/end')
if periods is not None:
periods = periods * mult
if start is None:
data = np.arange(end.ordinal - periods + mult,
end.ordinal + 1, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
return data, freq
def _range_from_fields(year=None, month=None, quarter=None, day=None,
hour=None, minute=None, second=None, freq=None):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = 'Q'
base = frequencies.FreqGroup.FR_QTR
else:
base, mult = _gfc(freq)
if base != frequencies.FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = _quarter_to_myear(y, q, freq)
val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
base, mult = _gfc(freq)
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(period.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError('Mismatched Period array lengths')
elif length is None:
length = len(x)
arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length) for x in fields]
return arrays
def pnow(freq=None):
return Period(datetime.now(), freq=freq)
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
    Return a fixed frequency PeriodIndex, with day (calendar) as the default
frequency
Parameters
----------
start : starting value, period-like, optional
end : ending value, period-like, optional
periods : int, default None
Number of periods in the index
freq : str/DateOffset, default 'D'
Frequency alias
name : str, default None
Name for the resulting PeriodIndex
Returns
-------
prng : PeriodIndex
"""
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
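# Editor's illustrative sketch, not part of the original pandas module; the
# two calls below are equivalent ways of building the same three monthly
# periods.
def _example_period_range():  # pragma: no cover
    by_periods = period_range(start='2000-01', periods=3, freq='M')
    by_endpoints = period_range(start='2000-01', end='2000-03', freq='M')
    return by_periods, by_endpoints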
| artistic-2.0 |
lmcinnes/umap | umap/tests/test_umap_nn.py | 1 | 5288 | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors import KDTree
from sklearn.preprocessing import normalize
from umap import distances as dist
from umap.umap_ import (
nearest_neighbors,
smooth_knn_dist,
)
# ===================================================
# Nearest Neighbour Test cases
# ===================================================
# nearest_neighbours metric parameter validation
# -----------------------------------------------
def test_nn_bad_metric(nn_data):
with pytest.raises(ValueError):
nearest_neighbors(nn_data, 10, 42, {}, False, np.random)
def test_nn_bad_metric_sparse_data(sparse_nn_data):
with pytest.raises(ValueError):
nearest_neighbors(
sparse_nn_data, 10, "seuclidean", {}, False, np.random,
)
# -------------------------------------------------
# Utility functions for Nearest Neighbour
# -------------------------------------------------
def knn(indices, nn_data): # pragma: no cover
tree = KDTree(nn_data)
true_indices = tree.query(nn_data, 10, return_distance=False)
num_correct = 0.0
for i in range(nn_data.shape[0]):
num_correct += np.sum(np.in1d(true_indices[i], indices[i]))
return num_correct / (nn_data.shape[0] * 10)
def smooth_knn(nn_data, local_connectivity=1.0):
knn_indices, knn_dists, _ = nearest_neighbors(
nn_data, 10, "euclidean", {}, False, np.random
)
sigmas, rhos = smooth_knn_dist(
knn_dists, 10.0, local_connectivity=local_connectivity
)
shifted_dists = knn_dists - rhos[:, np.newaxis]
shifted_dists[shifted_dists < 0.0] = 0.0
vals = np.exp(-(shifted_dists / sigmas[:, np.newaxis]))
norms = np.sum(vals, axis=1)
return norms
@pytest.mark.skip()
def test_nn_descent_neighbor_accuracy(nn_data): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
nn_data, 10, "euclidean", {}, False, np.random
)
percent_correct = knn(knn_indices, nn_data)
assert (
percent_correct >= 0.85
), "NN-descent did not get 89% accuracy on nearest neighbors"
@pytest.mark.skip()
def test_nn_descent_neighbor_accuracy_low_memory(nn_data): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
nn_data, 10, "euclidean", {}, False, np.random, low_memory=True
)
percent_correct = knn(knn_indices, nn_data)
assert (
percent_correct >= 0.89
), "NN-descent did not get 89% accuracy on nearest neighbors"
@pytest.mark.skip()
def test_angular_nn_descent_neighbor_accuracy(nn_data): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
nn_data, 10, "cosine", {}, True, np.random
)
angular_data = normalize(nn_data, norm="l2")
percent_correct = knn(knn_indices, angular_data)
assert (
percent_correct >= 0.85
), "NN-descent did not get 89% accuracy on nearest neighbors"
@pytest.mark.skip()
def test_sparse_nn_descent_neighbor_accuracy(sparse_nn_data): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
sparse_nn_data, 20, "euclidean", {}, False, np.random
)
percent_correct = knn(knn_indices, sparse_nn_data.todense())
assert (
percent_correct >= 0.75
), "Sparse NN-descent did not get 90% accuracy on nearest neighbors"
@pytest.mark.skip()
def test_sparse_nn_descent_neighbor_accuracy_low_memory(
sparse_nn_data,
): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
sparse_nn_data, 20, "euclidean", {}, False, np.random, low_memory=True
)
percent_correct = knn(knn_indices, sparse_nn_data.todense())
assert (
percent_correct >= 0.85
), "Sparse NN-descent did not get 90% accuracy on nearest neighbors"
@pytest.mark.skip()
def test_nn_descent_neighbor_accuracy_callable_metric(nn_data): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
nn_data, 10, dist.euclidean, {}, False, np.random
)
percent_correct = knn(knn_indices, nn_data)
assert (
percent_correct >= 0.95
), "NN-descent did not get 95% accuracy on nearest neighbors with callable metric"
@pytest.mark.skip()
def test_sparse_angular_nn_descent_neighbor_accuracy(
sparse_nn_data,
): # pragma: no cover
knn_indices, knn_dists, _ = nearest_neighbors(
sparse_nn_data, 20, "cosine", {}, True, np.random
)
angular_data = normalize(sparse_nn_data, norm="l2").toarray()
percent_correct = knn(knn_indices, angular_data)
assert (
percent_correct >= 0.90
), "Sparse NN-descent did not get 90% accuracy on nearest neighbors"
def test_smooth_knn_dist_l1norms(nn_data):
norms = smooth_knn(nn_data)
assert_array_almost_equal(
norms,
1.0 + np.log2(10) * np.ones(norms.shape[0]),
decimal=3,
err_msg="Smooth knn-dists does not give expected" "norms",
)
def test_smooth_knn_dist_l1norms_w_connectivity(nn_data):
norms = smooth_knn(nn_data, local_connectivity=1.75)
assert_array_almost_equal(
norms,
1.0 + np.log2(10) * np.ones(norms.shape[0]),
decimal=3,
err_msg="Smooth knn-dists does not give expected"
"norms for local_connectivity=1.75",
)
| bsd-3-clause |
lukeiwanski/tensorflow-opencl | tensorflow/examples/learn/iris_custom_model.py | 50 | 2613 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
xinghalo/DMInAction | src/tensorflow/LN.py | 1 | 1926 | import matplotlib.pyplot as plt
import numpy
import tensorflow as tf
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 2000
display_step = 50
# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Create Model
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
activation = tf.add(tf.multiply(X, W), b)
# Minimize the squared errors
cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
#Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
"W=", sess.run(W), "b=", sess.run(b))
print("Optimization Finished!")
print("cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), \
"W=", sess.run(W), "b=", sess.run(b))
#Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show()
writer = tf.summary.FileWriter("/log",tf.get_default_graph())
writer.close() | apache-2.0 |
spallavolu/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
has2k1/plotnine | plotnine/animation.py | 1 | 6744 | from copy import copy
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import ArtistAnimation
from .exceptions import PlotnineError
class PlotnineAnimation(ArtistAnimation):
"""
Animation using ggplot objects
Parameters
----------
plots : iterable
        ggplot objects that make up the frames of the animation
interval : number, optional
Delay between frames in milliseconds. Defaults to 200.
repeat_delay : number, optional
If the animation in repeated, adds a delay in milliseconds
before repeating the animation. Defaults to `None`.
repeat : bool, optional
Controls whether the animation should repeat when the sequence
of frames is completed. Defaults to `True`.
blit : bool, optional
Controls whether blitting is used to optimize drawing. Defaults
to `False`.
Notes
-----
1. The plots should have the same `facet` and
the facet should not have fixed x and y scales.
2. The scales of all the plots should have the same limits. It is
a good idea to create a scale (with limits) for each aesthetic
and add them to all the plots.
    3. For plots with legends or any other features that are cut off,
use the :class:`~plotnine.themes.themeable.subplots_adjust`
themeable to create space for it.
"""
def __init__(self, plots, interval=200, repeat_delay=None,
repeat=True, blit=False):
figure, artists = self._draw_plots(plots)
ArtistAnimation.__init__(
self,
figure,
artists,
interval=interval,
repeat_delay=repeat_delay,
repeat=repeat,
blit=blit
)
def _draw_plots(self, plots):
with pd.option_context('mode.chained_assignment', None):
return self.__draw_plots(plots)
def __draw_plots(self, plots):
"""
Plot and return the figure and artists
Parameters
----------
plots : iterable
            ggplot objects that make up the frames of the animation
Returns
-------
figure : matplotlib.figure.Figure
Matplotlib figure
artists : list
List of :class:`Matplotlib.artist.artist`
"""
# For keeping track of artists for each frame
artist_offsets = {
'collections': [],
'patches': [],
'lines': [],
'texts': [],
'artists': []
}
scale_limits = dict()
def initialise_artist_offsets(n):
"""
            Initialise artist_offsets arrays to zero
Parameters
----------
n : int
Number of axes to initialise artists for.
The artists for each axes are tracked separately.
"""
for artist_type in artist_offsets:
artist_offsets[artist_type] = [0] * n
def get_frame_artists(plot):
"""
Parameters
----------
plot : ggplot
Drawn ggplot object from which to extract
artists.
"""
# The axes accumulate artists for all frames
# For each frame we pickup the newly added artists
# We use offsets to mark the end of the previous frame
# e.g ax.collections[start:]
frame_artists = []
for i, ax in enumerate(plot.axs):
for name in artist_offsets:
start = artist_offsets[name][i]
new_artists = getattr(ax, name)[start:]
frame_artists.extend(new_artists)
artist_offsets[name][i] += len(new_artists)
return frame_artists
def set_scale_limits(plot):
"""
Set limits of all the scales in the animation
Should be called before :func:`check_scale_limits`.
Parameters
----------
plot : ggplot
First ggplot object that has been drawn
"""
for sc in plot.scales:
ae = sc.aesthetics[0]
scale_limits[ae] = sc.limits
def check_scale_limits(plot, frame_no):
"""
Check limits of the scales of a plot in the animation
Raises a PlotnineError if any of the scales has limits
that do not match those of the first plot/frame.
Should be called after :func:`set_scale_limits`.
Parameters
----------
plot : ggplot
ggplot object that has been drawn
frame_no : int
Frame number
"""
if len(scale_limits) != len(plot.scales):
raise PlotnineError(
"All plots must have the same number of scales "
"as the first plot of the animation."
)
for sc in plot.scales:
ae = sc.aesthetics[0]
if ae not in scale_limits:
raise PlotnineError(
"The plot for frame {} does not have a scale "
"for the {} aesthetic.".format(frame_no, ae)
)
if sc.limits != scale_limits[ae]:
raise PlotnineError(
"The {} scale of plot for frame {} has different "
"limits from those of the first frame."
"".format(ae, frame_no)
)
figure = None
axs = None
artists = []
# The first ggplot creates the figure, axes and the initial
# frame of the animation. The rest of the ggplots draw
# onto the figure and axes created by the first ggplot and
# they create the subsequent frames.
for frame_no, p in enumerate(plots):
if figure is None:
figure, plot = p.draw(return_ggplot=True)
axs = plot.axs
initialise_artist_offsets(len(axs))
set_scale_limits(plot)
else:
p = copy(p)
plot = p._draw_using_figure(figure, axs)
try:
check_scale_limits(plot, frame_no)
except PlotnineError as err:
plt.close(figure)
raise err
artists.append(get_frame_artists(plot))
if figure is None:
figure = plt.figure()
# Prevent Jupyter from plotting any static figure
plt.close(figure)
return figure, artists
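# Editor's illustrative sketch, not part of the original plotnine module.  It
# assumes `ggplot`, `aes`, `geom_point` and the continuous scales are
# importable from plotnine, and follows note 2 of the class docstring by
# giving every frame the same scale limits.
def _example_animation():  # pragma: no cover
    from plotnine import (ggplot, aes, geom_point,
                          scale_x_continuous, scale_y_continuous)
    def frame(k):
        df = pd.DataFrame({'x': [0, 1, 2], 'y': [v + k for v in (0, 1, 2)]})
        return (ggplot(df, aes('x', 'y'))
                + geom_point()
                + scale_x_continuous(limits=(0, 2))
                + scale_y_continuous(limits=(0, 5)))
    plots = [frame(k) for k in range(4)]
    return PlotnineAnimation(plots, interval=100, repeat_delay=500)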
| gpl-2.0 |
jjs0sbw/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/finance.py | 3 | 23033 | """
A collection of modules for collecting, analyzing and plotting
financial data. User contributions welcome!
"""
#from __future__ import division
import os, warnings
from urllib2 import urlopen
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import datetime
import numpy as np
from matplotlib import verbose, get_configdir
from matplotlib.dates import date2num
from matplotlib.cbook import iterable
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.colors import colorConverter
from matplotlib.lines import Line2D, TICKLEFT, TICKRIGHT
from matplotlib.patches import Rectangle
from matplotlib.transforms import Affine2D
configdir = get_configdir()
cachedir = os.path.join(configdir, 'finance.cache')
stock_dt = np.dtype([('date', object),
('year', np.int16),
('month', np.int8),
('day', np.int8),
('d', np.float), # mpl datenum
('open', np.float),
('close', np.float),
('high', np.float),
('low', np.float),
('volume', np.float),
('aclose', np.float)])
def parse_yahoo_historical(fh, adjusted=True, asobject=False):
"""
Parse the historical data in file handle fh from yahoo finance.
*adjusted*
If True (default) replace open, close, high, and low prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
*asobject*
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, close, high, low, volume
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, close, high, low,
volume, adjusted_close
        where d is a floating point representation of date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
"""
lines = fh.readlines()
results = []
datefmt = '%Y-%m-%d'
for line in lines[1:]:
vals = line.split(',')
if len(vals)!=7:
continue # add warning?
datestr = vals[0]
#dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
# Using strptime doubles the runtime. With the present
# format, we don't need it.
dt = datetime.date(*[int(val) for val in datestr.split('-')])
dnum = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = float(vals[5])
aclose = float(vals[6])
results.append((dt, dt.year, dt.month, dt.day,
dnum, open, close, high, low, volume, aclose))
results.reverse()
d = np.array(results, dtype=stock_dt)
if adjusted:
scale = d['aclose'] / d['close']
scale[np.isinf(scale)] = np.nan
d['open'] *= scale
d['close'] *= scale
d['high'] *= scale
d['low'] *= scale
if not asobject:
# 2-D sequence; formerly list of tuples, now ndarray
ret = np.zeros((len(d), 6), dtype=np.float)
ret[:,0] = d['d']
ret[:,1] = d['open']
ret[:,2] = d['close']
ret[:,3] = d['high']
ret[:,4] = d['low']
ret[:,5] = d['volume']
if asobject is None:
return ret
return [tuple(row) for row in ret]
return d.view(np.recarray) # Close enough to former Bunch return
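# Hedged, self-contained sketch added for illustration; the helper below is
# not part of the original matplotlib API.  It feeds parse_yahoo_historical a
# tiny in-memory CSV and shows the adjustment step S = adjusted_close / close
# described in the docstring above.
def _parse_yahoo_historical_example():
    from StringIO import StringIO
    csv = ("Date,Open,High,Low,Close,Volume,Adj Close\n"
           "2003-01-03,10.0,12.0,9.0,11.0,1000,5.5\n"
           "2003-01-02,10.0,12.0,9.0,11.0,1000,5.5\n")
    r = parse_yahoo_historical(StringIO(csv), adjusted=True, asobject=True)
    # here S = 5.5 / 11.0 = 0.5, so open/high/low/close are halved while
    # volume is left untouched
    return r.open, r.close, r.volume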
def fetch_historical_yahoo(ticker, date1, date2, cachename=None,dividends=False):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are date or datetime instances, or (year, month, day) sequences.
Ex:
fh = fetch_historical_yahoo('^GSPC', (2000, 1, 1), (2001, 12, 31))
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
and date range)
set dividends=True to return dividends instead of price data. With
this option set, parse functions will not work
a file handle is returned
"""
ticker = ticker.upper()
if iterable(date1):
d1 = (date1[1]-1, date1[2], date1[0])
else:
d1 = (date1.month-1, date1.day, date1.year)
if iterable(date2):
d2 = (date2[1]-1, date2[2], date2[0])
else:
d2 = (date2.month-1, date2.day, date2.year)
if dividends:
g='v'
verbose.report('Retrieving dividends instead of prices')
else:
g='d'
urlFmt = 'http://table.finance.yahoo.com/table.csv?a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=%s&ignore=.csv'
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker, g)
if cachename is None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if os.path.exists(cachename):
fh = file(cachename)
verbose.report('Using cachefile %s for %s'%(cachename, ticker))
else:
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
urlfh = urlopen(url)
fh = file(cachename, 'w')
fh.write(urlfh.read())
fh.close()
verbose.report('Saved %s data to cache file %s'%(ticker, cachename))
fh = file(cachename, 'r')
return fh
def quotes_historical_yahoo(ticker, date1, date2, asobject=False,
adjusted=True, cachename=None):
"""
Get historical data for ticker between date1 and date2. date1 and
date2 are datetime instances or (year, month, day) sequences.
See :func:`parse_yahoo_historical` for explanation of output formats
and the *asobject* and *adjusted* kwargs.
Ex:
sp = f.quotes_historical_yahoo('^GSPC', d1, d2,
asobject=True, adjusted=True)
returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
[n,bins,patches] = hist(returns, 100)
mu = mean(returns)
sigma = std(returns)
x = normpdf(bins, mu, sigma)
plot(bins, x, color='red', lw=2)
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
and date range)
"""
# Maybe enable a warning later as part of a slow transition
# to using None instead of False.
#if asobject is False:
# warnings.warn("Recommend changing to asobject=None")
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try:
ret = parse_yahoo_historical(fh, asobject=asobject,
adjusted=adjusted)
if len(ret) == 0:
return None
except IOError, exc:
warnings.warn('fh failure\n%s'%(exc.strerror[1]))
return None
return ret
def plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""
quotes is a sequence of (time, open, close, high, low, ...) sequences
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
time must be in float date format - see date2num
ax : an Axes instance to plot to
ticksize : open/close tick marker in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
lines = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open : color = colorup
else : color = colordown
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(
xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(
xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
def candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
quotes is a sequence of (time, open, close, high, low, ...) sequences.
As long as the first 5 elements are these values,
    the record can be as long as you want (e.g. it may store volume).
time must be in float days format - see date2num
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
ax : an Axes instance to plot to
width : fraction of a day for the rectangle width
colorup : the color of the rectangle where close >= open
colordown : the color of the rectangle where close < open
alpha : the rectangle alpha level
return value is lines, patches where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width/2.0
lines = []
patches = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open :
color = colorup
lower = open
height = close-open
else :
color = colordown
lower = close
height = open-close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color='k',
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy = (t-OFFSET, lower),
width = width,
height = height,
facecolor = color,
edgecolor = color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
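# Hedged usage sketch added for illustration; the helper below is not part of
# the original module.  It shows the minimal inputs candlestick() expects: an
# Axes and (t, open, close, high, low) tuples with t as a date2num float.
def _candlestick_example():
    import matplotlib.pyplot as plt
    t0 = date2num(datetime.date(2003, 1, 2))
    quotes = [(t0 + i, 10.0 + i, 11.0 + i, 12.0 + i, 9.0 + i)
              for i in range(5)]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    candlestick(ax, quotes, width=0.6, colorup='g', colordown='r')
    ax.xaxis_date()
    return fig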
def plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
ax : an Axes instance to plot to
ticksize : size of open and close ticks in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [ ((-ticksize, 0), (0, 0)) ]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [ ((0, 0), (ticksize, 0)) ]
offsetsOpen = [ (i, open) for i, open in zip(xrange(len(opens)), opens) if open != -1 ]
offsetsClose = [ (i, close) for i, close in zip(xrange(len(closes)), closes) if close != -1 ]
scale = ax.figure.dpi * (1.0/72.0)
tickTransform = Affine2D().scale(scale, 0.0)
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,1
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,1
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(rangeSegments)==len(offsetsOpen))
assert(len(offsetsOpen)==len(offsetsClose))
assert(len(offsetsClose)==len(colors))
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors = colors,
linewidths = lw,
antialiaseds = useAA,
)
openCollection = LineCollection(openSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsOpen,
transOffset = ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsClose,
transOffset = ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""
Represent the open, close as a bar line and high low range as a
vertical line.
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
return value is lineCollection, barCollection
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
delta = width/2.
barVerts = [ ( (i-delta, open), (i-delta, close), (i+delta, close), (i+delta, open) ) for i, open, close in zip(xrange(len(opens)), opens, closes) if open != -1 and close!=-1 ]
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(barVerts)==len(rangeSegments))
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors = ( (0,0,0,1), ),
linewidths = lw,
antialiaseds = useAA,
)
barCollection = PolyCollection(barVerts,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
ax.add_collection(rangeCollection)
return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
delta = width/2.
bars = [ ( (i-delta, 0), (i-delta, v), (i+delta, v), (i+delta, 0)) for i, v in enumerate(volumes) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = (0,),
linewidths = (0.5,),
)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
return barCollection
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
nb: first point is not displayed - it is used only for choosing the
right color
"""
return volume_overlay(ax,closes[:-1],closes[1:],volumes[1:],colorup,colordown,width,alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. quotes is a list of (d,
open, close, high, low, volume) and close-open is used to
determine the color of the bar
kwarg
width : the bar width in points
colorup : the color of the lines where close1 >= close0
colordown : the color of the lines where close1 < close0
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
dates, opens, closes, highs, lows, volumes = zip(*quotes)
colors = [colord[close1>=close0] for close0, close1 in zip(closes[:-1], closes[1:]) if close0!=-1 and close1 !=-1]
colors.insert(0,colord[closes[0]>=opens[0]])
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, volume), (right, volume), (right, 0)) for d, open, close, high, low, volume in quotes]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
dates = [d for d, open, close, high, low, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, close, high, low, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.bounds
#print 'viewlim', ax.viewLim.bounds
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
def index_bar(ax, vals,
facecolor='b', edgecolor='l',
width=4, alpha=1.0, ):
"""
Add a bar collection graph with height vals (-1 is missing).
ax : an Axes instance to plot to
width : the bar width in points
alpha : bar transparency
"""
facecolors = (colorConverter.to_rgba(facecolor, alpha),)
edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, v), (right, v), (right, 0)) for v in vals if v != -1 ]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
offsetsBars = [ (i, 0) for i,v in enumerate(vals) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = facecolors,
edgecolors = edgecolors,
antialiaseds = (0,),
linewidths = (0.5,),
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v!=-1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
| gpl-3.0 |
rajat1994/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
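def _mldata_filename_example():
    # Hedged example added for clarity; this helper is not part of the
    # scikit-learn API.  The raw name is lower-cased, spaces become dashes,
    # and parentheses/dots are stripped before the download URL is built.
    assert mldata_filename("Whistler Daily Snowfall") == "whistler-daily-snowfall"
    assert mldata_filename("datasets-UCI iris") == "datasets-uci-iris"
    return True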
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to a mldata.org URL.
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respects the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
arokem/MRS-old | MRS/tests/test_analysis.py | 1 | 3134 | import os
import tempfile
import numpy as np
import numpy.testing as npt
import matplotlib
matplotlib.use('agg')
import nitime as nt
import nibabel as nib
import MRS
import MRS.utils as ut
import MRS.analysis as ana
test_path = os.path.join(MRS.__path__[0], 'tests')
file_name = os.path.join(test_path, 'pure_gaba_P64024.nii.gz')
def test_separate_signals():
"""
Test separation of signals for water-suppressed and other signals
"""
data = np.transpose(nib.load(file_name).get_data(), [1,2,3,4,5,0]).squeeze()
w_data, w_supp_data = ana.separate_signals(data)
# Very simple sanity checks
npt.assert_equal(w_data.shape[-1], data.shape[-1])
npt.assert_equal(w_supp_data.shape[-1], data.shape[-1])
npt.assert_array_equal(data[1], w_data[0])
def test_coil_combine():
"""
Test combining of information from different coils
"""
data = np.transpose(nib.load(file_name).get_data(), [1,2,3,4,5,0]).squeeze()
w_data, w_supp_data = ana.coil_combine(data)
# Make sure that the time-dimension is still correct:
npt.assert_equal(w_data.shape[-1], data.shape[-1])
npt.assert_equal(w_supp_data.shape[-1], data.shape[-1])
# Check that the phase for the first data point is approximately the
# same and approximately 0 for all the water-channels:
npt.assert_array_almost_equal(np.angle(w_data)[:,:,0],
np.zeros_like(w_data[:,:,0]),
decimal=1) # We're not being awfully strict
# about it.
def test_get_spectra():
"""
Test the function that does the spectral analysis
"""
data = np.transpose(nib.load(file_name).get_data(), [1,2,3,4,5,0]).squeeze()
w_data, w_supp_data = ana.coil_combine(data)
# XXX Just basic smoke-testing for now:
f_nonw, nonw_sig1 = ana.get_spectra(nt.TimeSeries(w_supp_data,
sampling_rate=5000))
f_nonw, nonw_sig2 = ana.get_spectra(nt.TimeSeries(w_supp_data,
sampling_rate=5000),
line_broadening=5)
f_nonw, nonw_sig3 = ana.get_spectra(nt.TimeSeries(w_supp_data,
sampling_rate=5000),
line_broadening=5,
zerofill=1000)
def test_mrs_analyze():
"""
Test the command line utility
"""
mrs_path = MRS.__path__[0]
out_name = tempfile.NamedTemporaryFile().name
# Check that it runs through:
cmd = '%s/../bin/mrs-analyze.py %s '%(mrs_path, file_name)
npt.assert_equal(os.system(cmd),0)
# XXX We might want to analyze the output file here...
def test_bootstrap_stat():
"""
Test simple bootstrapping statistics
"""
rand_array = np.random.randn(100, 1000)
arr_mean, mean_ci = ana.bootstrap_stat(rand_array)
npt.assert_array_equal(np.mean(rand_array, 0), arr_mean)
arr_var, var_ci = ana.bootstrap_stat(rand_array, stat=np.var)
npt.assert_array_equal(np.var(rand_array, 0), arr_var)
| mit |
Sperlea/cittex | main.py | 1 | 18931 |
# 11.04.16
# This is a tool for literature - or rather citation - organization. It is based on the widely used bibtex format, which
# makes it easy to use together with applications such as LaTeX. Quotes are at the center of this program: the user can attach
# quotes to every publication, and each quote is indexed by keywords and carries a short summary and
# the text of the quote, all as provided by the user.
import Publication
import requests
import textwrap
import unicodedata
import arxiv2bibtex
from collections import Counter
import isbnlib
from isbnlib.registry import bibformatters
from textblob import TextBlob as tb
from tqdm import tqdm
import math
import inspect
class Library(object):
def __init__(self, papers, keywords, loc):
self.textblobcorpus = []
self.publications = papers
self.keywords = keywords
self.location = loc
self.typedict = {subclass.type_of_publication: subclass for subclass in Publication.Publication.__subclasses__()}
if papers:
self.latest_paper = self.publications[len(self.publications) - 1]
self.brainmodules = []
def __add__(self, other):
newpubs = list(set(self.publications + other.publications))
newkeywords = self.keywords + other.keywords
newlib = Library(newpubs, newkeywords, self.location)
for bm in self.brainmodules:
newlib.add_brainmodule(bm)
return newlib
def add_brainmodule(self, brainmodule):
self.brainmodules.append(brainmodule)
def append_publication(self, publication):
self.publications.append(publication)
self.latest_paper = publication
def list_years(self):
yearlist = []
for paper in self.publications:
yearlist.append(paper.year)
return yearlist
def get_oldest_publication(self):
min_year = 3000
oldest_paper = None
for paper in self.publications:
if int(paper.year) < min_year:
min_year = int(paper.year)
oldest_paper = paper
print(oldest_paper)
def add_publication_with_doi(self, doi):
bibtex = self._get_bibtex_from_internet(doi)
self.add_publication_from_bibtex(bibtex)
def add_publication_from_arxiv(self, arxivid):
bibtex = arxiv2bibtex.arxiv2bib([arxivid])[0]
bibtex = bibtex.bibtex().replace(bibtex.id+",", bibtex.authors[0].split(" ")[-1] + "_" + bibtex.year + ",")
self.add_publication_from_bibtex([b for b in bibtex.split(",\n")])
def add_publication_from_bibtex(self, bibtex):
try:
if "@techreport{" in bibtex[0]:
bibtex = self._turn_techreport_to_type(bibtex, Publication.Article)
new_paper = self.typedict[bibtex[0].split("{")[0][1:]](bibtex, "READ_BIBTEX", self)
if not self._already_contains_publication(new_paper):
oldshorties = [b.short_identifier for b in self.publications if new_paper.short_identifier in b.short_identifier]
if oldshorties:
try:
oldletter = new_paper.short_identifier.split("_")[1][4]
newshort_identifier = new_paper.short_identifier[:-1] + chr(ord(oldletter)+1)
except IndexError:
newshort_identifier = new_paper.short_identifier +"a"
new_paper.bibtex = new_paper.bibtex.replace(new_paper.short_identifier, newshort_identifier)
self.add_publication_from_bibtex(new_paper.bibtex.split(",\n"))
else:
self.append_publication(new_paper)
else:
print("This paper is already in the Library.")
self.latest_paper = new_paper
except TypeError:
print("ERROR: This doi couldn't be resolved. Skipping...")
def add_publication_from_isbn(self, isbn):
isbnlib.config.add_apikey("isbndb", "2TCD2CVI")
bibtex = bibformatters['bibtex']
data = isbnlib.meta(isbn, "isbndb")
for part in data:
if not data[part]:
data[part] = input("Missing Value! Please input value for the field " + part + ". ")
data["ISBN-13"] = data["Authors"][0].split(", ")[0] + "_" + str(data["Year"])
new_bibtex = bibtex(data).replace(" ", "").replace("\n ", "\n").split("\n")
self.add_publication_from_bibtex(new_bibtex)
def save(self):
self.export_as_bibtex(self.location)
for bm in self.brainmodules:
bm.save()
def _already_contains_publication(self, new_paper):
found = False
for piece in self.publications:
if piece.short_identifier == new_paper.short_identifier:
if piece.title.upper().replace(".", "") == new_paper.title.upper().replace(".", ""):
found = True
break
return found
def export_as_bibtex(self, location, verbose = True):
handle = open(location, "w")
#TODO: Use tqdm instead of all print statements
for paper in tqdm(self.publications, desc = "Saving as BIBTEX... ", unit = " paper", disable = not verbose,
bar_format = "{l_bar}{bar}| {n_fmt}/{total_fmt} paper\n"):
handle.write(paper.as_bibtex_with_quotes())
print()
def search_in_quotes(self, query):
counter = 0
for paper in self.publications:
for cit in paper.quotes:
if query in cit.text:
print("Query: " + query + " - hit " + str(counter))
print(cit)
print()
counter += 1
def _get_bibtex_from_internet(self, doi):
bibtex_text = self._doi2bibtex(doi)
if "title = {" in bibtex_text:
output = []
for line in bibtex_text.split(",\n"):
output.append(line)
return output
def _doi2bibtex(self, doi):
# Gets the information about a given publication from the internet in bibtex format
# adapted from http://www.michelepasin.org/blog/2014/12/03/dereference-a-doi-using-python/
self.headers = {'accept': 'application/x-bibtex'}
if doi.startswith("http://"):
url = doi
else:
url = "http://dx.doi.org/" + doi
r = requests.get(url, headers=self.headers)
return r.text
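    # Hedged note added for clarity: dx.doi.org answers the request above via
    # content negotiation, so the returned text is plain bibtex, roughly of
    # the form "@article{Author_2016, doi = {...}, title = {...}, ...}".  The
    # exact citation key and field order are chosen by the DOI registrar, not
    # by this class.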
def list_publications(self):
'Lists the publications that are saved in the bibliography.'
for counter, paper in enumerate(self.publications):
print(counter + 1, ".", paper.authors, paper.year, "--", paper.title)
def list_keywords(self):
'Lists the keywords that are attached to quotes from the publications in the bibliography.'
output = []
for keyword in self.keywords.words:
output.append(keyword + "\t -- Number: " + str(len(self.keywords.words[keyword])))
output = sorted(output, key=lambda v: v.upper())
for counter, line in enumerate(output):
print(counter, line)
def list_authors(self):
'Lists all the authors whose work is present in the bibliography.'
def is_this_author_new(this_author_list, this_author):
output = True
for aut in this_author_list:
if aut[0] == this_author:
output = False
break
return output
def add_paper_to_author(this_author_list, this_author, this_paper):
for counter, __ in enumerate(this_author_list):
if this_author_list[counter][0] in this_author:
tmp = this_author_list[counter][1] + ", " + this_paper.short_identifier
this_author_list[counter] = (this_author_list[counter][0], tmp)
return this_author_list
author_list = []
for paper in self.publications:
for author in paper.authors.split(" and "):
author = author.split(" ")[len(author.split(" "))-1]
if is_this_author_new(author_list, author):
author_list.append((author, paper.short_identifier))
else:
author_list = add_paper_to_author(author_list, author, paper)
print("Found " + str(len(author_list)) + " authors in " + str(len(self.publications)) + " publications.")
for line in sorted(author_list):
print(line[0] + "\t- " + line[1])
def quotes_with_keyword(self, chosen_keyword):
'Lists all quotes that have the chosen keyword attached to them.'
print("Chosen keyword: " + chosen_keyword)
for counter, cit in enumerate(self.keywords.words[chosen_keyword]):
print(counter, cit.summary, "\t||\t" + cit.publication.short_identifier + "\t||\t" + cit.publication.title)
def read_full_quote(self, chosen_keyword, index):
'Shows the full quote specified by the chosen keyword and the index. Chosen keyword and index must be separated by " ".'
print("Chosen keyword: " + chosen_keyword)
print("Summary: " + self.keywords.words[chosen_keyword][index].summary)
print("Keywords: " + self.keywords.words[chosen_keyword][index].keywords)
print("Paper: " + str(self.keywords.words[chosen_keyword][index].publication))
print("Short: " + self.keywords.words[chosen_keyword][index].publication.short_identifier)
print(textwrap.fill(self.keywords.words[chosen_keyword][index].text, 100))
def show_publications_with_keyword(self, chosen_keyword):
print("Chosen keyword: " + chosen_keyword)
relevant_papers = []
for cit in self.keywords.words[chosen_keyword]:
            relevant_papers.append(cit.publication)
output = list(set(relevant_papers))
for counter in range(0, len(output)):
print(counter, output[counter])
def add_a_publication(self):
'Is used to add a paper to the bibliography via doi or manually'
doi = input("Please input doi. If no doi is available, leave empty. ")
if doi:
print(doi)
self.add_publication_with_doi(doi)
for bm in self.brainmodules:
bm.add_paper(self.latest_paper)
else:
arxivid = input("Please input arxiv ID. If no doi is available, leave empty. ")
if arxivid:
self.add_publication_from_arxiv(arxivid)
for bm in self.brainmodules:
bm.add_paper(self.latest_paper)
else:
isbn = input("Please input ISBN-13. If no doi is available, leave empty. ")
if isbn:
self.add_publication_from_isbn(isbn)
for bm in self.brainmodules:
bm.add_paper(self.latest_paper)
else:
type_short = input("Type of publication. 'a' for article, 'b' for book, 'i' for inbook: ")
                    if type_short == 'a':
new_publication = Publication.Article(None, "READ_BIBTEX_INPUT", self)
                    elif type_short == 'b':
new_publication = Publication.Book(None, "READ_BIBTEX_INPUT", self)
elif type_short is "i":
new_publication = Publication.InBook(None, "READ_BIBTEX_INPUT", self)
else:
new_publication = Publication.Publication(None, "READ_BIBTEX_INPUT", self)
if not self._already_contains_publication(new_publication):
self.append_publication(new_publication)
for bm in self.brainmodules:
bm.add_paper(new_publication)
else:
print("This paper is already in the Library.")
self.latest_paper = new_publication
#TODO: Write a function that plots the authors as network. But also count the number of non-connected networks.
def _turn_techreport_to_type(self, bibtex, new_type):
bibtex.insert(1, "\tjournal = {bioRxiv}")
fields = [line.strip().split(" = ")[0] for line in bibtex]
if len([part for part in new_type.required_fields if part in fields]) == 4:
bibtex[0] = bibtex[0].replace("techreport", new_type.type_of_publication)
return bibtex
def find_double_keywords(self):
for i, paper in enumerate(self.publications):
print(i, paper)
for j, quote in enumerate(paper.quotes):
doubles = [kword for kword in Counter(quote.keywords.split(", "))
if Counter(quote.keywords.split(", "))[kword] > 1]
if doubles:
print(doubles)
def update_brain_modules(self, paper):
for bm in self.brainmodules:
bm.add_information_on_paper(paper)
def plot_years(self):
import matplotlib.pyplot as plt
import seaborn as sns
years = [int(pub.year) for pub in self.publications]
plt.hist(years, bins=(max(years) - min(years)) + 1)
plt.show()
def loop_through_fulltext_quotes(self, chosen_keyword):
print("Chosen keyword: " + chosen_keyword)
for counter, cit in enumerate(self.keywords.words[chosen_keyword]):
print(str(counter) + " of " + str(len(self.keywords.words[chosen_keyword])))
print("Summary: " + self.keywords.words[chosen_keyword][counter].summary)
print("Keywords: " + self.keywords.words[chosen_keyword][counter].keywords)
print("Paper: " + str(self.keywords.words[chosen_keyword][counter].publication))
print("Short: " + self.keywords.words[chosen_keyword][counter].publication.short_identifier)
print(textwrap.fill(self.keywords.words[chosen_keyword][counter].text, 100))
input("(enter)")
print("")
def get_quoteless_papers(self):
return [paper for paper in self.publications if len(paper.quotes) == 0]
def remove_paper(self, paper):
papernumber = [i for i, pap in enumerate(self.publications) if pap == paper][0]
del self.publications[papernumber]
def move_quoteless_papers_to_bookshelf_of_shame(self):
bos = [brain for brain in self.brainmodules if getattr(brain, "add_without_mother", None)][0]
for i, pub in enumerate(self.get_quoteless_papers()[::-1]):
answer = input("Do you want to keep '" + pub.title + "'? (y/n) ")
if answer == "y":
bos.add_without_mother(pub)
else:
None
self.remove_paper(pub)
self.save()
class Citation(object):
def __init__(self, block_of_text, publication, key):
if publication.is_booklike:
self.publication = publication
self.summary = block_of_text[0]
self.keywords = block_of_text[1]
self.logic = block_of_text[2]
self.pages = block_of_text[3]
self.text = block_of_text[4]
publication.Biblio.textblobcorpus.append(tb(self.text))
for word in self.keywords.split(", "):
key.add_word(word, self)
else:
self.publication = publication
self.summary = block_of_text[0]
self.keywords = block_of_text[1]
self.logic = block_of_text[2]
self.text = block_of_text[3]
publication.Biblio.textblobcorpus.append(tb(self.text))
for word in self.keywords.split(", "):
key.add_word(word, self)
def __repr__(self):
output = "Summary: " + self.summary + "\nKeywords: " + self.keywords
try:
output += "\nPages: " + self.pages + "\nPaper: " + str(self.publication) + "\n" + self.text
except:
output += "\nPaper: " + str(self.publication) + "\n" + self.text
return output
def __eq__(self, other):
return self.text is other.text
def __hash__(self):
return hash(str(self.text))
def _to_bibtex_string(self):
line = "\tquote = {" + self.summary + "__" + self.keywords + "__" + self.logic + "__"
try:
line += self.pages + "__" + self.text.replace("\n", "") + "},"
except:
line += self.text.replace("\n", "") + "},"
return line
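    # Hedged example added for clarity: a serialized quote line produced above
    # looks roughly like
    #     quote = {summary__keyword1, keyword2__logic__12-14__full quote text},
    # i.e. the fields are joined with a literal "__" separator, and the pages
    # field is present only for book-like publications.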
def _in_list(self):
return "\t" + self.__repr__().replace("\n", "\n\t")
def _add_logic(self, line):
self.logic = line
class Keywords(object):
    def __init__(self, words=None):
        # avoid sharing one mutable default dict between Keywords instances
        self.words = {} if words is None else words
def __add__(self, other):
return Keywords({**self.words, **other.words})
def add_word(self, word, quote):
if word in self.words:
self.words[word].append(quote)
else:
self.words[word] = [quote]
def overwrite(self, words):
self.words = words
def replace_keyword(self, old_kw, new_kw):
for citation in self.words[old_kw]:
citation.keywords = citation.keywords.replace(old_kw, new_kw)
citation.keywords = ", ".join(list(set(citation.keywords.split(", "))))
self.words[new_kw].extend(self.words.pop(old_kw))
self.words[new_kw] = list(set(self.words[new_kw]))
class BrainModule():
    ## A minimal interface for brain modules. Modules that are added to a literature organizer are called
    # automatically by its standard functions through the methods declared below.
def save(self):
raise NotImplementedError
def add_information_on_paper(self, paper):
raise NotImplementedError
def add_paper(self, paper):
raise NotImplementedError
def read_bibtex(location):
file = open(location, "r")
record = []
bib = open_empty_library(location, key=Keywords())
for i, line in enumerate(file):
if "quote = {" in line:
record.append(line.replace(",\n", "").replace("\n", ""))
else:
record.append(line.replace(",\n", "").replace("\n", "").replace(", ", ""))
if line == "}\n" or line == "}":
publication = bib.typedict[record[0].split("{")[0][1:]](record, "READ_BIBTEX", bib)
bib.append_publication(publication)
record = []
return bib
def open_empty_library(location = None, key = Keywords()):
return Library([], key, location)
def caseless_equal(left, right):
return unicodedata.normalize("NFKD", left.casefold()) == unicodedata.normalize("NFKD", right.casefold())
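# Hedged example added for clarity; the helper below is not part of the
# original module.  Casefolding plus NFKD normalization makes the comparison
# ignore case and Unicode composition differences.
def _caseless_equal_example():
    assert caseless_equal("Strasse", "STRASSE")
    assert caseless_equal("Cafe\u0301", "Café")  # combining accent vs. precomposed
    return True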
# store builtin print
old_print = print
def new_print(*args, **kwargs):
# if tqdm.tqdm.write raises error, use builtin print
try:
tqdm.write(*args, **kwargs)
except:
        old_print(*args, **kwargs)
# globally replace print with new_print
inspect.builtins.print = new_print | gpl-3.0 |
jpzk/evopy | evopy/examples/experiments/constraints_dsessvc/simulate.py | 1 | 2681 | '''
This file is part of evopy.
Copyright 2012 - 2013, Jendrik Poloczek
evopy is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
evopy is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with
evopy. If not, see <http://www.gnu.org/licenses/>.
'''
from sys import path
path.append("../../../..")
from pickle import dump
from copy import deepcopy
from numpy import matrix, log10
from evopy.strategies.ori_dses_svc_repair import ORIDSESSVCR
from evopy.strategies.ori_dses_svc import ORIDSESSVC
from evopy.strategies.ori_dses import ORIDSES
from evopy.simulators.simulator import Simulator
from evopy.external.playdoh import map as pmap
from evopy.problems.sphere_problem_origin_r1 import SphereProblemOriginR1
from evopy.problems.sphere_problem_origin_r2 import SphereProblemOriginR2
from evopy.problems.schwefels_problem_26 import SchwefelsProblem26
from evopy.problems.tr_problem import TRProblem
from evopy.metamodel.dses_svc_linear_meta_model import DSESSVCLinearMetaModel
from sklearn.cross_validation import KFold
from evopy.operators.scaling.scaling_standardscore import ScalingStandardscore
from evopy.operators.scaling.scaling_dummy import ScalingDummy
from evopy.metamodel.cv.svc_cv_sklearn_grid_linear import SVCCVSkGridLinear
from evopy.operators.termination.or_combinator import ORCombinator
from evopy.operators.termination.accuracy import Accuracy
from evopy.operators.termination.generations import Generations
from evopy.operators.termination.convergence import Convergence
from os.path import exists
from os import mkdir
from setup import *
# create simulators
for problem in problems:
optimizer = optimizers[problem]
simulators_op = []
for i in range(0, samples):
simulator = Simulator(optimizer(), problem(), termination)
simulators_op.append(simulator)
simulators[problem] = simulators_op
simulate = lambda simulator : simulator.simulate()
# run simulators
for problem in problems:
resulting_simulators = pmap(simulate, simulators[problem])
for simulator in resulting_simulators:
cfcs[problem].append(simulator.logger.all()['count_cfc'])
if not exists("output/"):
mkdir("output/")
cfcs_file = open("output/cfcs_file.save", "w")
dump(cfcs, cfcs_file)
cfcs_file.close()
| gpl-3.0 |
nvoron23/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 35 | 11709 | try:
# Python 2 compat
reload
except NameError:
# Regular Python 3+ import
from importlib import reload
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
    # Test that priors passed as a list are correctly handled (just run it
    # and check that it does not fail)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
clf.fit(X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
    # Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
| bsd-3-clause |
lucidfrontier45/PyVB | pyvb/old_ver/vbgmm1d2.py | 1 | 8212 | #!/usr/bin/python
import numpy as np
from numpy.random import randn,dirichlet
from scipy.linalg import det, inv
from scipy.cluster import vq
from scipy.special import psi,gammaln
from core import *
try:
from _vbgmm1d import _evaluateHiddenState_C, _lnPD_C
ext_imported = True
except:
ext_imported = False
#print "warning, Cython extension module was not found"
#print "computation can be slower"
def testData1(n=100):
X = np.r_[randn(n*2)]
return X
def testData2(n=100):
X = np.r_[randn(n*2) / 0.3 , randn(n) + 10.0, randn(n*3) / 0.5 - 10.0, randn(3*n) / 0.1]
return X
def GaussianPDF(x,mu,s):
return np.exp(-((x - mu)**2)*s*0.5)*np.sqrt(s/(2.0*np.pi))
def lnZ_Wishart(nu,V):
# log normalization constant of 1D Wishart
lnZ = 0.5 * nu * np.log(2.0*V) + gammaln(nu * 0.5)
return lnZ
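# Hedged note added for clarity: for the 1-D Wishart (Gamma-shaped) density
# with degrees of freedom nu and scale V used in this module, the function
# above computes the log partition function
#     ln Z(nu, V) = (nu / 2) * ln(2 * V) + ln Gamma(nu / 2)
# which is the normalization term that reappears in the KL parts of the
# variational lower bounds below.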
class VBGMM1D:
def __init__(self,nmix=10,u=0.5,m=0.0,beta=1,nu=1,s=0.01):
self._nstates = nmix
self._u0 = u # Jeffrey prior if u0 = 0.5
self._m0 = m
self._beta0 = beta
self._nu0 = nu
self._s0 = s
def _init_params(self,obs,adjust_prior=True,scale=0.1,use_emgmm=False):
if adjust_prior:
self._adjust_prior(obs,scale)
self._set_posterior(obs,use_emgmm)
def _adjust_prior(self,obs,scale=0.1):
self._m0 = np.mean(obs)
self._s0 = 1.0 / (np.std(obs) * self._nu0) * scale
def _set_posterior(self,obs,use_emgmm=False):
nobs = len(obs)
nmix = self._nstates
# hidden states
self.z = dirichlet(np.tile(1.0/nmix,nmix),nobs)
# mixing coefficients
self.u = np.tile(self._u0,nmix)
# posterior mean vector
self.m, temp = vq.kmeans2(obs,nmix)
self.beta = np.tile(self._beta0,nmix)
# posterior degree of freedom
self.nu = np.tile(float(nobs)/nmix,nmix)
# posterior precision
self.s = np.tile(self._s0,nmix)
def _VBE(self,obs,use_ext=True):
self._epi = self.u / self.u.sum() # <pi_k>
self._elnpi = psi(self.u) - psi(self.u.sum()) # <ln(pi_k)>
self._et = self.s * self.nu # <tau_k>
self._elnt = psi(self.nu*0.5) + np.log(2.0*self.s) # <ln(t_k)>
self.z = self._evaluateHiddenState(obs,use_ext)
def _evaluateHiddenState(self,obs,use_ext=True):
nobs = len(obs)
nmix = self._nstates
ln2pi = np.log(2.0 * np.pi)
z = np.tile(self._elnpi + 0.5 * self._elnt - 0.5 * ln2pi ,(nobs,1))
if use_ext and ext_imported :
pass
else :
for k in xrange(nmix):
# very slow! need Fortran or C codes
dobs = obs - self.m[k]
z[:,k] -= 0.5 * (1.0/self.beta[k] + self.nu[k]*self.s[k]*(dobs**2))
z = np.exp(z - z.max(1)[np.newaxis].T)
z = normalize(z,1)
return z
def _VBM(self,obs):
self._calcSufficientStatistic(obs)
self._updatePosteriorParameters(obs)
def _calcSufficientStatistic(self,obs):
self.N = self.z.sum(0)
self.xbar = np.dot(obs,self.z) / self.N
self.C = np.diag(np.dot(((obs - self.xbar[np.newaxis].T)**2),self.z))
def _updatePosteriorParameters(self,obs):
self.u = self._u0 + self.N
self.beta = self.N + self._beta0
self.m = (self._beta0 * self._m0 + self.N * self.xbar) / self.beta
self.nu = self._nu0 + self.N
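        # s plays the role of the scale V of the 1-D Wishart (Gamma) posterior
        # over each component precision, so that <tau_k> = nu_k * s_k (see _VBE)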
self.s = 1.0 / (1.0/self._s0 + self.C + (self._beta0 *self.N / self.beta) \
* (self.xbar - self._m0)**2)
def _VBLowerBound1(self,obs,use_ext=True):
# variational lower bound
nmix = self._nstates
self.N = self.z.sum(0) # need to be updated !!
# <lnp(X|Z,theta)>
# very slow! neew Fortran or C codes
lnpX = np.dot(self.N,(self._elnpi + 0.5 * self._elnt))
for k in xrange(nmix):
dobs = obs - self.m[k]
lnpX -= self.N[k] * 1.0 / self.beta[k] + self.s[k] * self.nu[k] * \
(dobs**2).sum()
# H[q(z)] = -<lnq(z)>
Hz = 0.0
for k in xrange(nmix):
Hz -= np.dot(self.z[:,k],np.log(self.z[:,k]))
# KL[q(pi)||p(pi)]
KLpi = ( - gammaln(self.u) + self.N * psi(self.u)).sum()
# KL[q(mu,tau)||p(mu,tau)]
#KLmt = ((self.N * self._elnt + self.nu * (self.s / self._s0 - 1.0 - \
# np.log(2.0 * self.s)) + np.log(self.beta) + self._beta0 / self.beta + \
# self.nu * self.s * self._beta0 * (self.m - self._m0)**2) * 0.5 - \
# gammaln(self.nu * 0.5)).sum()
# Wishart part
KLmt = (self.N * self._elnt + self.nu * (self.s / self._s0 - 1.0)).sum() \
* 0.5 + nmix * lnZ_Wishart(self._nu0,self._s0)
for k in xrange(nmix):
KLmt -= lnZ_Wishart(self.nu[k],self.s[k])
# Conditional Gaussian part
KLmt += 0.5 * (np.log(self.beta/self._beta0) + self._beta0/self.beta - 1 \
+ self._beta0 * self.nu * self.s * (self.m-self._m0)**2).sum()
return lnpX + Hz - KLpi - KLmt
def _VBLowerBound2(self,obs,use_ext=True):
# variational lower bound
nobs = len(obs)
nmix = self._nstates
self.N = self.z.sum(0) # need to be updated !!
# H[q(z)] = -<lnq(z)>
Hz = 0.0
for k in xrange(nmix):
Hz -= np.dot(self.z[:,k],np.log(self.z[:,k]))
# KL[q(pi)||p(pi)]
KLpi = (gammaln(nmix * self._u0 + nobs) - gammaln(nmix * self._u0)) \
- gammaln(self.u).sum() + nmix * gammaln(self._u0)
# KL[q(mu,tau)||p(mu,tau)]
KLmt = (np.log(self.beta).sum() - nmix * self._beta0) * 0.5
KLmt += lnZ_Wishart(self._nu0,self._s0) * nmix
for k in xrange(nmix):
KLmt -= lnZ_Wishart(self.nu[k],self.s[k])
#print "%12.5e %12.5e %12.5e"%(Hz,-KLpi,-KLmt)
return Hz - KLpi - KLmt
def _VBFreeEnergy(self,obs,use_ext=True):
return - self._VBLowerBound2(obs,use_ext)
def fit(self,obs,niter=200,eps=1e-4,ifreq=50,init=True,plot=False,\
use_ext=False):
if init : self._init_params(obs)
F_old = 1.0e50
for i in range(niter):
old_u = np.copy(self.u)
old_m = np.copy(self.m)
old_s = np.copy(self.s)
self._VBE(obs,use_ext)
self._VBM(obs)
F_new = self._VBFreeEnergy(obs,use_ext)
dF = F_new - F_old
if abs(dF) < eps :
print "%8dth iter, Free Energy = %12.6e, dF = %12.6e" %(i,F_new,dF)
print "%12.6e < %12.6e Converged" %(dF, eps)
break
if i % ifreq == 0:
if dF < 0.0:
print "%8dth iter, Free Energy = %12.6e, dF = %12.6e" %(i,F_new,dF)
else :
print "%8dth iter, Free Energy = %12.6e, dF = %12.6e warning" \
%(i,F_new,dF)
#conv_u = np.allclose(self.u,old_u)
#conv_m = np.allclose(self.m,old_m)
#conv_s = np.allclose(self.s,old_s)
#if conv_u and conv_m and conv_s:
# break
F_old = F_new
if plot:
self.plotPDF(obs)
return self
def showModel(self,min_pi=0.01):
nmix = self._nstates
params = sorted(zip(self._epi,self.m,self._et),reverse=True)
relavent_clusters = []
for k in xrange(nmix):
if params[k][0] < min_pi:
break
relavent_clusters.append(params[k])
print "%dth component, pi = %8.3g, mu = %8.3g, tau = %8.3g" \
% (k+1,params[k][0],params[k][1],params[k][2])
return relavent_clusters
def pdf(self,x,min_pi=0.01):
params = self.showModel(min_pi)
pi = -np.sort(-self._epi)[:len(params)]
pi = pi / pi.sum()
y = np.array([GaussianPDF(x,p[1],p[2]) * pi[k] \
for k,p in enumerate(params)])
return y
def plotPDF(self,obs,bins=100,min_pi=0.01):
try :
import matplotlib.pyplot as plt
except ImportError :
print "cannot import pyplot"
return
x = np.linspace(min(obs),max(obs),bins)
y = self.pdf(x,min_pi)
plt.hist(obs,bins,label="observed",normed=True)
plt.plot(x,y.sum(0),label="sum",linewidth=8)
for k,yy in enumerate(y) :
plt.plot(x,yy,label="%dth cluster"%(k+1),linewidth=3)
plt.legend(loc=0)
plt.show()
def decode(self,obs):
z = self._evaluateHiddenState(readObs(obs))
codes = z.argmax(1)
clust = [[] for i in range(z.shape[1])]
        for (o, c) in zip(obs, codes):
            clust[c].append(o)
        clust = [np.array(cl) for cl in clust]
return codes,clust
def test1(nmix,niter=10000):
Y = testData2(2000)
Y = cnormalize(Y)
model = VBGMM1D(nmix)
model.fit(Y,niter)
model.showModel()
#model.plotPDF(Y,bins=200)
if __name__ == "__main__":
from sys import argv
nmix = int(argv[1])
test1(nmix)
| bsd-3-clause |
pranjalv123/hgt-nn | simhgt.py | 1 | 6116 | import dendropy
import subprocess
import numpy
from ASTRID import ASTRID
#you need ASTRID, which can be installed with
# pip install astrid-phylo
#and fasttree, which can be gotten from apt-get or at
#http://meta.microbesonline.org/fasttree/
def get_desc_edges(t, n):
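    # Collect the edges of every node in the clade rooted at n (including n's own edge)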
desc = set()
nodes = [n]
while nodes:
n = nodes.pop()
desc.add(n.edge)
for n2 in n.child_nodes():
nodes.append(n2)
return list(desc)
class Simhgt(object):
def __init__(self, stree):
self.stree = stree
self.stree.suppress_unifurcations()
def dohgt(self, ltree, e1, e2):
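        # SPR-style move used to emulate a horizontal gene transfer: the subtree
        # below edge e1 is pruned and regrafted onto edge e2, keeping e1's
        # original branch length on the transferred clade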
#prune
length = e1.length
h = e1.head_node
ltree.prune_subtree(h, suppress_unifurcations=False)
h.edge_length=length
#regraft
tn = e2.tail_node
hn = e2.head_node
new = tn.new_child()
new.edge.length = e2.length - length
tn.remove_child(hn)
new.add_child(hn)
hn.edge.length = length
new.add_child(h)
ltree.update_splits(suppress_unifurcations=True)
return ltree
def dorandomhgt(self):
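        # Pick a random internal node and transfer a random edge from one of its
        # child clades onto an edge of the other clade; returns the modified tree
        # together with the split bitmasks of the donor and recipient edges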
ltree = self.stree.clone()
top = numpy.random.choice(ltree.internal_nodes(), 1)[0]
ltree.update_bipartitions()
e1 = numpy.random.choice(get_desc_edges(ltree, top.child_nodes()[0]), 1)[0]
e2 = numpy.random.choice(get_desc_edges(ltree, top.child_nodes()[1]), 1)[0]
c1 = e1.split_bitmask
c2 = e2.split_bitmask
ltree = self.dohgt(ltree, e1, e2)
for edge in ltree.postorder_edge_iter():
edge.pop_size = 100000
return ltree, c1, c2
def simgtrees(self, seqlen=100, nhgt=10, nils=10):
gtrees = dendropy.TreeList()
hgtevents = []
for i in range(nhgt):
ltree, c1, c2 = self.dorandomhgt()
gtrees.append(ltree)
hgtevents.append((c1, c2))
for i in range(nils):
gene_to_species_map = dendropy.TaxonNamespaceMapping.create_contained_taxon_mapping(containing_taxon_namespace=ltree.taxon_namespace, num_contained=1, contained_taxon_label_fn = lambda a,b:a)
gtree = dendropy.simulate.treesim.contained_coalescent_tree(ltree, gene_to_containing_taxon_map=gene_to_species_map)
gtrees.append(gtree)
hgtevents.append((c1, c2))
seqs = []
for tree in gtrees:
seqs.append(dendropy.simulate.charsim.hky85_chars(seqlen, tree))
self.truetrees = gtrees
self.seqs = seqs
self.hgtevents = hgtevents
return gtrees, seqs, hgtevents
def estgtrees(self):
self.esttrees = dendropy.TreeList()
for mat in self.seqs:
tree, _ = subprocess.Popen(["fasttree", "-nt", "-gtr"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate(mat.as_string(schema='phylip'))
self.esttrees.append(dendropy.Tree.get_from_string(tree, 'newick'))
def gen_test_data(file=None, seed=None, nreps=50, ntaxa=15, ngenes=1000, seqlen=100):
if seed:
numpy.random.seed(seed)
dendropy.utility.GLOBAL_RNG.seed(seed)
if file:
sh = Simhgt(dendropy.Tree.get_from_file(file, 'newick'))
else:
stree = dendropy.simulate.treesim.birth_death_tree(birth_rate=1.0, death_rate=0.5, ntax=ntaxa)
sh = Simhgt(stree)
egts = []
tgts = []
seqs = []
hgts = []
for i in range(nreps):
sh.simgtrees(nhgt=ngenes, nils=1, seqlen=seqlen)
sh.estgtrees()
egts.append(sh.esttrees)
tgts.append(sh.truetrees)
seqs.append(sh.seqs)
hgts.append(sh.hgtevents)
return tgts, egts, seqs, hgts, sh.stree
from sklearn import svm, ensemble
from tree2vec import tree2vec
import scipy
def pipeline(truetrees, esttrees, seqs, hgtevents, stree, learner=None):
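    # Rough outline: (1) estimate a species tree from the input gene trees with
    # ASTRID, (2) simulate training gene trees with known HGT events on that
    # species tree, (3) encode all trees as sparse bipartition indicator vectors
    # and (4) train the supplied classifier to predict the HGT source edge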
ast = ASTRID(esttrees)
ast.run('auto')
estspeciestree = ast.tree
print str(stree)
estspeciestree.migrate_taxon_namespace(stree.taxon_namespace)
print str(estspeciestree)
print "RF distance", (dendropy.treecalc.treecompare.false_positives_and_negatives(estspeciestree, stree)), len(stree.internal_edges())
sim = Simhgt(estspeciestree)
sim.simgtrees(seqlen=20, nhgt=100, nils=10)
sim.estgtrees()
traintrees = sim.truetrees + sim.esttrees
trainhgts = sim.hgtevents + sim.hgtevents
tn = traintrees[0].taxon_namespace
traintrees = [tree2vec(i) for i in traintrees]
esttrees = [tree2vec(i) for i in esttrees]
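    # Sparse indicator matrices over the 2**n_taxa possible taxon bipartitions;
    # this assumes tree2vec returns the split indices present in each tree, so
    # after the transpose below each row is a 0/1 encoding of one tree's splits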
trainmat = scipy.sparse.csr_matrix((2**len(tn), len(traintrees)))
estmat = scipy.sparse.csr_matrix((2**len(tn), len(esttrees)))
trainhgtmat = scipy.sparse.csr_matrix((2**len(tn) * 2, len(traintrees)))
trainhgtlist1 = numpy.array([i[0] for i in trainhgts])
trainhgtlist2 = numpy.array([i[1] for i in trainhgts])
for i, hgt in enumerate(trainhgts):
trainhgtmat[hgt[0], i] = 1
trainhgtmat[hgt[1] + len(tn), i] = 1
for i, j in enumerate(traintrees):
trainmat[j, i] = 1
for i, j in enumerate(esttrees):
estmat[j, i] = 1
trainmat = trainmat.T
estmat = estmat.T
trainhgtmat = trainhgtmat.T
print trainmat.shape
print trainhgtmat.shape
print estmat.shape
learner.fit(trainmat, trainhgtlist1)
estimated_hgts = learner.predict(estmat)
print estimated_hgts
print learner
return estimated_hgts
def run(ngenes=50, ntaxa=5):
testdata = gen_test_data(ntaxa=ntaxa, ngenes=ngenes, nreps=1)
# learner = ensemble.RandomForestClassifier(n_estimators=100)
learner = svm.SVC()
ehgt = pipeline(testdata[0][0], testdata[1][0], testdata[2][0], testdata[3][0], testdata[4], learner = learner)
print ehgt
print testdata[3]
sources = [i[0] for i in testdata[3][0]]
print sources
for i in zip(ehgt, sources):
print i
return zip(ehgt, sources)
if __name__ == "__main__":
run()
| gpl-3.0 |
GEMScienceTools/gmpe-smtk | smtk/trellis/trellis_plots.py | 1 | 66310 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2017 GEM Foundation and G. Weatherill
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
'''
Sets up a simple rupture-site configuration to allow for physical comparison
of GMPEs
'''
import os
import sys
import re
import json
import numpy as np
from collections import OrderedDict
try: # https://stackoverflow.com/q/53978542
from collections.abc import Iterable # noqa
except ImportError:
from collections import Iterable # noqa
from cycler import cycler
from math import floor, ceil
import matplotlib
from copy import deepcopy
import matplotlib.pyplot as plt
from openquake.hazardlib import gsim, imt
from openquake.hazardlib.gsim.base import RuptureContext
from openquake.hazardlib.gsim.gmpe_table import GMPETable
from openquake.hazardlib.gsim.base import GMPE
from openquake.hazardlib.scalerel.wc1994 import WC1994
from smtk.sm_utils import _save_image_tight
import smtk.trellis.trellis_utils as utils
from smtk.trellis.configure import GSIMRupture, DEFAULT_POINT
# Default - defines a 21 color and line-type cycle
matplotlib.rcParams["axes.prop_cycle"] = \
cycler(u'color', ['b', 'g', 'r', 'c', 'm', 'y', 'k',
'b', 'g', 'r', 'c', 'm', 'y', 'k',
'b', 'g', 'r', 'c', 'm', 'y', 'k',
'b', 'g', 'r', 'c', 'm', 'y', 'k']) +\
cycler(u'linestyle', ["-", "-", "-", "-", "-", "-", "-",
"--", "--", "--", "--", "--", "--", "--",
"-.", "-.", "-.", "-.", "-.", "-.", "-.",
":", ":", ":", ":", ":", ":", ":"])
# Get a list of the available GSIMs
AVAILABLE_GSIMS = gsim.get_available_gsims()
# Generic dictionary of parameters needed for a trellis calculation
PARAM_DICT = {'magnitudes': [],
'distances': [],
'distance_type': 'rjb',
'vs30': [],
'strike': None,
'dip': None,
'rake': None,
'ztor': None,
'hypocentre_location': (0.5, 0.5),
'hypo_loc': (0.5, 0.5),
'msr': WC1994()}
# Defines the plotting units for given intensitiy measure type
PLOT_UNITS = {'PGA': 'g',
'PGV': 'cm/s',
'SA': 'g',
'SD': 'cm',
'IA': 'm/s',
'CSV': 'g-sec',
'RSD': 's',
'MMI': ''}
# Verbose label for each given distance type
DISTANCE_LABEL_MAP = {'repi': 'Epicentral Dist.',
'rhypo': 'Hypocentral Dist.',
'rjb': 'Joyner-Boore Dist.',
'rrup': 'Rupture Dist.',
'rx': 'Rx Dist.'}
# Default figure size
FIG_SIZE = (7, 5)
# RESET Axes tick labels
matplotlib.rc("xtick", labelsize=12)
matplotlib.rc("ytick", labelsize=12)
def simplify_contexts(rupture):
"""
Reduce a rupture to a set of basic openquake context objects
:returns:
openquake.hazardlib.gsim.base.SitesContext
        openquake.hazardlib.gsim.base.DistancesContext
openquake.hazardlib.gsim.base.RuptureContext
"""
sctx, rctx, dctx = rupture.get_gsim_contexts()
sctx.__dict__.update(rctx.__dict__)
for val in dctx.__dict__:
if getattr(dctx, val) is not None:
setattr(dctx, val, getattr(dctx, val)[0])
return sctx.__dict__, rctx.__dict__, dctx.__dict__
def _get_gmpe_name(gsim):
"""
Returns the name of the GMPE given an instance of the class
"""
if gsim.__class__.__name__.startswith("GMPETable"):
match = re.match(r'^GMPETable\(([^)]+?)\)$', str(gsim))
filepath = match.group(1).split("=")[1][1:-1]
# return a consistent name (see _check_gsim_list):
return 'GMPETable(gmpe_table=%s)' % filepath
else:
gsim_name = gsim.__class__.__name__
additional_args = []
for key in gsim.__dict__:
if key.startswith("kwargs"):
continue
additional_args.append("{:s}={:s}".format(key,
str(gsim.__dict__[key])))
if len(additional_args):
gsim_name_str = "({:s})".format(", ".join(additional_args))
gsim_name_str = gsim_name_str.replace("_", " ")
return gsim_name + gsim_name_str
else:
return gsim_name
def _check_gsim_list(gsim_list):
"""
Checks the list of GSIM models and returns a dict where each gsim in
`gsim_list` is mapped to its openquake.hazardlib.gsim class.
Raises error if GSIM is not supported in OpenQuake
:param gsim_list: list of GSIM names (str) or OpenQuake Gsims
:return: a dict of GSIM names (str) mapped to the associated GSIM
"""
output_gsims = {}
for gs in gsim_list:
if isinstance(gs, GMPE):
# retrieve the name of an instantated GMPE via `_get_gmpe_name`:
output_gsims[_get_gmpe_name(gs)] = gs
elif gs.startswith("GMPETable"):
# Get filename
match = re.match(r'^GMPETable\(([^)]+?)\)$', gs)
filepath = match.group(1).split("=")[1]
output_gsims[gs] = GMPETable(gmpe_table=filepath)
elif gs not in AVAILABLE_GSIMS:
raise ValueError('%s Not supported by OpenQuake' % gs)
else:
output_gsims[gs] = AVAILABLE_GSIMS[gs]()
return output_gsims
def _get_imts(imts):
"""
Reads a list of IMT strings and returns the corresponding
openquake.hazardlib.imt class
:param list imts:
List of IMTs(str)
"""
out_imts = []
for imtl in imts:
out_imts.append(imt.from_string(imtl))
return out_imts
class BaseTrellis(object):
"""
Base class for holding functions related to the trellis plotting
:param list or np.ndarray magnitudes:
List of rupture magnitudes
:param dict distances:
Dictionary of distance measures as a set of np.ndarrays -
        {'repi': np.ndarray,
'rjb': np.ndarray,
'rrup': np.ndarray,
'rhypo': np.ndarray}
The number of elements in all arrays must be equal
:param list gsims:
List of instance of the openquake.hazardlib.gsim classes to represent
GMPEs
:param list imts:
List of intensity measures
:param dctx:
Distance context as instance of :class:
openquake.hazardlib.gsim.base.DistancesContext
:param rctx:
Rupture context as instance of :class:
openquake.hazardlib.gsim.base.RuptureContext
:param sctx:
        Sites context as instance of :class:
openquake.hazardlib.gsim.base.SitesContext
:param int nsites:
Number of sites
:param str stddevs:
Standard deviation types
:param str filename:
Name of output file for exporting the figure
:param str filetype:
String to indicate file type for exporting the figure
:param int dpi:
Dots per inch for export figure
:param str plot_type:
Type of plot (only used in distance Trellis)
:param str distance_type:
Type of source-site distance to be used in distances trellis
:param tuple figure_size:
Size of figure (passed to Matplotlib pyplot.figure() function)
:param tuple xlim:
Limits on the x-axis (will apply to all subplot axes)
:param tuple ylim:
Limits on the y-axis (will apply to all subplot axes)
:param float legend_fontsize:
Controls the fontsize of the legend (default 14)
:param int ncol:
Number of columns for the legend (default 1)
"""
def __init__(self, magnitudes, distances, gsims, imts, params,
stddevs="Total", rupture=None, **kwargs):
"""
"""
# Set default keyword arguments
kwargs.setdefault('filename', None)
kwargs.setdefault('filetype', "png")
kwargs.setdefault('dpi', 300)
kwargs.setdefault('plot_type', "loglog")
kwargs.setdefault('distance_type', "rjb")
kwargs.setdefault('figure_size', FIG_SIZE)
kwargs.setdefault('xlim', None)
kwargs.setdefault('ylim', None)
kwargs.setdefault("legend_fontsize", 14)
kwargs.setdefault("ncol", 1)
self.rupture = rupture
self.magnitudes = magnitudes
self.distances = distances
self.gsims = _check_gsim_list(gsims)
self.params = params
self.imts = imts
self.dctx = None
self.rctx = None
self.sctx = None
self.nsites = 0
self._preprocess_distances()
self._preprocess_ruptures()
self._preprocess_sites()
self.stddevs = stddevs
self.filename = kwargs['filename']
self.filetype = kwargs['filetype']
self.dpi = kwargs['dpi']
self.plot_type = kwargs['plot_type']
self.distance_type = kwargs['distance_type']
self.figure_size = kwargs["figure_size"]
self.xlim = kwargs["xlim"]
self.ylim = kwargs["ylim"]
self.legend_fontsize = kwargs["legend_fontsize"]
self.ncol = kwargs["ncol"]
def _preprocess_distances(self):
"""
Preprocesses the input distances to check that all the necessary
distance types required by the GSIMS are found in the
DistancesContext()
"""
self.dctx = gsim.base.DistancesContext()
required_dists = []
for gmpe_name, gmpe in self.gsims.items():
gsim_distances = [dist for dist in gmpe.REQUIRES_DISTANCES]
for dist in gsim_distances:
if dist not in self.distances:
raise ValueError('GMPE %s requires distance type %s'
% (gmpe_name, dist))
if dist not in required_dists:
required_dists.append(dist)
dist_check = False
for dist in required_dists:
if dist_check and not (len(self.distances[dist]) == self.nsites):
raise ValueError("Distances arrays not equal length!")
else:
self.nsites = len(self.distances[dist])
dist_check = True
setattr(self.dctx, dist, self.distances[dist])
def _preprocess_ruptures(self):
"""
Preprocesses rupture parameters to ensure all the necessary rupture
information for the GSIMS is found in the input parameters
"""
self.rctx = []
if (not isinstance(self.magnitudes, list) and not
isinstance(self.magnitudes, np.ndarray)):
self.magnitudes = np.array(self.magnitudes)
# Get all required rupture attributes
required_attributes = []
for gmpe_name, gmpe in self.gsims.items():
rup_params = [param for param in gmpe.REQUIRES_RUPTURE_PARAMETERS]
for param in rup_params:
if param == 'mag':
continue
elif param not in self.params:
raise ValueError("GMPE %s requires rupture parameter %s"
% (gmpe_name, param))
elif param not in required_attributes:
required_attributes.append(param)
else:
pass
for mag in self.magnitudes:
rup = gsim.base.RuptureContext()
setattr(rup, 'mag', mag)
for attr in required_attributes:
setattr(rup, attr, self.params[attr])
self.rctx.append(rup)
def _preprocess_sites(self):
"""
Preprocesses site parameters to ensure all the necessary rupture
information for the GSIMS is found in the input parameters
"""
slots = set()
for gmpe in self.gsims.values():
slots.update(gmpe.REQUIRES_SITES_PARAMETERS)
self.sctx = gsim.base.SitesContext(slots=slots)
required_attributes = []
for gmpe_name, gmpe in self.gsims.items():
site_params = [param for param in gmpe.REQUIRES_SITES_PARAMETERS]
for param in site_params:
if param not in self.params:
raise ValueError("GMPE %s requires site parameter %s"
% (gmpe_name, param))
elif param not in required_attributes:
required_attributes.append(param)
else:
pass
for param in required_attributes:
if isinstance(self.params[param], float):
setattr(self.sctx, param,
self.params[param] * np.ones(self.nsites, dtype=float))
if isinstance(self.params[param], bool):
if self.params[param]:
setattr(self.sctx, param, self.params[param] *
np.ones(self.nsites, dtype=bool))
else:
setattr(self.sctx, param, self.params[param] *
np.zeros(self.nsites, dtype=bool))
elif isinstance(self.params[param], Iterable):
if not len(self.params[param]) == self.nsites:
raise ValueError("Length of sites value %s not equal to"
" number of sites %" % (param,
self.nsites))
setattr(self.sctx, param, self.params[param])
else:
pass
@classmethod
def from_rupture_model(cls, rupture, gsims, imts, stddevs='Total',
**kwargs):
"""
Constructs the Base Trellis Class from a rupture model
:param rupture:
Rupture as instance of the :class:
smtk.trellis.configure.GSIMRupture
"""
kwargs.setdefault('filename', None)
kwargs.setdefault('filetype', "png")
kwargs.setdefault('dpi', 300)
kwargs.setdefault('plot_type', "loglog")
kwargs.setdefault('distance_type', "rjb")
kwargs.setdefault('xlim', None)
kwargs.setdefault('ylim', None)
assert isinstance(rupture, GSIMRupture)
magnitudes = [rupture.magnitude]
sctx, rctx, dctx = rupture.get_gsim_contexts()
# Create distances dictionary
distances = {}
for key in dctx._slots_:
distances[key] = getattr(dctx, key)
# Add all other parameters to the dictionary
params = {}
for key in rctx._slots_:
params[key] = getattr(rctx, key)
for key in sctx._slots_:
params[key] = getattr(sctx, key)
return cls(magnitudes, distances, gsims, imts, params, stddevs,
rupture=rupture, **kwargs)
def plot(self):
"""
Creates the plot!
"""
raise NotImplementedError("Cannot create plot of base class!")
def _get_ylabel(self, imt):
"""
Returns the label for plotting on a y axis
"""
raise NotImplementedError
class MagnitudeIMTTrellis(BaseTrellis):
"""
Class to generate plots showing the scaling of a set of IMTs with
magnitude
"""
def __init__(self, magnitudes, distances, gsims, imts, params,
stddevs="Total", **kwargs):
"""
        Instantiate with a list of magnitudes and the corresponding distances
given in a dictionary
"""
for key in distances:
if isinstance(distances[key], float):
distances[key] = np.array([distances[key]])
super(MagnitudeIMTTrellis, self).__init__(
magnitudes, distances, gsims, imts, params, stddevs, **kwargs)
@classmethod
def from_rupture_properties(cls, properties, magnitudes, distance,
gsims, imts, stddevs='Total', **kwargs):
'''Constructs the Base Trellis Class from a dictionary of
properties. In this class, this method is simply an alias of
`from_rupture_model`
'''
return cls.from_rupture_model(properties, magnitudes, distance,
gsims, imts, stddevs=stddevs,
**kwargs)
@classmethod
def from_rupture_model(cls, properties, magnitudes, distance, gsims, imts,
stddevs='Total', **kwargs):
"""
Implements the magnitude trellis from a dictionary of properties,
magnitudes and distance
"""
kwargs.setdefault('filename', None)
kwargs.setdefault('filetype', "png")
kwargs.setdefault('dpi', 300)
kwargs.setdefault('plot_type', "loglog")
kwargs.setdefault('distance_type', "rjb")
kwargs.setdefault('xlim', None)
kwargs.setdefault('ylim', None)
# Properties
properties.setdefault("tectonic_region", "Active Shallow Crust")
properties.setdefault("rake", 0.)
properties.setdefault("ztor", 0.)
properties.setdefault("strike", 0.)
properties.setdefault("msr", WC1994())
properties.setdefault("initial_point", DEFAULT_POINT)
properties.setdefault("hypocentre_location", None)
properties.setdefault("line_azimuth", 90.)
properties.setdefault("origin_point", (0.5, 0.5))
properties.setdefault("vs30measured", True)
properties.setdefault("z1pt0", None)
properties.setdefault("z2pt5", None)
properties.setdefault("backarc", False)
properties.setdefault("distance_type", "rrup")
# Define a basic rupture configuration
rup = GSIMRupture(magnitudes[0], properties["dip"],
properties["aspect"], properties["tectonic_region"],
properties["rake"], properties["ztor"],
properties["strike"], properties["msr"],
properties["initial_point"],
properties["hypocentre_location"])
# Add the target sites
_ = rup.get_target_sites_point(distance, properties['distance_type'],
properties["vs30"],
properties["line_azimuth"],
properties["origin_point"],
properties["vs30measured"],
properties["z1pt0"],
properties["z2pt5"],
properties["backarc"])
# Get the contexts
sctx, rctx, dctx = rup.get_gsim_contexts()
# Create an equivalent 'params' dictionary by merging the site and
# rupture properties
sctx.__dict__.update(rctx.__dict__)
for val in dctx.__dict__:
if getattr(dctx, val) is not None:
setattr(dctx, val, getattr(dctx, val)[0])
return cls(magnitudes, dctx.__dict__, gsims, imts, sctx.__dict__,
**kwargs)
def plot(self):
fig = self.get_fig()
fig.show()
def get_fig(self):
"""
Creates the trellis plot!
"""
# Determine the optimum number of rows and columns
nrow, ncol = utils.best_subplot_dimensions(len(self.imts))
# Get means and standard deviations
gmvs = self.get_ground_motion_values()
fig = plt.figure(figsize=self.figure_size)
fig.set_tight_layout(True)
row_loc = 0
col_loc = 0
for i_m in self.imts:
if col_loc == ncol:
row_loc += 1
col_loc = 0
# Construct the plot
self._build_plot(
plt.subplot2grid((nrow, ncol), (row_loc, col_loc)), i_m, gmvs)
col_loc += 1
# Add legend
lgd = plt.legend(self.lines,
self.labels,
loc=3,
bbox_to_anchor=(1.1, 0.),
fontsize=self.legend_fontsize,
ncol=self.ncol)
_save_image_tight(fig, lgd, self.filename, self.filetype, self.dpi)
return fig
def _build_plot(self, ax, i_m, gmvs):
"""
Plots the lines for a given axis
:param ax:
Axes object
:param str i_m:
Intensity Measure
:param dict gmvs:
Ground Motion Values Dictionary
"""
self.labels = []
self.lines = []
for gmpe_name in self.gsims:
self.labels.append(gmpe_name)
line, = ax.semilogy(self.magnitudes, gmvs[gmpe_name][i_m][:, 0],
linewidth=2.0, label=gmpe_name)
self.lines.append(line)
ax.grid(True)
if isinstance(self.xlim, tuple):
ax.set_xlim(self.xlim[0], self.xlim[1])
else:
ax.set_xlim(floor(self.magnitudes[0]),
ceil(self.magnitudes[-1]))
if isinstance(self.ylim, tuple):
ax.set_ylim(self.ylim[0], self.ylim[1])
self._set_labels(i_m, ax)
def _set_labels(self, i_m, ax):
"""
Sets the labels on the specified axes
"""
ax.set_xlabel("Magnitude", fontsize=16)
ax.set_ylabel(self._get_ylabel(i_m), fontsize=16)
def _get_ylabel(self, i_m):
"""
Return the y-label for the magnitude IMT trellis
"""
if 'SA(' in i_m:
units = PLOT_UNITS['SA']
else:
units = PLOT_UNITS[i_m]
return "Median {:s} ({:s})".format(i_m, units)
def to_dict(self):
"""
Parse the ground motion values to a dictionary
"""
gmvs = self.get_ground_motion_values()
nrow, ncol = utils.best_subplot_dimensions(len(self.imts))
gmv_dict = OrderedDict([
("xvalues", self.magnitudes.tolist()),
("xlabel", "Magnitude")])
nvals = len(self.magnitudes)
gmv_dict["figures"] = []
row_loc = 0
col_loc = 0
for imt in self.imts:
if col_loc == ncol:
row_loc += 1
col_loc = 0
# Set the dictionary of y-values
ydict = {"ylabel": self._get_ylabel(imt),
"imt": imt,
"row": row_loc,
"column": col_loc,
"yvalues": OrderedDict([])}
for gsim in gmvs:
if not len(gmvs[gsim][imt]):
# GSIM missing, set None
ydict["yvalues"][gsim] = [None] * nvals
continue
iml_to_list = []
for val in gmvs[gsim][imt].flatten().tolist():
if np.isnan(val) or (val < 0.0):
iml_to_list.append(None)
else:
iml_to_list.append(val)
ydict["yvalues"][gsim] = iml_to_list
gmv_dict["figures"].append(ydict)
col_loc += 1
return gmv_dict
def to_json(self):
"""
Serializes the ground motion values to json
"""
return json.dumps(self.to_dict())
def get_ground_motion_values(self):
"""
        Runs the GMPE calculations to retrieve ground motion values
:returns:
Nested dictionary of values
{'GMPE1': {'IM1': , 'IM2': },
'GMPE2': {'IM1': , 'IM2': }}
"""
gmvs = OrderedDict()
for gmpe_name, gmpe in self.gsims.items():
gmvs.update([(gmpe_name, {})])
for i_m in self.imts:
gmvs[gmpe_name][i_m] = np.zeros(
[len(self.rctx), self.nsites], dtype=float)
for iloc, rct in enumerate(self.rctx):
try:
means, _ = gmpe.get_mean_and_stddevs(
self.sctx,
rct,
self.dctx,
imt.from_string(i_m),
[self.stddevs])
gmvs[gmpe_name][i_m][iloc, :] = \
np.exp(means)
except (KeyError, ValueError):
gmvs[gmpe_name][i_m] = np.array([], dtype=float)
break
return gmvs
def pretty_print(self, filename=None, sep=","):
"""
Format the ground motion for printing to file or to screen
:param str filename:
Path to file
:param str sep:
Separator character
"""
if filename:
fid = open(filename, "w")
else:
fid = sys.stdout
# Print Meta information
self._write_pprint_header_line(fid, sep)
# Print Distances
distance_str = sep.join(["{:s}{:s}{:s}".format(key, sep, str(val[0]))
for (key, val) in self.dctx.items()])
fid.write("Distances%s%s\n" % (sep, distance_str))
# Loop over IMTs
gmvs = self.get_ground_motion_values()
for imt in self.imts:
fid.write("%s\n" % imt)
header_str = "Magnitude" + sep + sep.join(self.gsims)
fid.write("%s\n" % header_str)
for i, mag in enumerate(self.magnitudes):
data_string = sep.join(["{:.8f}".format(
gmvs[gmpe_name][imt][i, 0]) for gmpe_name in self.gsims])
fid.write("{:s}{:s}{:s}\n".format(str(mag), sep, data_string))
fid.write("====================================================\n")
if filename:
fid.close()
def _write_pprint_header_line(self, fid, sep=","):
"""
Write the header lines of the pretty print function
"""
fid.write("Magnitude IMT Trellis\n")
fid.write("%s\n" % sep.join([
"{:s}{:s}{:s}".format(key, sep, str(val))
for (key, val) in self.params.items()]))
class MagnitudeSigmaIMTTrellis(MagnitudeIMTTrellis):
"""
Creates the Trellis plot for the standard deviations
"""
def _build_plot(self, ax, i_m, gmvs):
"""
Plots the lines for a given axis
:param ax:
Axes object
:param str i_m:
Intensity Measure
:param dict gmvs:
Ground Motion Values Dictionary
"""
self.labels = []
self.lines = []
for gmpe_name in self.gsims:
self.labels.append(gmpe_name)
line, = ax.plot(self.magnitudes,
gmvs[gmpe_name][i_m][:, 0],
linewidth=2.0,
label=gmpe_name)
self.lines.append(line)
ax.grid(True)
if isinstance(self.xlim, tuple):
ax.set_xlim(self.xlim[0], self.xlim[1])
else:
ax.set_xlim(floor(self.magnitudes[0]),
ceil(self.magnitudes[-1]))
if isinstance(self.ylim, tuple):
ax.set_ylim(self.ylim[0], self.ylim[1])
self._set_labels(i_m, ax)
def get_ground_motion_values(self):
"""
        Runs the GMPE calculations to retrieve ground motion values
:returns:
Nested dictionary of values
{'GMPE1': {'IM1': , 'IM2': },
'GMPE2': {'IM1': , 'IM2': }}
"""
gmvs = OrderedDict()
for gmpe_name, gmpe in self.gsims.items():
gmvs.update([(gmpe_name, {})])
for i_m in self.imts:
gmvs[gmpe_name][i_m] = np.zeros([len(self.rctx),
self.nsites],
dtype=float)
for iloc, rct in enumerate(self.rctx):
try:
_, sigmas = gmpe.get_mean_and_stddevs(
self.sctx,
rct,
self.dctx,
imt.from_string(i_m),
[self.stddevs])
gmvs[gmpe_name][i_m][iloc, :] = sigmas[0]
except KeyError:
gmvs[gmpe_name][i_m] = np.array([], dtype=float)
break
return gmvs
def get_ground_motion_values_from_rupture(self):
"""
"""
gmvs = OrderedDict()
rctx, dctx, sctx = self._get_context_sets()
for gmpe_name, gmpe in self.gsims.items():
gmvs.update([(gmpe_name, {})])
for i_m in self.imts:
gmvs[gmpe_name][i_m] = np.zeros(
[len(self.rctx), self.nsites], dtype=float)
for iloc, (rct, dct, sct) in enumerate(zip(rctx, dctx, sctx)):
try:
_, sigmas = gmpe.get_mean_and_stddevs(
sct,
rct,
dct,
imt.from_string(i_m),
[self.stddevs])
gmvs[gmpe_name][i_m][iloc, :] = sigmas[0]
except (KeyError, ValueError):
gmvs[gmpe_name][i_m] = np.array([], dtype=float)
break
return gmvs
def _get_ylabel(self, i_m):
"""
"""
return self.stddevs + " Std. Dev. ({:s})".format(str(i_m))
def _set_labels(self, i_m, ax):
"""
Sets the axes labels
"""
ax.set_xlabel("Magnitude", fontsize=16)
ax.set_ylabel(self._get_ylabel(i_m), fontsize=16)
def _write_pprint_header_line(self, fid, sep=","):
"""
Write the header lines of the pretty print function
"""
fid.write("Magnitude IMT %s Standard Deviations Trellis\n" %
self.stddevs)
fid.write("%s\n" % sep.join([
"{:s}{:s}{:s}".format(key, sep, str(val))
for (key, val) in self.params.items()]))
class DistanceIMTTrellis(MagnitudeIMTTrellis):
"""
Trellis class to generate a plot of the GMPE attenuation with distance
"""
XLABEL = "%s (km)"
YLABEL = "Median %s (%s)"
def __init__(self, magnitudes, distances, gsims, imts, params,
stddevs="Total", **kwargs):
"""
Instantiation
"""
if isinstance(magnitudes, float):
magnitudes = [magnitudes]
super(DistanceIMTTrellis, self).__init__(magnitudes, distances, gsims,
imts, params, stddevs,
**kwargs)
@classmethod
def from_rupture_properties(cls, properties, magnitude, distances,
gsims, imts, stddevs='Total', **kwargs):
'''Constructs the Base Trellis Class from a rupture properties.
It internally creates a Rupture object and calls
`from_rupture_model`. When not listed, arguments take the same
values as `from_rupture_model`
:param distances: a numeric array of chosen distances
'''
params = {k: properties[k] for k in ['rake', 'initial_point', 'ztor',
'hypocentre_location', 'strike',
'msr', 'tectonic_region']
if k in properties}
rupture = GSIMRupture(magnitude, properties['dip'],
properties['aspect'], **params)
params = {k: properties[k] for k in ['line_azimuth', 'as_log',
'vs30measured', 'z1pt0', 'z2pt5',
'origin_point', 'backarc']
if k in properties}
rupture.get_target_sites_line_from_given_distances(distances,
properties['vs30'],
**params)
return cls.from_rupture_model(rupture, gsims, imts,
stddevs=stddevs, **kwargs)
@classmethod
def from_rupture_model(cls, rupture, gsims, imts, stddevs='Total',
**kwargs):
"""
Constructs the Base Trellis Class from a rupture model
:param rupture:
Rupture as instance of the :class:
smtk.trellis.configure.GSIMRupture
"""
kwargs.setdefault('filename', None)
kwargs.setdefault('filetype', "png")
kwargs.setdefault('dpi', 300)
kwargs.setdefault('plot_type', "loglog")
kwargs.setdefault('distance_type', "rjb")
kwargs.setdefault('xlim', None)
kwargs.setdefault('ylim', None)
assert isinstance(rupture, GSIMRupture)
magnitudes = [rupture.magnitude]
sctx, rctx, dctx = rupture.get_gsim_contexts()
# Create distances dictionary
distances = {}
for key in dctx._slots_:
distances[key] = getattr(dctx, key)
# Add all other parameters to the dictionary
params = {}
for key in rctx._slots_:
params[key] = getattr(rctx, key)
for key in sctx._slots_:
params[key] = getattr(sctx, key)
return cls(magnitudes, distances, gsims, imts, params, stddevs,
**kwargs)
def _build_plot(self, ax, i_m, gmvs):
"""
Plots the lines for a given axis
:param ax:
Axes object
:param str i_m:
Intensity Measure
:param dict gmvs:
Ground Motion Values Dictionary
"""
self.labels = []
self.lines = []
distance_vals = getattr(self.dctx, self.distance_type)
assert (self.plot_type == "loglog") or (self.plot_type == "semilogy")
for gmpe_name in self.gsims:
self.labels.append(gmpe_name)
if self.plot_type == "semilogy":
line, = ax.semilogy(distance_vals,
gmvs[gmpe_name][i_m][0, :],
linewidth=2.0,
label=gmpe_name)
min_x = distance_vals[0]
max_x = distance_vals[-1]
else:
line, = ax.loglog(distance_vals,
gmvs[gmpe_name][i_m][0, :],
linewidth=2.0,
label=gmpe_name)
min_x = 0.5
max_x = distance_vals[-1]
self.lines.append(line)
ax.grid(True)
if isinstance(self.xlim, tuple):
ax.set_xlim(self.xlim[0], self.xlim[1])
else:
ax.set_xlim(min_x, max_x)
if isinstance(self.ylim, tuple):
ax.set_ylim(self.ylim[0], self.ylim[1])
self._set_labels(i_m, ax)
def _set_labels(self, i_m, ax):
"""
Sets the labels on the specified axes
"""
ax.set_xlabel("%s (km)" % DISTANCE_LABEL_MAP[self.distance_type],
fontsize=16)
ax.set_ylabel(self._get_ylabel(i_m), fontsize=16)
def _get_ylabel(self, i_m):
"""
Returns the y-label for the given IMT
"""
if 'SA(' in i_m:
units = PLOT_UNITS['SA']
else:
units = PLOT_UNITS[i_m]
return "Median {:s} ({:s})".format(i_m, units)
def to_dict(self):
"""
Parses the ground motion values to a dictionary
"""
gmvs = self.get_ground_motion_values()
nrow, ncol = utils.best_subplot_dimensions(len(self.imts))
dist_label = "{:s} (km)".format(DISTANCE_LABEL_MAP[self.distance_type])
gmv_dict = OrderedDict([
("xvalues", self.distances[self.distance_type].tolist()),
("xlabel", dist_label)])
gmv_dict["figures"] = []
row_loc = 0
col_loc = 0
for imt in self.imts:
if col_loc == ncol:
row_loc += 1
col_loc = 0
# Set the dictionary of y-values
ydict = {"ylabel": self._get_ylabel(imt),
"imt": imt,
"row": row_loc,
"column": col_loc,
"yvalues": OrderedDict([])}
for gsim in gmvs:
data = [None if np.isnan(val) else val
for val in gmvs[gsim][imt].flatten()]
ydict["yvalues"][gsim] = data
gmv_dict["figures"].append(ydict)
col_loc += 1
return gmv_dict
def to_json(self):
"""
Exports ground motion values to json
"""
return json.dumps(self.to_dict())
def pretty_print(self, filename=None, sep=","):
"""
Format the ground motion for printing to file or to screen
:param str filename:
Path to file
:param str sep:
Separator character
"""
if filename:
fid = open(filename, "w")
else:
fid = sys.stdout
# Print Meta information
self._write_pprint_header_line(fid, sep)
fid.write("Magnitude%s%.2f\n" % (sep, self.magnitudes[0]))
# Loop over IMTs
gmvs = self.get_ground_motion_values()
for imt in self.imts:
fid.write("%s\n" % imt)
header_str = sep.join([key for key in self.distances])
header_str = "{:s}{:s}{:s}".format(
header_str,
sep,
sep.join(self.gsims))
fid.write("%s\n" % header_str)
for i in range(self.nsites):
dist_string = sep.join(["{:.4f}".format(self.distances[key][i])
for key in self.distances])
data_string = sep.join(["{:.8f}".format(
gmvs[gmpe_name][imt][0, i]) for gmpe_name in self.gsims])
fid.write("{:s}{:s}{:s}\n".format(dist_string,
sep,
data_string))
fid.write("====================================================\n")
if filename:
fid.close()
def _write_pprint_header_line(self, fid, sep=","):
"""
Write the header lines of the pretty print function
"""
fid.write("Distance (km) IMT Trellis\n")
fid.write("%s\n" % sep.join(["{:s}{:s}{:s}".format(key, sep, str(val))
for (key, val) in self.params.items()]))
class DistanceSigmaIMTTrellis(DistanceIMTTrellis):
"""
"""
def get_ground_motion_values(self):
"""
        Runs the GMPE calculations to retrieve ground motion values
:returns:
Nested dictionary of values
{'GMPE1': {'IM1': , 'IM2': },
'GMPE2': {'IM1': , 'IM2': }}
"""
gmvs = OrderedDict()
for gmpe_name, gmpe in self.gsims.items():
gmvs.update([(gmpe_name, {})])
for i_m in self.imts:
gmvs[gmpe_name][i_m] = np.zeros([len(self.rctx),
self.nsites],
dtype=float)
for iloc, rct in enumerate(self.rctx):
try:
_, sigmas = gmpe.get_mean_and_stddevs(
self.sctx,
rct,
self.dctx,
imt.from_string(i_m),
[self.stddevs])
gmvs[gmpe_name][i_m][iloc, :] = sigmas[0]
except (KeyError, ValueError):
gmvs[gmpe_name][i_m] = np.array([], dtype=float)
break
return gmvs
def get_ground_motion_values_from_rupture(self):
"""
"""
gmvs = OrderedDict()
rctx, dctx, sctx = self._get_context_sets()
for gmpe_name, gmpe in self.gsims.items():
gmvs.update([(gmpe_name, {})])
for i_m in self.imts:
gmvs[gmpe_name][i_m] = np.zeros(
[len(self.rctx), self.nsites], dtype=float)
for iloc, (rct, dct, sct) in enumerate(zip(rctx, dctx, sctx)):
try:
_, sigmas = gmpe.get_mean_and_stddevs(
sct,
rct,
dct,
imt.from_string(i_m),
[self.stddevs])
gmvs[gmpe_name][i_m][iloc, :] = sigmas[0]
except KeyError:
gmvs[gmpe_name][i_m] = np.array([], dtype=float)
break
return gmvs
def _build_plot(self, ax, i_m, gmvs):
"""
Plots the lines for a given axis
:param ax:
Axes object
:param str i_m:
Intensity Measure
:param dict gmvs:
Ground Motion Values Dictionary
"""
self.labels = []
self.lines = []
distance_vals = getattr(self.dctx, self.distance_type)
assert (self.plot_type == "loglog") or (self.plot_type == "semilogy")
for gmpe_name in self.gsims:
self.labels.append(gmpe_name)
if self.plot_type == "loglog":
line, = ax.semilogx(distance_vals,
gmvs[gmpe_name][i_m][0, :],
linewidth=2.0,
label=gmpe_name)
min_x = 0.5
max_x = distance_vals[-1]
else:
line, = ax.plot(distance_vals,
gmvs[gmpe_name][i_m][0, :],
linewidth=2.0,
label=gmpe_name)
min_x = distance_vals[0]
max_x = distance_vals[-1]
self.lines.append(line)
ax.grid(True)
if isinstance(self.xlim, tuple):
ax.set_xlim(self.xlim[0], self.xlim[1])
else:
ax.set_xlim(min_x, max_x)
if isinstance(self.ylim, tuple):
ax.set_ylim(self.ylim[0], self.ylim[1])
self._set_labels(i_m, ax)
def _get_ylabel(self, i_m):
"""
"""
return self.stddevs + " Std. Dev. ({:s})".format(str(i_m))
def _set_labels(self, i_m, ax):
"""
Sets the labels on the specified axes
"""
ax.set_xlabel("%s (km)" % DISTANCE_LABEL_MAP[self.distance_type],
fontsize=16)
ax.set_ylabel(self._get_ylabel(i_m), fontsize=16)
def _write_pprint_header_line(self, fid, sep=","):
"""
Write the header lines of the pretty print function
"""
fid.write("Distance (km) %s Standard Deviations Trellis\n" %
self.stddevs)
fid.write("%s\n" % sep.join(["{:s}{:s}{:s}".format(key, sep, str(val))
for (key, val) in self.params.items()]))
class MagnitudeDistanceSpectraTrellis(BaseTrellis):
# In this case the preprocessor needs to be removed
def __init__(self, magnitudes, distances, gsims, imts, params,
stddevs="Total", **kwargs):
"""
Builds the trellis plots for variation in response spectra with
magnitude and distance.
In this case the class is instantiated with a set of magnitudes
and a dictionary indicating the different distance types.
:param imts: (numeric list or numpy array)
the Spectral Acceleration's
natural period(s) to be used
"""
imts = ["SA(%s)" % i_m for i_m in imts]
super(MagnitudeDistanceSpectraTrellis, self).__init__(magnitudes,
distances,
gsims,
imts,
params,
stddevs,
**kwargs)
def _preprocess_ruptures(self):
"""
In this case properties such as the rupture depth and width may change
with the magnitude. Where this behaviour is desired the use feeds
the function with a list of RuptureContext instances, in which each
rupture context contains the information specific to that magnitude.
If this behaviour is not desired then the pre-processing of the
rupture information proceeds as in the conventional case within the
base class
"""
# If magnitudes was provided with a list of RuptureContexts
if all([isinstance(mag, RuptureContext)
for mag in self.magnitudes]):
# Get all required rupture attributes
self.rctx = [mag for mag in self.magnitudes]
for gmpe_name, gmpe in self.gsims.items():
rup_params = [param
for param in gmpe.REQUIRES_RUPTURE_PARAMETERS]
for rctx in self.rctx:
for param in rup_params:
if param not in rctx.__dict__:
raise ValueError(
"GMPE %s requires rupture parameter %s"
% (gmpe_name, param))
return
# Otherwise instantiate in the conventional way
super(MagnitudeDistanceSpectraTrellis, self)._preprocess_ruptures()
def _preprocess_distances(self):
"""
In the case of distances one can pass either a dictionary containing
the distances, or a list of dictionaries each calibrated to a specific
magnitude (the list must be the same length as the number of
magnitudes)
"""
if isinstance(self.distances, dict):
# Copy the same distances across
self.distances = [deepcopy(self.distances)
for mag in self.magnitudes]
assert (len(self.distances) == len(self.magnitudes))
# Distances should be a list of dictionaries
self.dctx = []
required_distances = []
for gmpe_name, gmpe in self.gsims.items():
gsim_distances = [dist for dist in gmpe.REQUIRES_DISTANCES]
for mag_distances in self.distances:
for dist in gsim_distances:
if dist not in mag_distances:
raise ValueError('GMPE %s requires distance type %s'
% (gmpe_name, dist))
if dist not in required_distances:
required_distances.append(dist)
for distance in self.distances:
dctx = gsim.base.DistancesContext()
dist_check = False
for dist in required_distances:
if dist_check and not (len(distance[dist]) == self.nsites):
raise ValueError("Distances arrays not equal length!")
else:
self.nsites = len(distance[dist])
dist_check = True
setattr(dctx, dist, distance[dist])
self.dctx.append(dctx)
@classmethod
def from_rupture_properties(cls, properties, magnitudes, distance,
gsims, periods, stddevs='Total', **kwargs):
'''Constructs the Base Trellis Class from a dictionary of
properties. In this class, this method is simply an alias of
`from_rupture_model`
:param periods: (numeric list or numpy array)
the Spectral Acceleration's
natural period(s) to be used. Note that this parameter
is called `imt` in `from_rupture_model` where the name
`imt` has been kept for legacy code compatibility
'''
return cls.from_rupture_model(properties, magnitudes, distance,
gsims, periods, stddevs=stddevs,
**kwargs)
@classmethod
def from_rupture_model(cls, properties, magnitudes, distances,
gsims, imts, stddevs='Total', **kwargs):
"""
Constructs the Base Trellis Class from a rupture model
:param dict properties:
Properties of the rupture and sites, including (* indicates
required): *dip, *aspect, tectonic_region, rake, ztor, strike,
msr, initial_point, hypocentre_location, distance_type,
vs30, line_azimuth, origin_point, vs30measured, z1pt0,
z2pt5, backarc
:param list magnitudes:
List of magnitudes
:param list distances:
List of distances (the distance type should be specified in the
properties dict - rrup, by default)
"""
kwargs.setdefault('filename', None)
kwargs.setdefault('filetype', "png")
kwargs.setdefault('dpi', 300)
kwargs.setdefault('plot_type', "loglog")
kwargs.setdefault('distance_type', "rjb")
kwargs.setdefault('xlim', None)
kwargs.setdefault('ylim', None)
# Defaults for the properties of the rupture and site configuration
properties.setdefault("tectonic_region", "Active Shallow Crust")
properties.setdefault("rake", 0.)
properties.setdefault("ztor", 0.)
properties.setdefault("strike", 0.)
properties.setdefault("msr", WC1994())
properties.setdefault("initial_point", DEFAULT_POINT)
properties.setdefault("hypocentre_location", None)
properties.setdefault("line_azimuth", 90.)
properties.setdefault("origin_point", (0.5, 0.5))
properties.setdefault("vs30measured", True)
properties.setdefault("z1pt0", None)
properties.setdefault("z2pt5", None)
properties.setdefault("backarc", False)
properties.setdefault("distance_type", "rrup")
distance_dicts = []
rupture_dicts = []
for magnitude in magnitudes:
# Generate the rupture for the specific magnitude
rup = GSIMRupture(magnitude, properties["dip"],
properties["aspect"],
properties["tectonic_region"],
properties["rake"], properties["ztor"],
properties["strike"], properties["msr"],
properties["initial_point"],
properties["hypocentre_location"])
distance_dict = None
for distance in distances:
# Define the target sites with respect to the rupture
_ = rup.get_target_sites_point(distance,
properties["distance_type"],
properties["vs30"],
properties["line_azimuth"],
properties["origin_point"],
properties["vs30measured"],
properties["z1pt0"],
properties["z2pt5"],
properties["backarc"])
sctx, rctx, dctx = rup.get_gsim_contexts()
if not distance_dict:
distance_dict = []
for (key, val) in dctx.__dict__.items():
distance_dict.append((key, val))
distance_dict = dict(distance_dict)
else:
for (key, val) in dctx.__dict__.items():
distance_dict[key] = np.hstack([
distance_dict[key], val])
distance_dicts.append(distance_dict)
rupture_dicts.append(rctx)
return cls(rupture_dicts, distance_dicts, gsims, imts, properties,
stddevs, **kwargs)
def plot(self):
"""
Create plot!
"""
nrow = len(self.magnitudes)
# Get means and standard deviations
gmvs = self.get_ground_motion_values()
fig = plt.figure(figsize=self.figure_size)
fig.set_tight_layout({"pad": 0.5})
for rowloc in range(nrow):
for colloc in range(self.nsites):
self._build_plot(
plt.subplot2grid((nrow, self.nsites), (rowloc, colloc)),
gmvs,
rowloc,
colloc)
# Add legend
lgd = plt.legend(self.lines,
self.labels,
loc=3.,
bbox_to_anchor=(1.1, 0.0),
fontsize=self.legend_fontsize)
_save_image_tight(fig, lgd, self.filename, self.filetype, self.dpi)
plt.show()
def get_ground_motion_values(self):
"""
        Runs the GMPE calculations to retrieve ground motion values
:returns:
Nested dictionary of values
{'GMPE1': {'IM1': , 'IM2': },
'GMPE2': {'IM1': , 'IM2': }}
"""
gmvs = OrderedDict()
for gmpe_name, gmpe in self.gsims.items():
gmvs.update([(gmpe_name, {})])
for i_m in self.imts:
gmvs[gmpe_name][i_m] = np.zeros(
[len(self.rctx), self.nsites], dtype=float)
for iloc, (rct, dct) in enumerate(zip(self.rctx, self.dctx)):
try:
means, _ = gmpe.get_mean_and_stddevs(
self.sctx, rct, dct,
imt.from_string(i_m),
[self.stddevs])
gmvs[gmpe_name][i_m][iloc, :] = \
np.exp(means)
except (KeyError, ValueError):
gmvs[gmpe_name][i_m] = np.array([], dtype=float)
break
return gmvs
def _build_plot(self, ax, gmvs, rloc, cloc):
"""
Plots the lines for a given axis
:param ax:
Axes object
:param str i_m:
Intensity Measure
:param dict gmvs:
Ground Motion Values Dictionary
"""
self.labels = []
self.lines = []
max_period = 0.0
min_period = np.inf
for gmpe_name in self.gsims:
periods = []
spec = []
for i_m in self.imts:
if len(gmvs[gmpe_name][i_m]):
periods.append(imt.from_string(i_m).period)
spec.append(gmvs[gmpe_name][i_m][rloc, cloc])
periods = np.array(periods)
spec = np.array(spec)
max_period = np.max(periods) if np.max(periods) > max_period else \
max_period
min_period = np.min(periods) if np.min(periods) < min_period else \
min_period
self.labels.append(gmpe_name)
# Get spectrum from gmvs
if self.plot_type == "loglog":
line, = ax.loglog(periods,
spec,
linewidth=2.0,
label=gmpe_name)
else:
line, = ax.semilogy(periods,
spec,
linewidth=2.0,
label=gmpe_name)
# On the top row, add the distance as a title
if rloc == 0:
ax.set_title("%s = %9.1f (km)" %
(self.distance_type,
self.distances[rloc][self.distance_type][cloc]),
fontsize=14)
# On the last column add a vertical label with magnitude
if cloc == (self.nsites - 1):
ax.annotate("M = %s" % self.rctx[rloc].mag,
(1.05, 0.5),
xycoords="axes fraction",
fontsize=14,
rotation="vertical")
self.lines.append(line)
ax.set_xlim(min_period, max_period)
if isinstance(self.ylim, tuple):
ax.set_ylim(self.ylim[0], self.ylim[1])
ax.grid(True)
self._set_labels(i_m, ax)
def _set_labels(self, i_m, ax):
"""
Sets the labels on the specified axes
"""
ax.set_xlabel("Period (s)", fontsize=14)
ax.set_ylabel(self._get_ylabel(None), fontsize=14)
def to_dict(self):
"""
Export ground motion values to a dictionary
"""
gmvs = self.get_ground_motion_values()
periods = [float(val.split("SA(")[1].rstrip(")"))
for val in self.imts]
gmv_dict = OrderedDict([
("xlabel", "Period (s)"),
("xvalues", periods),
("figures", [])
])
mags = [rup.mag for rup in self.magnitudes]
dists = self.distances[0][self.distance_type]
for i, mag in enumerate(mags):
for j, dist in enumerate(dists):
ydict = OrderedDict([
("ylabel", self._get_ylabel(None)), # arg 'None' not used
("magnitude", mag),
("distance", np.around(dist, 3)),
("imt", 'SA'),
("row", i),
("column", j),
("yvalues", OrderedDict([(gsim, []) for gsim in gmvs]))
])
for gsim in gmvs:
for imt in self.imts:
if len(gmvs[gsim][imt]):
value = gmvs[gsim][imt][i, j]
if np.isnan(value):
value = None
ydict["yvalues"][gsim].append(value)
else:
ydict["yvalues"][gsim].append(None)
gmv_dict["figures"].append(ydict)
return gmv_dict
def to_json(self):
"""
Exports the ground motion values to json
"""
return json.dumps(self.to_dict())
def _get_ylabel(self, i_m):
"""
In this case only the spectra are being shown, so return only the
Sa (g) label
"""
return "Sa (g)"
def pretty_print(self, filename=None, sep=","):
"""
Format the ground motion for printing to file or to screen
:param str filename:
Path to file
:param str sep:
Separator character
"""
if filename:
fid = open(filename, "w")
else:
fid = sys.stdout
# Print Meta information
self._write_pprint_header_line(fid, sep)
# Loop over IMTs
gmvs = self.get_ground_motion_values()
# Get the GMPE list header string
gsim_str = "IMT{:s}{:s}".format(
sep,
sep.join(self.gsims))
for i, mag in enumerate(self.magnitudes):
for j in range(self.nsites):
dist_string = sep.join([
"{:s}{:s}{:s}".format(dist_type, sep, str(val[j]))
                    for (dist_type, val) in self.distances[i].items()])
# Get M-R header string
mr_header = "Magnitude{:s}{:s}{:s}{:s}".format(sep, str(mag),
sep,
dist_string)
fid.write("%s\n" % mr_header)
fid.write("%s\n" % gsim_str)
for imt in self.imts:
iml_str = []
for gmpe_name in self.gsims:
# Need to deal with case that GSIMs don't define
# values for the period
if len(gmvs[gmpe_name][imt]):
iml_str.append("{:.8f}".format(
gmvs[gmpe_name][imt][i, j]))
else:
iml_str.append("-999.000")
                    # Retrieved IMT string
imt_str = imt.split("(")[1].rstrip(")")
iml_str = sep.join(iml_str)
fid.write("{:s}{:s}{:s}\n".format(imt_str, sep, iml_str))
fid.write("================================================\n")
if filename:
fid.close()
def _write_pprint_header_line(self, fid, sep=","):
"""
Write the header lines of the pretty print function
"""
fid.write("Magnitude - Distance Spectra (km) IMT Trellis\n")
fid.write("%s\n" % sep.join(["{:s}{:s}{:s}".format(key, sep, str(val))
for (key, val) in self.params.items()]))
class MagnitudeDistanceSpectraSigmaTrellis(MagnitudeDistanceSpectraTrellis):
"""
"""
def _build_plot(self, ax, gmvs, rloc, cloc):
"""
Plots the lines for a given axis
:param ax:
Axes object
:param str i_m:
Intensity Measure
:param dict gmvs:
Ground Motion Values Dictionary
"""
self.labels = []
self.lines = []
max_period = 0.0
min_period = np.inf
for gmpe_name in self.gsims:
periods = []
spec = []
# Get spectrum from gmvs
for i_m in self.imts:
if len(gmvs[gmpe_name][i_m]):
periods.append(imt.from_string(i_m).period)
spec.append(gmvs[gmpe_name][i_m][rloc, cloc])
periods = np.array(periods)
spec = np.array(spec)
self.labels.append(gmpe_name)
max_period = np.max(periods) if np.max(periods) > max_period else \
max_period
min_period = np.min(periods) if np.min(periods) < min_period else \
min_period
if self.plot_type == "loglog":
line, = ax.semilogx(periods,
spec,
linewidth=2.0,
label=gmpe_name)
else:
line, = ax.plot(periods,
spec,
linewidth=2.0,
label=gmpe_name)
# On the top row, add the distance as a title
if rloc == 0:
ax.set_title("%s = %9.3f (km)" %
(self.distance_type,
self.distances[0][self.distance_type][cloc]),
fontsize=14)
# On the last column add a vertical label with magnitude
if cloc == (self.nsites - 1):
ax.annotate("M = %s" % self.rctx[rloc].mag,
(1.05, 0.5),
xycoords="axes fraction",
fontsize=14,
rotation="vertical")
self.lines.append(line)
ax.set_xlim(min_period, max_period)
if isinstance(self.ylim, tuple):
ax.set_ylim(self.ylim[0], self.ylim[1])
ax.grid(True)
self._set_labels(i_m, ax)
def get_ground_motion_values(self):
"""
        Runs the GMPE calculations to retrieve ground motion values
:returns:
Nested dictionary of values
{'GMPE1': {'IM1': , 'IM2': },
'GMPE2': {'IM1': , 'IM2': }}
"""
gmvs = OrderedDict()
for gmpe_name, gmpe in self.gsims.items():
gmvs.update([(gmpe_name, {})])
for i_m in self.imts:
gmvs[gmpe_name][i_m] = np.zeros([len(self.rctx), self.nsites],
dtype=float)
for iloc, (rct, dct) in enumerate(zip(self.rctx, self.dctx)):
try:
_, sigmas = gmpe.get_mean_and_stddevs(
self.sctx, rct, dct,
imt.from_string(i_m),
[self.stddevs])
gmvs[gmpe_name][i_m][iloc, :] = sigmas[0]
except (KeyError, ValueError):
gmvs[gmpe_name][i_m] = np.array([], dtype=float)
break
return gmvs
def _get_ylabel(self, i_m):
"""
Returns the standard deviation term (specific to the standard deviation
type specified for the class)
"""
return "{:s} Std. Dev.".format(self.stddevs)
def _write_pprint_header_line(self, fid, sep=","):
"""
Write the header lines of the pretty print function
"""
fid.write("Magnitude - Distance (km) Spectra %s Standard Deviations Trellis\n" %
self.stddevs)
fid.write("%s\n" % sep.join(["{:s}{:s}{:s}".format(key, sep, str(val))
for (key, val) in self.params.items()]))
| agpl-3.0 |
lewisc/spark-tk | regression-tests/sparktkregtests/testcases/scoretests/naive_bayes_test.py | 10 | 2147 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests Naive Bayes Model against known values. """
import unittest
import os
from sparktkregtests.lib import sparktk_test
from sparktkregtests.lib import scoring_utils
class NaiveBayes(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build the frames needed for the tests."""
super(NaiveBayes, self).setUp()
dataset = self.get_file("naive_bayes.csv")
schema = [("label", int),
("f1", int),
("f2", int),
("f3", int)]
self.frame = self.context.frame.import_csv(dataset, schema=schema)
def test_model_scoring(self):
"""Test training intializes theta, pi and labels"""
model = self.context.models.classification.naive_bayes.train(self.frame, ['f1', 'f2', 'f3'], "label")
res = model.predict(self.frame, ['f1', 'f2', 'f3'])
analysis = res.to_pandas()
file_name = self.get_name("naive_bayes")
model_path = model.export_to_mar(self.get_export_file(file_name))
with scoring_utils.scorer(
model_path, self.id()) as scorer:
for _, i in analysis.iterrows():
r = scorer.score(
[dict(zip(['f1', 'f2', 'f3'],
map(lambda x: int(x), (i[1:4]))))])
self.assertEqual(
r.json()["data"][0]['Score'], i['predicted_class'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
empirical-org/WikipediaSentences | notebooks/BERT-4 Experiments Multilabel.py | 1 | 19268 | #!/usr/bin/env python
# coding: utf-8
# # Multilabel BERT Experiments
#
# In this notebook we do some first experiments with BERT: we finetune a BERT model+classifier on each of our datasets separately and compute the accuracy of the resulting classifier on the test data.
# For these experiments we use the `pytorch_transformers` package. It contains a variety of neural network architectures for transfer learning and pretrained models, including BERT and XLNET.
#
# Two different BERT models are relevant for our experiments:
#
# - BERT-base-uncased: a relatively small BERT model that should already give reasonable results,
# - BERT-large-uncased: a larger model for real state-of-the-art results.
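#
# For reference, BERT-base-uncased has 12 layers with hidden size 768 (roughly 110M parameters), while BERT-large-uncased has 24 layers with hidden size 1024 (roughly 340M parameters); the larger model is why we use a smaller batch size together with gradient accumulation below.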
# In[1]:
from multilabel import EATINGMEAT_BECAUSE_MAP, EATINGMEAT_BUT_MAP, JUNKFOOD_BECAUSE_MAP, JUNKFOOD_BUT_MAP
label_map = EATINGMEAT_BECAUSE_MAP
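# Note: each *_MAP imported above is assumed to map an original single label to a list of
# (multi-)labels, e.g. {"orig_label": ["label_a", "label_b"], ...}; map_to_multilabel() below
# relies on exactly this structure.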
# In[2]:
import torch
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.modeling_bert import BertForSequenceClassification
BERT_MODEL = 'bert-large-uncased'
BATCH_SIZE = 16 if "base" in BERT_MODEL else 2
GRADIENT_ACCUMULATION_STEPS = 1 if "base" in BERT_MODEL else 8
tokenizer = BertTokenizer.from_pretrained(BERT_MODEL)
# ## Data
#
# We use the same data as for all our previous experiments. Here we load the training, development and test data for a particular prompt.
# In[3]:
import ndjson
import glob
from collections import Counter
prefix = "eatingmeat_because_xl"
train_file = f"../data/interim/{prefix}_train_withprompt.ndjson"
synth_files = glob.glob(f"../data/interim/{prefix}_train_withprompt_*.ndjson")
dev_file = f"../data/interim/{prefix}_dev_withprompt.ndjson"
test_file = f"../data/interim/{prefix}_test_withprompt.ndjson"
with open(train_file) as i:
train_data = ndjson.load(i)
synth_data = []
for f in synth_files:
if "allsynth" in f:
continue
with open(f) as i:
synth_data += ndjson.load(i)
with open(dev_file) as i:
dev_data = ndjson.load(i)
with open(test_file) as i:
test_data = ndjson.load(i)
labels = Counter([item["label"] for item in train_data])
print(labels)
# Next, we build the label vocabulary, which maps every label in the training data to an index.
# In[4]:
label2idx = {}
idx2label = {}
target_names = []
for item in label_map:
for label in label_map[item]:
if label not in target_names:
idx = len(target_names)
target_names.append(label)
label2idx[label] = idx
idx2label[idx] = label
print(label2idx)
print(idx2label)
# In[5]:
def map_to_multilabel(items):
return [{"text": item["text"], "label": label_map[item["label"]]} for item in items]
train_data = map_to_multilabel(train_data)
dev_data = map_to_multilabel(dev_data)
test_data = map_to_multilabel(test_data)
# In[6]:
import random
def sample(train_data, synth_data, label2idx, number):
"""Sample a fixed number of items from every label from
the training data and test data.
"""
new_train_data = []
for label in label2idx:
data_for_label = [i for i in train_data if i["label"] == label]
# If there is more training data than the required number,
# take a random sample of n examples from the training data.
if len(data_for_label) >= number:
random.shuffle(data_for_label)
new_train_data += data_for_label[:number]
# If there is less training data than the required number,
# combine training data with synthetic data.
elif len(data_for_label) < number:
# Automatically add all training data
new_train_data += data_for_label
# Compute the required number of additional data
rest = number-len(data_for_label)
# Collect the synthetic data for the label
synth_data_for_label = [i for i in synth_data if i["label"] == label]
# If there is more synthetic data than required,
# take a random sample from the synthetic data.
if len(synth_data_for_label) > rest:
random.shuffle(synth_data_for_label)
new_train_data += synth_data_for_label[:rest]
# If there is less synthetic data than required,
# sample with replacement from this data until we have
# the required number.
else:
new_train_data += random.choices(synth_data_for_label, k=rest)
return new_train_data
def random_sample(train_data, train_size):
random.shuffle(train_data)
    return train_data[:train_size]
#train_data = sample(train_data, synth_data, label2idx, 200)
print("Train data size:", len(train_data))
# ## Model
#
# We load the pretrained model and put it on a GPU if one is available. We also put the model in "training" mode, so that we can correctly update its internal parameters on the basis of our data sets.
# In[7]:
from torch import nn
from pytorch_transformers.modeling_bert import BertPreTrainedModel, BertModel
class BertForMultiLabelSequenceClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
def __init__(self, config):
super(BertForMultiLabelSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = nn.BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
# In[8]:
model = BertForMultiLabelSequenceClassification.from_pretrained(BERT_MODEL, num_labels=len(label2idx))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.train()
# ## Preprocessing
#
# We preprocess the data by turning every example to an `InputFeatures` item. This item has all the attributes we need for finetuning BERT:
#
# - input ids: the ids of the tokens in the text
# - input mask: tells BERT what part of the input it should not look at (such as padding tokens)
# - segment ids: tells BERT what segment every token belongs to. BERT can take two different segments as input
# - label ids: a multi-hot vector indicating this item's labels (see the toy example below)
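#
# Purely as an illustration (the token ids are made up; real values depend on the tokenizer and the label vocabulary), a short text with a maximum sequence length of 8 could be converted to:
#
#     input_ids   = [101, 2009, 2003, 4010, 102, 0, 0, 0]   (101 = [CLS], 102 = [SEP], 0 = padding)
#     input_mask  = [1, 1, 1, 1, 1, 0, 0, 0]
#     segment_ids = [0, 0, 0, 0, 0, 0, 0, 0]
#     label_ids   = [0., 1., 0.]                             (multi-hot over the label vocabulary)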
# In[ ]:
import logging
import numpy as np
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_SEQ_LENGTH=100
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
def convert_examples_to_features(examples, label2idx, max_seq_length, tokenizer, verbose=0):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (ex_index, ex) in enumerate(examples):
# TODO: should deal better with sentences > max tok length
input_ids = tokenizer.encode("[CLS] " + ex["text"] + " [SEP]")
segment_ids = [0] * len(input_ids)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_ids = np.zeros(len(label2idx))
for label in ex["label"]:
label_ids[label2idx[label]] = 1
if verbose and ex_index == 0:
logger.info("*** Example ***")
logger.info("text: %s" % ex["text"])
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label:" + str(ex["label"]) + " id: " + str(label_ids))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids))
return features
train_features = convert_examples_to_features(train_data, label2idx, MAX_SEQ_LENGTH, tokenizer, verbose=0)
dev_features = convert_examples_to_features(dev_data, label2idx, MAX_SEQ_LENGTH, tokenizer)
test_features = convert_examples_to_features(test_data, label2idx, MAX_SEQ_LENGTH, tokenizer, verbose=1)
# Next, we initialize data loaders for each of our data sets. These data loaders present the data for training (for example, by grouping them into batches).
# In[ ]:
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
def get_data_loader(features, max_seq_length, batch_size, shuffle=True):
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.float)
data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
dataloader = DataLoader(data, shuffle=shuffle, batch_size=batch_size)
return dataloader
train_dataloader = get_data_loader(train_features, MAX_SEQ_LENGTH, BATCH_SIZE)
dev_dataloader = get_data_loader(dev_features, MAX_SEQ_LENGTH, BATCH_SIZE)
test_dataloader = get_data_loader(test_features, MAX_SEQ_LENGTH, BATCH_SIZE, shuffle=False)
# ## Evaluation
#
# Our evaluation method takes a pretrained model and a dataloader. It has the model predict the labels for the items in the data loader, and returns the loss, the correct labels, and the predicted labels.
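# Because this is a multi-label setup, the sigmoid outputs are thresholded at 0.5: every label whose predicted probability is at least 0.5 is assigned to the item.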
# In[ ]:
from torch.nn import Sigmoid
def evaluate(model, dataloader, verbose=False):
eval_loss = 0
nb_eval_steps = 0
predicted_labels, correct_labels = [], []
for step, batch in enumerate(tqdm(dataloader, desc="Evaluation iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
with torch.no_grad():
tmp_eval_loss, logits = model(input_ids, segment_ids, input_mask, label_ids)
sig = Sigmoid()
outputs = sig(logits).to('cpu').numpy()
label_ids = label_ids.to('cpu').numpy()
predicted_labels += list(outputs >= 0.5)
correct_labels += list(label_ids)
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
correct_labels = np.array(correct_labels)
predicted_labels = np.array(predicted_labels)
return eval_loss, correct_labels, predicted_labels
# ## Training
#
# Let's prepare the training. We set the training parameters and choose an optimizer and learning rate scheduler.
# In[ ]:
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
NUM_TRAIN_EPOCHS = 20
LEARNING_RATE = 1e-5
WARMUP_PROPORTION = 0.1
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0 - x
num_train_steps = int(len(train_data) / BATCH_SIZE / GRADIENT_ACCUMULATION_STEPS * NUM_TRAIN_EPOCHS)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=LEARNING_RATE, correct_bias=False)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=100, t_total=num_train_steps)
# Now we do the actual training. In each epoch, we present the model with all training data and compute the loss on the training set and the development set. We save the model whenever the development loss improves. We end training when we haven't seen an improvement of the development loss for a specific number of epochs (the patience).
#
# Optionally, we use gradient accumulation to accumulate the gradient for several training steps. This is useful when we want to use a larger batch size than our current GPU allows us to do.
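# With the settings above for BERT-large (BATCH_SIZE = 2, GRADIENT_ACCUMULATION_STEPS = 8), each optimizer update effectively uses gradients accumulated over 2 * 8 = 16 examples.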
# In[ ]:
import os
from tqdm import trange
from tqdm import tqdm
from sklearn.metrics import classification_report, precision_recall_fscore_support
OUTPUT_DIR = "/tmp/"
MODEL_FILE_NAME = "pytorch_model.bin"
PATIENCE = 5
global_step = 0
model.train()
loss_history = []
best_epoch = 0
for epoch in trange(int(NUM_TRAIN_EPOCHS), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Training iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
outputs = model(input_ids, segment_ids, input_mask, label_ids)
loss = outputs[0]
if GRADIENT_ACCUMULATION_STEPS > 1:
loss = loss / GRADIENT_ACCUMULATION_STEPS
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % GRADIENT_ACCUMULATION_STEPS == 0:
lr_this_step = LEARNING_RATE * warmup_linear(global_step/num_train_steps, WARMUP_PROPORTION)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
dev_loss, _, _ = evaluate(model, dev_dataloader)
print("Loss history:", loss_history)
print("Dev loss:", dev_loss)
if len(loss_history) == 0 or dev_loss < min(loss_history):
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = os.path.join(OUTPUT_DIR, MODEL_FILE_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
best_epoch = epoch
if epoch-best_epoch >= PATIENCE:
print("No improvement on development set. Finish training.")
break
loss_history.append(dev_loss)
# ## Results
#
# We load the saved model with the best development loss, set it to evaluation mode and compute its performance on the test set. We then print precision, recall and exact-match accuracy for the test set.
#
# Note that different runs will give slightly different results.
# In[ ]:
from tqdm import tqdm_notebook as tqdm
output_model_file = "/tmp/pytorch_model.bin"
print("Loading model from", output_model_file)
device="cpu"
model_state_dict = torch.load(output_model_file, map_location=lambda storage, loc: storage)
model = BertForMultiLabelSequenceClassification.from_pretrained(BERT_MODEL, state_dict=model_state_dict, num_labels=len(label2idx))
model.to(device)
model.eval()
_, test_correct, test_predicted = evaluate(model, test_dataloader, verbose=True)
# In[ ]:
all_correct = 0
fp, fn, tp, tn = 0, 0, 0, 0
for c, p in zip(test_correct, test_predicted):
if sum(c == p) == len(c):
all_correct +=1
for ci, pi in zip(c, p):
if pi == 1 and ci == 1:
tp += 1
same = 1
elif pi == 1 and ci == 0:
fp += 1
elif pi == 0 and ci == 1:
fn += 1
else:
tn += 1
same =1
precision = tp/(tp+fp)
recall = tp/(tp+fn)
print("P:", precision)
print("R:", recall)
print("A:", all_correct/len(test_correct))
# In[ ]:
for item, predicted, correct in zip(test_data, test_predicted, test_correct):
correct_labels = [idx2label[i] for i, l in enumerate(correct) if l == 1]
predicted_labels = [idx2label[i] for i, l in enumerate(predicted) if l == 1]
print("{}#{}#{}".format(item["text"], ";".join(correct_labels), ";".join(predicted_labels)))
# In[ ]:
| agpl-3.0 |
pliz/gunfolds | scripts/stackedbars_many.py | 1 | 4935 | import pandas as pd
import pylab as pl
import seaborn as sb
from matplotlib import pyplot as plt
import matplotlib as mpl
import seaborn as sns
import numpy as np
SBDIR = '~/soft/src/dev/tools/stackedBarGraph/'
GFDIR = '/na/home/splis/soft/src/dev/craft/gunfolds/tools/'
import sys, os
sys.path.append(os.path.expanduser(SBDIR))
sys.path.append(os.path.expanduser(GFDIR))
import zickle as zkl
from stackedBarGraph import StackedBarGrapher
SBG = StackedBarGrapher()
def gettimes(d):
t = [x['ms'] for x in d]
time = map(lambda x: x/1000./60., t)
return time
l = [(0.15, 'leibnitz_nodes_15_density_0.1_newp_.zkl'),
(0.20, 'leibnitz_nodes_20_density_0.1_newp_.zkl'),
(0.25, 'leibnitz_nodes_25_density_0.1_newp_.zkl'),
(0.30, 'leibnitz_nodes_30_density_0.1_newp_.zkl'),
(0.35, 'leibnitz_nodes_35_density_0.1_newp_.zkl')]
fig = pl.figure(figsize=[10,3])
#Read in data & create total column
d = zkl.load("hooke_nodes_6_g32g1_.zkl")#hooke_nodes_35_newp_.zkl")
densities = [.15, .20 ,.25, .30, .35]
d = {}
for fname in l:
d[fname[0]] = zkl.load(fname[1])
def get_counts(d):
eqc = [len(x['eq']) for x in d]
keys = np.sort(np.unique(eqc))
c = {}
for k in keys:
c[k] = len(np.where(eqc == k)[0])
return c
# unique size
usz = set()
dc = {}
for u in densities:
dc[u] = get_counts(d[u])
for v in dc[u]:
usz.add(v)
for u in densities:
for c in usz:
if not c in dc[u]:
dc[u][c] = 0
A = []
for u in densities:
A.append([dc[u][x] for x in np.sort(dc[u].keys())])
#print A
#A = np.array(A)
pp = mpl.colors.LinearSegmentedColormap.from_list("t",sns.color_palette("Paired",len(usz)))
#pp = mpl.colors.LinearSegmentedColormap.from_list("t",sns.dark_palette("#5178C7",len(usz)))
#pp = mpl.colors.LinearSegmentedColormap.from_list("t",sns.blend_palette(["mediumseagreen", "ghostwhite", "#4168B7"],len(usz)))
scalarMap = mpl.cm.ScalarMappable(norm = lambda x: x/np.double(len(usz)),
cmap=pp)
d_widths = [.5]*len(densities)
d_labels = map(lambda x: str(int(x*100))+"%",densities)
#u = np.sort(list(usz))
d_colors = [scalarMap.to_rgba(i) for i in range(len(A[0]))]
#d_colors = ['#2166ac', '#fee090', '#fdbb84', '#fc8d59', '#e34a33', '#b30000', '#777777','#2166ac', '#fee090', '#fdbb84', '#fc8d59', '#e34a33', '#b30000', '#777777','#2166ac', '#fee090']
#ax = fig.add_subplot(211)
ax = plt.subplot2grid((3,1), (0, 0), rowspan=2)
SBG.stackedBarPlot(ax,
A,
d_colors,
xLabels=d_labels,
yTicks=3,
widths=d_widths,
gap = 0.005,
scale=False
)
for i in range(len(A)):
Ai = [x for x in A[i] if x>0]
y = [x/2.0 for x in Ai]
for j in range(len(Ai)):
if j>0:
yy = y[j]+np.sum(Ai[0:j])
else:
yy = y[j]
pl.text(0.5*i-0.02,yy-1.2,str(Ai[j]),fontsize=12,zorder=10)
# #Set general plot properties
# sns.set_style("white")
# sns.set_context({"figure.figsize": (24, 10)})
# for i in np.sort(list(usz))[::-1]:
# y = [100-dc[u][i] for u in np.sort(dc.keys())]
# bottom_plot=sns.barplot(x=np.asarray(densities)*100, y=y)
# # color=scalarMap.to_rgba(i))
# #y = (sbd[i+1]-sbd[i])/2.+sbd[i]scala
# #for j in range(len(sbd.Density)):
# # pl.text(j-0.1,y[j],'1',fontsize=16,zorder=i)
# #Optional code - Make plot look nicer
sns.despine(left=True)
# #Set fonts to consistent 16pt size
ax.set(xticklabels="")
for item in ([ax.xaxis.label, ax.yaxis.label] +
#ax.get_xticklabels() +
ax.get_yticklabels()):
item.set_fontsize(12)
alltimes_new = []
for fname in l:
dp = zkl.load(fname[1])
alltimes_new.append(gettimes(dp))
shift = 0.15
wds = 0.3
fliersz = 2
lwd = 1
ax = plt.subplot2grid((3,1), (2, 0))
g = sb.boxplot(alltimes_new,names=map(lambda x: str(int(x*100))+"",
densities),
widths=wds, color="Reds",fliersize=fliersz,
linewidth=lwd,
**{'positions':np.arange(len(densities))+shift,
'label':'MSL'})
# plt.plot(np.arange(len(densities))-shift,
# map(np.median,alltimes_old), 'ro-', lw=0.5, mec='k')
# plt.plot(np.arange(len(densities))+shift,
# map(np.median,alltimes_new), 'bo-', lw=0.5, mec='k')
g.figure.get_axes()[1].set_yscale('log')
plt.xlabel('number of nodes in a graph')
plt.ylabel('computation time\n(minutes)')
#plt.title('100 6 node graphs per density\n$G_2 \\rightarrow G_1$',
# multialignment='center')
#plt.subplots_adjust(right=0.99, left=0.2)
plt.legend(loc=0)
for item in ([ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() +
ax.get_yticklabels()):
item.set_fontsize(12)
pl.subplots_adjust(bottom=0.1,hspace=0.01,top=0.98)
# plt.show()
pl.show()
| gpl-3.0 |
onlynight/wechat-dump | plot-num-msg-by-time.py | 1 | 1464 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: plot_num_msg_by_time.py
# Date: Wed Mar 25 17:44:39 2015 +0800
# Author: Yuxin Wu <[email protected]>
from wechat.parser import WeChatDBParser
from wechat.utils import ensure_unicode
from datetime import timedelta, datetime
import numpy as np
import matplotlib.pyplot as plt
import sys, os
if len(sys.argv) != 3:
sys.exit("Usage: {0} <path to decrypted_database.db> <name>".format(sys.argv[0]))
db_file = sys.argv[1]
name = ensure_unicode(sys.argv[2])
every_k_days = 2
parser = WeChatDBParser(db_file)
msgs = parser.msgs_by_talker[name]
times = [x.createTime for x in msgs]
start_time = times[0]
diffs = [(x - start_time).days for x in times]
max_day = diffs[-1]
width = 20
numbers = range((max_day / width + 1) * width + 1)[::width]
labels = [(start_time + timedelta(x)).strftime("%m/%d") for x in numbers]
plt.xticks(numbers, labels)
plt.xlabel("Date")
plt.ylabel("Number of msgs in k days")
plt.hist(diffs, bins=max_day / every_k_days)
plt.show()
# statistics by hour
# I'm in a different time zone in this period:
#TZ_DELTA = {(datetime(2014, 7, 13), datetime(2014, 10, 1)): -15}
#def real_hour(x):
#for k, v in TZ_DELTA.iteritems():
#if x > k[0] and x < k[1]:
#print x
#return (x.hour + v + 24) % 24
#return x.hour
#hours = [real_hour(x) for x in times]
#plt.ylabel("Number of msgs")
#plt.xlabel("Hour in a day")
#plt.hist(hours, bins=24)
#plt.show()
| gpl-3.0 |
bavardage/statsmodels | statsmodels/tsa/filters/hp_filter.py | 3 | 2954 | from __future__ import absolute_import
from scipy import sparse
from scipy.sparse import dia_matrix, eye as speye
from scipy.sparse.linalg import spsolve
import numpy as np
from .utils import _maybe_get_pandas_wrapper
def hpfilter(X, lamb=1600):
"""
Hodrick-Prescott filter
Parameters
----------
X : array-like
The 1d ndarray timeseries to filter of length (nobs,) or (nobs,1)
lamb : float
The Hodrick-Prescott smoothing parameter. A value of 1600 is
suggested for quarterly data. Ravn and Uhlig suggest using a value
of 6.25 (1600/4**4) for annual data and 129600 (1600*3**4) for monthly
data.
Returns
-------
cycle : array
The estimated cycle in the data given lamb.
trend : array
The estimated trend in the data given lamb.
Examples
---------
>>> import statsmodels.api as sm
>>> dta = sm.datasets.macrodata.load()
>>> X = dta.data['realgdp']
>>> cycle, trend = sm.tsa.filters.hpfilter(X,1600)
Notes
-----
The HP filter removes a smooth trend, `T`, from the data `X`. by solving
min sum((X[t] - T[t])**2 + lamb*((T[t+1] - T[t]) - (T[t] - T[t-1]))**2)
T t
Here we implemented the HP filter as a ridge-regression rule using
scipy.sparse. In this sense, the solution can be written as
    T = inv(I + lamb*K'K)X
where I is a nobs x nobs identity matrix, and K is a (nobs-2) x nobs matrix
such that
    K[i,j] = 1 if j == i or j == i + 2
    K[i,j] = -2 if j == i + 1
    K[i,j] = 0 otherwise
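
    For example, with nobs = 5 the matrix K is (nobs-2) x nobs::

        [[ 1. -2.  1.  0.  0.]
         [ 0.  1. -2.  1.  0.]
         [ 0.  0.  1. -2.  1.]]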
References
----------
Hodrick, R.J, and E. C. Prescott. 1980. "Postwar U.S. Business Cycles: An
    Empirical Investigation." `Carnegie Mellon University discussion
paper no. 451`.
    Ravn, M.O. and H. Uhlig. 2002. "On Adjusting the Hodrick-Prescott
Filter for the Frequency of Observations." `The Review of Economics and
Statistics`, 84(2), 371-80.
"""
_pandas_wrapper = _maybe_get_pandas_wrapper(X)
X = np.asarray(X, float)
if X.ndim > 1:
X = X.squeeze()
nobs = len(X)
I = speye(nobs,nobs)
offsets = np.array([0,1,2])
data = np.repeat([[1.],[-2.],[1.]], nobs, axis=1)
K = dia_matrix((data, offsets), shape=(nobs-2,nobs))
import scipy
if (X.dtype != np.dtype('<f8') and
int(scipy.__version__[:3].split('.')[1]) < 11):
#scipy umfpack bug on Big Endian machines, will be fixed in 0.11
use_umfpack = False
else:
use_umfpack = True
if scipy.__version__[:3] == '0.7':
#doesn't have use_umfpack option
#will be broken on big-endian machines with scipy 0.7 and umfpack
trend = spsolve(I+lamb*K.T.dot(K), X)
else:
trend = spsolve(I+lamb*K.T.dot(K), X, use_umfpack=use_umfpack)
cycle = X-trend
if _pandas_wrapper is not None:
return _pandas_wrapper(cycle), _pandas_wrapper(trend)
return cycle, trend
| bsd-3-clause |
virneo/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_wxagg.py | 70 | 9051 | from __future__ import division
"""
backend_wxagg.py
A wxPython backend for Agg. This uses the GUI widgets written by
Jeremy O'Donoghue ([email protected]) and the Agg backend by John
Hunter ([email protected])
Copyright (C) 2003-5 Jeremy O'Donoghue, John Hunter, Illinois Institute of
Technology
License: This work is licensed under the matplotlib license (PSF
compatible). A copy should be included with this source code.
"""
import wx
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
import backend_wx
from backend_wx import FigureManager, FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.BeginDrawing()
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.EndDrawing()
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
# artist contains methods rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
frame = FigureFrameWxAgg(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
#
# agg/wxPython image conversion functions (wxPython <= 2.6)
#
def _py_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
if bbox is None:
# agg => rgb -> image
return image
else:
# agg => rgb -> image => bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_clipped_image_as_bitmap(image, bbox))
def _py_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image => bitmap
return wx.BitmapFromImage(_py_convert_agg_to_wx_image(agg, None))
else:
# agg => rgb -> image => bitmap => clipped bitmap
return _clipped_image_as_bitmap(
_py_convert_agg_to_wx_image(agg, None),
bbox)
def _clipped_image_as_bitmap(image, bbox):
"""
Convert the region of a wx.Image bounded by bbox to a wx.Bitmap.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromImage(image)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(image.GetHeight() - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _py_WX28_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _py_WX28_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
def _use_accelerator(state):
"""
Enable or disable the WXAgg accelerator, if it is present and is also
compatible with whatever version of wxPython is in use.
"""
global _convert_agg_to_wx_image
global _convert_agg_to_wx_bitmap
if getattr(wx, '__version__', '0.0')[0:3] < '2.8':
# wxPython < 2.8, so use the C++ accelerator or the Python routines
if state and _wxagg is not None:
_convert_agg_to_wx_image = _wxagg.convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _wxagg.convert_agg_to_wx_bitmap
else:
_convert_agg_to_wx_image = _py_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_convert_agg_to_wx_bitmap
else:
# wxPython >= 2.8, so use the accelerated Python routines
_convert_agg_to_wx_image = _py_WX28_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_WX28_convert_agg_to_wx_bitmap
# try to load the WXAgg accelerator
try:
import _wxagg
except ImportError:
_wxagg = None
# if it's present, use it
_use_accelerator(True)
| agpl-3.0 |
kc-lab/dms2dfe | dms2dfe/ana4_plotter.py | 2 | 1598 | #!usr/bin/python
# Copyright 2016, Rohan Dandage <[email protected],[email protected]>
# This program is distributed under General Public License v. 3.
import sys
from os import makedirs,stat
from os.path import splitext, join, exists, isdir,basename,abspath,dirname
import pandas as pd
from dms2dfe.lib.io_strs import get_logger
logging=get_logger()
from dms2dfe import configure
from dms2dfe.lib.io_plot_files import plot_coverage,plot_mutmap,plot_submap,plot_multisca,plot_pdb,plot_violin,plot_pies
def main(prj_dh):
"""
    **--step 5**. Generates visualizations.
    #. Scatter grid plots of raw counts in replicates, if present.
    #. Mutation matrix of mutant frequencies (log scaled).
    #. Scatter plots of raw counts among selected and unselected samples.
    #. Mutation matrix of fitness values.
    #. DFE plot, i.e. the distribution of fitness values for samples.
    #. Projections onto PDB: averages of fitness values per residue are projected onto the PDB file.
:param prj_dh: path to project directory.
"""
logging.info("start")
if not exists(prj_dh) :
logging.error("Could not find '%s'" % prj_dh)
sys.exit()
configure.main(prj_dh)
from dms2dfe.tmp import info
for type_form in ['aas','cds']:
plots_dh='%s/plots/%s' % (prj_dh,type_form)
if not exists(plots_dh):
makedirs(plots_dh)
plot_coverage(info)
plot_mutmap(info)
plot_submap(info)
plot_multisca(info)
plot_pdb(info)
plot_violin(info)
# plot_pies(info)
if __name__ == '__main__':
main(sys.argv[1]) | gpl-3.0 |
MrJarv1s/FEMur | FEMur/solver2D.py | 1 | 7980 | from FEMur import *
import sys
import sympy as sy
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import scipy.interpolate
from math import ceil
class Solver(object):
'''
2-dimensional solver top class.
Provides common initialization to all child solver classes.
'''
def __init__(self, meshfile, analysis_type):
self.meshfile = meshfile
self.analysis_type = analysis_type
self.get_mesh()
self.D = None
self.gbl_stiff = None
self.gbl_load = None
self.gbl_omega = None
self.dirichlet_applied = False
def weakform(self):
'''
Prints weak form used by the solver to approximate the results.
'''
def get_mesh(self):
'''
Call Mesh class to create the mesh.
'''
try:
a = self.meshfile
except AttributeError:
print('A mesh file has not been provided.')
sys.exit(1)
self.mesh = Mesh2D(self.meshfile, self.analysis_type)
self.mesh.mesh()
def solve(self):
'''
Solves the equation system.
Solves for O in:
[K]{O}={F}
        where K is the global stiffness matrix (or conductivity)
O is the displacement (or temperature)
F is the applied load (or [...])
'''
if self.gbl_stiff is None or self.gbl_load is None:
self.assemble_stiff_load()
if self.dirichlet_applied is False:
self.update_stiff_load_dirichlet()
print('\n# SOLVING FOR OMEGA #\n')
print(self.gbl_stiff)
print(self.gbl_load)
new_stiff = sy.matrix2numpy(self.gbl_stiff, dtype=float)
new_load = sy.matrix2numpy(self.gbl_load, dtype=float)
new_omega = np.linalg.solve(new_stiff, new_load)
self.gbl_omega = new_omega
print(self.gbl_omega)
def plot_results(self):
if self.gbl_omega is None: # Check if the system has been solved
self.solve()
x = np.zeros(self.mesh.num_nodes)
y = np.zeros(self.mesh.num_nodes)
z = self.gbl_omega.T
for i in self.mesh.nodes.keys():
x[i] = self.mesh.nodes[i].x
y[i] = self.mesh.nodes[i].y
xi, yi = np.linspace(x.min(), x.max(), 100), np.linspace(y.min(), y.max(), 100)
xi, yi = np.meshgrid(xi, yi)
trias = np.zeros((self.mesh.num_elem, 3))
for i in self.mesh.elements.keys():
if self.mesh.elements[i].num_nodes < 6:
pass
else:
for j in range(3):
trias[i, j] = self.mesh.elements[i].nodes[j].index
rbf = sc.interpolate.Rbf(x, y, z, function='linear')
zi = rbf(xi, yi)
plt.imshow(zi, vmin=z.min(), vmax=z.max(), origin='lower',
extent=[x.min(), x.max(), y.min(), y.max()], cmap=plt.get_cmap('plasma'))
plt.scatter(x, y, c=z, cmap=plt.get_cmap('plasma'))
plt.colorbar()
plt.triplot(x, y, trias, 'o-', ms=3, lw=1.0, color='black')
plt.show()
class SteadyHeatSolver(Solver):
'''
2-dimensional steady state heat transfer solver.
'''
def __init__(self, meshfile):
Solver.__init__(self, meshfile, "SSHeat")
def assemble_stiff_load(self):
'''
Assemble the Global Stiffness Matrix and Global Load Vector based on
elements.
Only affects nodes pertaining to shell elements. It will overwrite nodes
that were defined under the first assembler.
'''
try:
a = self.mesh.elements[0].nodes[0].x
except AttributeError:
self.get_mesh()
self.gbl_stiff = sy.zeros(self.mesh.num_nodes)
self.gbl_load = sy.zeros(self.mesh.num_nodes, 1)
print('\n# STARTING ASSEMBLY #\n')
if self.mesh.calculated is None:
self.mesh.solve_elements()
for i in self.mesh.elements.keys():
key = int(i)
# To do: Create a method to provide all elements with the
# surrounding conditions.
self.mesh.elements[i].D = self.D
self.mesh.elements[i].h = self.h
self.mesh.elements[i].e = self.e
self.mesh.elements[i].t_ext = self.t_ext
is_point = isinstance(self.mesh.elements[i], Point1) # Is it a Point1
if not is_point:
print(f"Calculating Element({key})'s Stiffness Matrix")
# self.elements[i].solve_heat_stiff()
print(f"Calculating Element({key})'s Load Vector")
self.mesh.elements[i].get_C()
# self.elements[i].solve_heat_load()
print(f"Applying Element({key})'s Stiffness Matrix and Load Vector"
f"to the global Stiffness Matrix and Global Load Vector")
nodes_indexes = []
for j in self.mesh.elements[i].nodes.keys():
# Create a list with all the element nodes indexes
nodes_indexes.append(self.mesh.elements[i].nodes[j].index)
for j in range(self.mesh.elements[i].num_nodes):
# Assemble Stiffness matrix
for k in range(self.mesh.elements[i].num_nodes):
self.gbl_stiff[nodes_indexes[j], nodes_indexes[k]] += (
self.mesh.elements[i].K_e[j, k]
)
# Assemble Load vector
self.gbl_load[nodes_indexes[j]] += (
self.mesh.elements[i].F_e[j]
)
return None
def set_environment(self, t_ext, h, e, dirichlet, dirichlet_nodes, k_x,
k_y=None, k_xy=None):
'''
        Provide the environment variables to the mesh:
        't_ext' the temperature of the surrounding air.
        'h' the convection coefficient.
        'e' the thickness of the shell.
[D] with its diffusion factors (K) will be as follows:
[D] = [k_x k_xy]
[k_xy k_y]
'''
print('Applying Environment')
self.t_ext = t_ext
self.h = h
self.e = e
self.dirichlet = dirichlet
self.dirichlet_nodes = dirichlet_nodes # table with nodes affected.
if k_y is None:
k_y = k_x
if k_xy is None:
k_xy = 0
self.D = sy.Matrix([[k_x, k_xy], [k_xy, k_y]])
print('Environment Applied')
def update_stiff_load_dirichlet(self):
'''
        Impose the Dirichlet value ('self.dirichlet') on every node listed in
        'self.dirichlet_nodes'.
        This clears the row and column associated with each of these nodes,
        effectively cancelling the influence of neighboring nodes on the Dirichlet nodes.
'''
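        # Sketch of the reduction performed below (illustrative 3-node system with a
        # prescribed value Td at node 0):
        #
        #   [k00 k01 k02] [T0]   [f0]          [1    0    0 ] [T0]   [Td         ]
        #   [k10 k11 k12] [T1] = [f1]   --->   [0  k11  k12 ] [T1] = [f1 - k10*Td]
        #   [k20 k21 k22] [T2]   [f2]          [0  k21  k22 ] [T2]   [f2 - k20*Td]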
if self.gbl_stiff is None or self.gbl_load is None:
self.assemble_stiff_load()
new_gbl_stiff = self.gbl_stiff
new_gbl_load = self.gbl_load
print('\n# IMPOSING DIRICHLET #\n')
for i in self.dirichlet_nodes:
print(f"Imposing Dirichlet on Node({i}).")
new_gbl_load -= (new_gbl_stiff[:, self.mesh.nodes[i].index]
* self.dirichlet)
for j in range(self.mesh.num_nodes):
new_gbl_stiff[self.mesh.nodes[i].index, j] = 0
new_gbl_stiff[j, self.mesh.nodes[i].index] = 0
new_gbl_stiff[self.mesh.nodes[i].index, self.mesh.nodes[i].index] = 1
new_gbl_load[self.mesh.nodes[i].index] = self.dirichlet
self.gbl_stiff = new_gbl_stiff
self.gbl_load = new_gbl_load
self.dirichlet_applied = True
return None
class SteadyStructureSolver(Solver):
'''
2-dimensional steady state structure solver.
'''
def __init__(self, meshfile):
Solver.__init__(self, meshfile, "SSMech")
| mit |
apdavison/sumatra | test/system/test_ircr.py | 2 | 6563 | """
A run through of basic Sumatra functionality.
As our example code, we will use a Python program for analyzing scanning
electron microscope (SEM) images of glass samples. This example was taken from
an online SciPy tutorial at http://scipy-lectures.github.com/intro/summary-exercises/image-processing.html
Usage:
nosetests -v test_ircr.py
or:
python test_ircr.py
"""
from __future__ import print_function
from __future__ import unicode_literals
from builtins import input
# Requirements: numpy, scipy, matplotlib, mercurial, sarge
import os
from datetime import datetime
import utils
from utils import (setup, teardown, run_test, build_command, assert_file_exists, assert_in_output,
assert_config, assert_label_equal, assert_records, assert_return_code,
edit_parameters, expected_short_list, substitute_labels)
from functools import partial
import re
repository = "https://bitbucket.org/apdavison/ircr2013"
#repository = "/Volumes/USERS/andrew/dev/ircr2013" # during development
#repository = "/Users/andrew/dev/ircr2013"
def modify_script(filename):
def wrapped():
with open(os.path.join(utils.working_dir, filename), 'r') as fp:
script = fp.readlines()
with open(os.path.join(utils.working_dir, filename), 'w') as fp:
for line in script:
if "print(mean_bubble_size, median_bubble_size)" in line:
fp.write('print("Mean:", mean_bubble_size)\n')
fp.write('print("Median:", median_bubble_size)\n')
else:
fp.write(line)
return wrapped
test_steps = [
("Get the example code",
"hg clone %s ." % repository,
assert_in_output, "updating to branch default"),
("Run the computation without Sumatra",
"python glass_sem_analysis.py default_parameters MV_HFV_012.jpg",
assert_in_output, re.compile(r"2416\.863[0-9]* 60\.0"),
assert_file_exists, os.path.join("Data", datetime.now().strftime("%Y%m%d")), # Data subdirectory contains another subdirectory labelled with today's date)
), # assert(subdirectory contains three image files).
("Set up a Sumatra project",
"smt init -d Data -i . ProjectGlass",
assert_in_output, "Sumatra project successfully set up"),
("Run the ``glass_sem_analysis.py`` script with Sumatra",
"smt run -e python -m glass_sem_analysis.py -r 'initial run' default_parameters MV_HFV_012.jpg",
assert_in_output, (re.compile(r"2416\.863[0-9]* 60\.0"), "histogram.png")),
("Comment on the outcome",
"smt comment 'works fine'"),
("Set defaults",
"smt configure -e python -m glass_sem_analysis.py"),
("Look at the current configuration of the project",
"smt info",
assert_config, {"project_name": "ProjectGlass", "executable": "Python", "main": "glass_sem_analysis.py",
"code_change": "error"}),
edit_parameters("default_parameters", "no_filter", "filter_size", 1),
("Run with changed parameters and user-defined label",
"smt run -l example_label -r 'No filtering' no_filter MV_HFV_012.jpg", # TODO: assert(results have changed)
assert_in_output, "phases.png",
assert_label_equal, "example_label"),
("Change parameters from the command line",
"smt run -r 'Trying a different colourmap' default_parameters MV_HFV_012.jpg phases_colourmap=hot"), # assert(results have changed)
("Add another comment",
"smt comment 'The default colourmap is nicer'"), #TODO add a comment to an older record (e.g. this colourmap is nicer than 'hot')")
("Add tags on the command line",
build_command("smt tag mytag {0} {1}", "labels")),
modify_script("glass_sem_analysis.py"),
("Run the modified code",
"smt run -r 'Added labels to output' default_parameters MV_HFV_012.jpg",
assert_return_code, 1,
assert_in_output, "Code has changed, please commit your changes"),
("Commit changes...",
"hg commit -m 'Added labels to output' -u testuser"),
("...then run again",
"smt run -r 'Added labels to output' default_parameters MV_HFV_012.jpg"), # assert(output has changed as expected)
#TODO: make another change to the Python script
("Change configuration to store diff",
"smt configure --on-changed=store-diff"),
("Run with store diff",
"smt run -r 'made a change' default_parameters MV_HFV_012.jpg"), # assert(code runs, stores diff)
("Review previous computations - get a list of labels",
"smt list",
assert_in_output, expected_short_list),
("Review previous computations in detail",
"smt list -l",
assert_records, substitute_labels([
{'label': 0, 'executable_name': 'Python', 'outcome': 'works fine', 'reason': 'initial run',
'version': '6038f9c500d1', 'vcs': 'Mercurial', 'script_arguments': '<parameters> MV_HFV_012.jpg',
'main_file': 'glass_sem_analysis.py'}, # TODO: add checking of parameters
{'label': 1, 'outcome': '', 'reason': 'No filtering'},
{'label': 2, 'outcome': 'The default colourmap is nicer', 'reason': 'Trying a different colourmap'},
{'label': 3, 'outcome': '', 'reason': 'Added labels to output'},
{'label': 4, 'outcome': '', 'reason': 'made a change'}, # TODO: add checking of diff
])),
("Filter the output of ``smt list`` based on tag",
"smt list mytag",
#assert(list is correct)
),
("Export Sumatra records as JSON.",
"smt export",
assert_file_exists, ".smt/records_export.json"),
]
def test_all():
"""Test generator for Nose."""
for step in test_steps:
if callable(step):
step()
else:
test = partial(*tuple([run_test] + list(step[1:])))
test.description = step[0]
yield test
# Still to test:
#
#.. LaTeX example
#.. note that not only Python is supported - separate test
#.. play with labels? uuid, etc.
#.. move recordstore
#.. migrate datastore
#.. repeats
#.. moving forwards and backwards in history
#.. upgrades (needs Docker)
if __name__ == '__main__':
# Run the tests without using Nose.
setup()
for step in test_steps:
if callable(step):
step()
else:
print(step[0]) # description
run_test(*step[1:])
response = input("Do you want to delete the temporary directory (default: yes)? ")
if response not in ["n", "N", "no", "No"]:
teardown()
else:
print("Temporary directory %s not removed" % utils.temporary_dir)
| bsd-2-clause |
puruckertom/ubertool | ubertool/therps/tests/test_therps_unittest.py | 1 | 63527 | from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import pandas.util.testing as pdt
import sys
from tabulate import tabulate
import unittest
#find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..therps_exe import Therps
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
class Testtherps(unittest.TestCase):
"""
    Unit tests for the THerps model.
"""
print("THerps unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for therps unit tests.
:return:
"""
pass
test = {}
# setup the test as needed
# e.g. pandas to open therps qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for therps unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_therps_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty therps object
therps_empty = Therps(df_empty, df_empty)
return therps_empty
def test_convert_app_intervals(self):
"""
unit test for function convert_app_intervals
        the method converts the number of applications and the application interval into lists of
        application rates and day-of-year numbers; this is so that the same concentration timeseries
        method from trex_functions can be reused here
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result_day_out = pd.Series([], dtype="object")
result_app_rates = pd.Series([], dtype="object")
expected_result_day_out = pd.Series([[1,8,15], [1], [1,22,43,64], [1,8,15]], dtype = 'object')
expected_result_app_rates = pd.Series([[1.2,1.2,1.2], [2.3], [2.5,2.5,2.5,2.5], [5.1,5.1,5.1]], dtype = 'object')
try:
therps_empty.num_apps = [3,1,4,3]
therps_empty.app_interval = [7,1,21,7]
therps_empty.application_rate = [1.2, 2.3, 2.5,5.1]
result_day_out, result_app_rates = therps_empty.convert_app_intervals()
#using pdt.assert_series_equal assertion instead of npt.assert_allclose
#because npt.assert_allclose does not handle uneven object/series lists
#Note that pdt.assert_series_equal requires object/series to be exactly equal
#this is ok in this instance because we are not "calculating" real numbers
#but rather simply distributing them from an input value into a new object/series
pdt.assert_series_equal(result_app_rates,expected_result_app_rates)
pdt.assert_series_equal(result_day_out,expected_result_day_out)
finally:
tab1 = [result_app_rates, expected_result_app_rates]
tab2 = [result_day_out, expected_result_day_out]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([1.734, 9.828, 0.702], dtype = 'float')
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.food_multiplier_init_sg = 15.
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(therps_empty.frac_act_ing)):
result[i] = therps_empty.conc_initial(i, therps_empty.app_rates[i][0], therps_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([9.726549e-4, 0.08705506, 9.8471475], dtype = 'float')
try:
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = therps_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([.04556, .1034, .9389], dtype = 'float')
try:
therps_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = therps_empty.percent_to_frac(therps_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([48.97314, 136.99477, 95.16341], dtype = 'float')
try:
therps_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
therps_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
therps_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
therps_empty.aw_herp_sm = pd.Series([1.5, 40., 250.], dtype = 'float') # use values for small, medium, large in this test
result = therps_empty.at_bird(therps_empty.aw_herp_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_mamm(self):
"""
unittest for function fi_mamm:
food_intake = (0.621 * (aw_mamm ** 0.564)) / (1 - mf_w_mamm)
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([3.178078, 23.06301, 53.15002], dtype = 'float')
try:
therps_empty.mf_w_mamm_2 = pd.Series([0.1, 0.8, 0.9], dtype='float')
therps_empty.bw_frog_prey_mamm = pd.Series([15., 35., 45.], dtype='float')
result = therps_empty.fi_mamm(therps_empty.bw_frog_prey_mamm, therps_empty.mf_w_mamm_2)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_herp(self):
"""
unittest for function fi_herp: Food intake for herps.
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([0.02932976, 0.3854015, 1.054537], dtype = 'float')
try:
therps_empty.mf_w_mamm_2 = pd.Series([0.1, 0.8, 0.9], dtype='float')
therps_empty.bw_frog_prey_herp = pd.Series([2.5, 10., 15.], dtype='float')
result = therps_empty.fi_herp(therps_empty.bw_frog_prey_herp, therps_empty.mf_w_mamm_2)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_diet_timeseriesA(self):
"""
combined unit test for methods eec_diet_timeseries;
* this test calls eec_diet_timeseries, which in turn calls conc_initial and conc_timestep
* this unittest executes the timeseries method for three sets of inputs (i.e., model simulations)
* each timeseries is the target of an assertion test
* the complete timeseries is not compared between actual and expected results
* rather selected values from each series are extracted and placed into a list for comparison
* the values extracted include the first and last entry in the timeseries (first and last day of simulated year)
* additional values are extracted on each day of the year for which there is a pesticide application
        * the code here is not elegant (each simulation timeseries is checked within its own code segment, as opposed to
        * getting the indexing squared away so that a single piece of code would loop through all simulations);
        * perhaps at a later time this can be revisited and made more elegant
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
conc_timeseries = pd.Series([], dtype = 'object')
result1 = pd.Series([], dtype = 'float')
result2 = pd.Series([], dtype = 'float')
result3 = pd.Series([], dtype = 'float')
expected_results1 = [0.0, 1.734, 6.791566e-5]
expected_results2 = [9.828, 145.341, 80.93925, 20.6686758, 1.120451e-18]
expected_results3 = [0.0, 0.702, 0.5656463, 0.087722]
num_app_days = pd.Series([], dtype='int')
try:
#define needed inputs for method
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.food_multiplier_init_sg = 15.
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
#specifying 3 different application scenarios (i.e., model simulations) of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
#therps_empty.num_apps = [0] * len(therps_empty.app_rates) #set length of num_apps list (no longer needed)
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'series of app-rates and app_days do not match'
#run method and get timeseries (all simulations will be executed here)
conc_timeseries = therps_empty.eec_diet_timeseries(therps_empty.food_multiplier_init_sg)
#let's extract from each timeseries values on the first day, each day of an application, and the last day
#these will be placed into the 'result#' and used for the allclose assertion
#need to execute this extraction for each simulation timeseries because 'allclose' will not handle uneven series lists
#first simulation result
num_values_to_check = len(therps_empty.app_rates[0]) + 2 #number of applications plus first and last timeseries elements
if (therps_empty.day_out[0][0] == 1): #if first app day is first day of year
num_values_to_check = num_values_to_check - 1
result1 = [0.] * num_values_to_check
result1[0] = float(conc_timeseries[0][0]) #first day of timeseries
result1[-1] = float(conc_timeseries[0][370]) #last day of timeseries
num_values_to_check = len(therps_empty.app_rates[0])
if ((num_values_to_check) >= 1):
result_index = 1
for i in range(0 ,num_values_to_check):
if(therps_empty.day_out[0][i] != 1):
series_index = therps_empty.day_out[0][i] - 1
result1[result_index] = float(conc_timeseries[0][series_index])
result_index = result_index + 1
npt.assert_allclose(result1,expected_results1,rtol=1e-4, atol=0, err_msg='', verbose=True)
#second simulation result
num_values_to_check = len(therps_empty.app_rates[1]) + 2
if (therps_empty.day_out[1][0] == 1): #if first app day is first day of year
num_values_to_check = num_values_to_check - 1
result2 = [0.] * num_values_to_check
result2[0] = float(conc_timeseries[1][0]) #first day of timeseries
result2[-1] = float(conc_timeseries[1][370]) #last day of timeseries
num_values_to_check = len(therps_empty.app_rates[1])
if ((num_values_to_check) >= 1):
result_index = 1
for i in range(0 ,num_values_to_check):
if(therps_empty.day_out[1][i] != 1):
series_index = therps_empty.day_out[1][i] - 1
result2[result_index] = float(conc_timeseries[1][series_index])
result_index = result_index + 1
npt.assert_allclose(result2,expected_results2,rtol=1e-4, atol=0, err_msg='', verbose=True)
#3rd simulation result
num_values_to_check = len(therps_empty.app_rates[2]) + 2
if (therps_empty.day_out[2][0] == 1): #if first app day is first day of year
num_values_to_check = num_values_to_check - 1
result3 = [0.] * num_values_to_check
result3[0] = float(conc_timeseries[2][0]) #first day of timeseries
result3[-1] = float(conc_timeseries[2][370]) #last day of timeseries
num_values_to_check = len(therps_empty.app_rates[2])
if ((num_values_to_check) >= 1):
result_index = 1
for i in range(0 ,num_values_to_check):
if(therps_empty.day_out[2][i] != 1):
series_index = therps_empty.day_out[2][i] - 1
result3[result_index] = float(conc_timeseries[2][series_index])
result_index = result_index + 1
npt.assert_allclose(result3,expected_results3,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab1 = [result1, expected_results1]
tab2 = [result2, expected_results2]
tab3 = [result3, expected_results3]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print(tabulate(tab2, headers='keys', tablefmt='rst'))
print(tabulate(tab3, headers='keys', tablefmt='rst'))
return
def test_eec_diet_timeseries(self):
"""
combined unit test for methods eec_diet_timeseries;
* this test calls eec_diet_timeseries, which in turn calls conc_initial and conc_timestep
* this unittest executes the timeseries method for three sets of inputs (i.e., model simulations)
* each timeseries (i.e., simulation result) is the target of an assertion test
* the complete timeseries is not compared between actual and expected results
* rather selected values from each series are extracted and placed into a list for comparison
* the values extracted include the first and last entry in the timeseries (first and last day of simulated year)
* additional values are extracted on each day of the year for which there is a pesticide application
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
conc_timeseries = pd.Series([], dtype = 'object')
result = pd.Series([], dtype = 'object')
expected_results = pd.Series([[0.0, 1.734, 6.791566e-5], [9.828, 145.341, 80.93925, 20.6686758, 1.120451e-18],
[0.0, 0.702, 0.5656463, 0.087722]], dtype = 'object')
num_app_days = pd.Series([], dtype='int')
try:
#define needed inputs for method
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.food_multiplier_init_sg = 15.
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
#specifying 3 different application scenarios (i.e., model simulations) of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
#therps_empty.num_apps = [0] * len(therps_empty.app_rates) #set length of num_apps list (no longer needed)
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'series of app-rates and app_days do not match'
#run method and get timeseries (all simulations will be executed here)
conc_timeseries = therps_empty.eec_diet_timeseries(therps_empty.food_multiplier_init_sg)
#let's extract from each timeseries values on the first day, each day of an application, and the last day
#these will be placed into 'result' and used for the allclose assertion
#loop through simulation results extracting values of interest per timeseries
for isim in range(len(therps_empty.app_rates)):
num_values_to_check = len(therps_empty.app_rates[isim]) + 2 #number of applications plus first and last timeseries elements
if (therps_empty.day_out[isim][0] == 1): #if first app day is first day of year
num_values_to_check = num_values_to_check - 1
result[isim] = [0.] * num_values_to_check #initialize result list for this simulation
result[isim][0] = float(conc_timeseries[isim][0]) #first day of timeseries
result[isim][-1] = float(conc_timeseries[isim][370]) #last day of timeseries
num_values_to_check = len(therps_empty.app_rates[isim]) #just the application days for this loop
if ((num_values_to_check) >= 1):
result_index = 1
for i in range(0 ,num_values_to_check):
if(therps_empty.day_out[isim][i] != 1): #application day of 1 has been processed above
series_index = therps_empty.day_out[isim][i] - 1
result[isim][result_index] = float(conc_timeseries[isim][series_index])
result_index = result_index + 1
npt.assert_allclose(result[isim][:],expected_results[isim][:],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
print("\n")
print(inspect.currentframe().f_code.co_name)
for isim in range(len(therps_empty.app_rates)):
tab1 = [result[isim], expected_results[isim]]
print(tabulate(tab1, headers='keys', tablefmt='rst'))
return
def test_eec_diet_max(self):
"""
unit test for method eec_diet_max;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep'
* this test calls eec_diet_max, which in turn calls eec_diet_timeseries (which produces
concentration timeseries), which in turn calls conc_initial and conc_timestep
* eec_diet_max processes the timeseries and extracts the maximum values
        * the assertion check is that the maximum value from each timeseries is correctly identified
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([1.734, 145.3409, 0.702], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.food_multiplier_init_sg = 15.
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
#therps_empty.num_apps = [0] * len(therps_empty.app_rates) #set length of num_apps list (no longer needed)
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'series of app-rates and app_days do not match'
result = therps_empty.eec_diet_max(therps_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_dose_mamm(self):
"""
unit test for function eec_dose_mamm;
internal calls to 'eec_diet_mamm' --> 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep'
--> fi_mamm
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'eec_dose_mamm' are correctly implemented
* methods called inside of 'eec_dose_mamm' are not retested/recalculated
* for methods inside of 'eec_dose_mamm' the same values were used here that were used in the unit tests for that method
* thus, only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
* only calculations done for this test are for those unique to this method
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([3.673858, 83.80002, 0.1492452], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.food_multiplier_init_sg = 15.
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
#therps_empty.mf_w_mamm_2 = pd.Series([0.1, 0.8, 0.9], dtype='float')
#therps_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
therps_empty.aw_herp_sm = pd.Series([1.5, 40., 250.], dtype = 'float') # use values for small, medium, large in this test
therps_empty.bw_frog_prey_mamm = pd.Series([15., 35., 45.], dtype='float')
therps_empty.mf_w_mamm_2 = pd.Series([0.1, 0.8, 0.9], dtype='float')
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.eec_dose_mamm(therps_empty.food_multiplier_init_sg, therps_empty.aw_herp_sm,
therps_empty.bw_frog_prey_mamm, therps_empty.mf_w_mamm_2)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_dose_mamm(self):
"""
unit test for function arq_dose_mamm;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_dose_mamm' are correctly implemented
* methods called inside of 'arq_dose_mamm' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([0.07501781, 0.6117023, 0.0015683], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
therps_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
therps_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
therps_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
therps_empty.food_multiplier_init_sg = 15.
therps_empty.mf_w_mamm_2 = pd.Series([0.1, 0.8, 0.9], dtype='float')
therps_empty.aw_herp_sm = pd.Series([1.5, 40., 250.], dtype = 'float') # use values for small, medium, large in this test
therps_empty.bw_frog_prey_mamm = pd.Series([15., 35., 45.], dtype='float')
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.arq_dose_mamm(therps_empty.food_multiplier_init_sg, therps_empty.aw_herp_sm,
therps_empty.bw_frog_prey_mamm, therps_empty.mf_w_mamm_2)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_diet_mamm(self):
"""
unit test for function arq_diet_mamm;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_diet_mamm' are correctly implemented
* methods called inside of 'arq_diet_mamm' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([0.00293908, 0.03830858, 0.00165828], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.food_multiplier_init_sg = 15.
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
therps_empty.lc50_bird = pd.Series([125., 2500., 500.], dtype = 'float')
therps_empty.bw_frog_prey_mamm = pd.Series([15., 35., 45.], dtype='float')
therps_empty.mf_w_mamm_2 = pd.Series([0.1, 0.8, 0.9], dtype='float')
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.arq_diet_mamm(therps_empty.food_multiplier_init_sg,
therps_empty.bw_frog_prey_mamm, therps_empty.mf_w_mamm_2)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_crq_diet_mamm(self):
"""
unit test for function crq_diet_mamm;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'crq_diet_mamm' are correctly implemented
* methods called inside of 'crq_diet_mamm' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([0.01469543, 0.9577145, 0.01507527], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.food_multiplier_init_sg = 15.
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
therps_empty.noaec_bird = pd.Series([25., 100., 55.], dtype = 'float')
therps_empty.bw_frog_prey_mamm = pd.Series([15., 35., 45.], dtype='float')
therps_empty.mf_w_mamm_2 = pd.Series([0.1, 0.8, 0.9], dtype='float')
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.crq_diet_mamm(therps_empty.food_multiplier_init_sg,
therps_empty.bw_frog_prey_mamm, therps_empty.mf_w_mamm_2 )
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_crq_diet_tp(self):
"""
unit test for function crq_diet_tp; amphibian chronic dietary-based risk quotients for tp
internal calls to : 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'crq_diet_tp' are correctly implemented
* methods called inside of 'crq_diet_tp' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([8.1372e-4, 0.05601463, 8.973091e-4], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.food_multiplier_init_blp = 15.
therps_empty.bw_frog_prey_herp = pd.Series([2.5, 10., 15.], dtype='float')
therps_empty.noaec_bird = pd.Series([25., 100., 55.], dtype = 'float')
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
therps_empty.awc_herp_sm = pd.Series([10., 80., 90.], dtype = 'float') # initialize as percent to match model input
therps_empty.awc_herp_sm = therps_empty.percent_to_frac(therps_empty.awc_herp_sm) # convert to mass fraction water content
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.crq_diet_tp(therps_empty.food_multiplier_init_blp,
therps_empty.bw_frog_prey_herp, therps_empty.awc_herp_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_crq_diet_herp(self):
"""
        unit test for function crq_diet_herp; amphibian chronic dietary-based risk quotients
internal calls to : 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'crq_diet_herp' are correctly implemented
* methods called inside of 'crq_diet_herp' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([0.06936, 1.4534, 0.01276364], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.food_multiplier_init_blp = 15.
therps_empty.noaec_bird = pd.Series([25., 100., 55.], dtype = 'float')
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.crq_diet_herp(therps_empty.food_multiplier_init_blp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_diet_tp(self):
"""
unit test for function arq_diet_tp; amphibian acute dietary-based risk quotients for tp
internal calls to : 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
'fi_herp'
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_diet_tp' are correctly implemented
* methods called inside of 'arq_diet_tp' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([1.62745e-4, 0.002240583, 9.870466e-5], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.lc50_bird = pd.Series([125., 2500., 500.], dtype = 'float')
therps_empty.food_multiplier_init_blp = 15.
therps_empty.bw_frog_prey_herp = pd.Series([2.5, 10., 15.], dtype='float')
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
therps_empty.awc_herp_sm = pd.Series([10., 80., 90.], dtype = 'float') # initialize as percent to match model input
therps_empty.awc_herp_sm = therps_empty.percent_to_frac(therps_empty.awc_herp_sm) # convert to mass fraction water content
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.arq_diet_tp(therps_empty.food_multiplier_init_blp,
therps_empty.bw_frog_prey_herp, therps_empty.awc_herp_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_diet_herp(self):
"""
unit test for function arq_diet_herp; amphibian acute dietary-based risk quotients
internal calls to : 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_diet_herp' are correctly implemented
* methods called inside of 'arq_diet_herp' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([0.013872, 0.0581364, 0.001404], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.food_multiplier_mean_fp = 15.
therps_empty.lc50_bird = pd.Series([125., 2500., 500.], dtype = 'float')
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.arq_diet_herp(therps_empty.food_multiplier_mean_fp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_dose_tp(self):
"""
unit test for function arq_dose_tp; amphibian acute dose-based risk quotients for tp
internal calls to : 'eec_dose_herp' --> 'fi_herp' --> ;
'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
'at_bird'
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_dose_tp' are correctly implemented
* methods called inside of 'arq_dose_tp' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([1.641716e-5, 0.001533847, 1.92511e-5], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
therps_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
therps_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
therps_empty.food_multiplier_init_blp = 15.
therps_empty.aw_herp_sm = pd.Series([1.5, 40., 250.], dtype = 'float') # use values for small, medium, large in this test
therps_empty.bw_frog_prey_herp = pd.Series([2.5, 10., 15.], dtype='float')
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
therps_empty.awc_herp_sm = pd.Series([10., 80., 90.], dtype = 'float') # initialize as percent to match model input
therps_empty.awc_herp_sm = therps_empty.percent_to_frac(therps_empty.awc_herp_sm) # convert to mass fraction water content
therps_empty.awc_herp_md = pd.Series([70., 85., 90.], dtype = 'float')
therps_empty.awc_herp_md = therps_empty.percent_to_frac(therps_empty.awc_herp_md)
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.arq_dose_tp(therps_empty.food_multiplier_init_blp,
therps_empty.aw_herp_sm, therps_empty.bw_frog_prey_herp,
therps_empty.awc_herp_sm, therps_empty.awc_herp_md)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_dose_herp(self):
"""
unit test for function arq_dose_herp; amphibian acute dose-based risk quotients
internal calls to : 'eec_dose_herp' --> 'fi_herp' --> ;
'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
'at_bird'
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_dose_herp' are correctly implemented
* methods called inside of 'arq_dose_herp' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([4.664597e-4, 0.02984901, 2.738237e-4], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
therps_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
therps_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
therps_empty.aw_herp_sm = pd.Series([1.5, 40., 250.], dtype = 'float') # use values for small, medium, large in this test
therps_empty.food_multiplier_mean_blp = 15.
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
therps_empty.awc_herp_sm = pd.Series([10., 80., 90.], dtype = 'float') # initialize as percent to match model input
therps_empty.awc_herp_sm = therps_empty.percent_to_frac(therps_empty.awc_herp_sm) # convert to mass fraction water content
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.arq_dose_herp(therps_empty.aw_herp_sm, therps_empty.awc_herp_sm,
therps_empty.food_multiplier_mean_blp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_dose_tp(self):
"""
        unit test for function eec_dose_tp; amphibian dose-based eecs for tp
internal calls to : "fi_herp";
'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'eec_dose_tp' are correctly implemented
* methods called inside of 'eec_dose_tp' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([8.040096e-4, 0.2101289, 0.001831959], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.food_multiplier_mean_blp = 15.
therps_empty.aw_herp_sm = pd.Series([1.5, 40., 250.], dtype = 'float') # use values for small, medium, large in this test
therps_empty.bw_frog_prey_herp = pd.Series([2.5, 10., 15.], dtype='float')
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
therps_empty.awc_herp_sm = pd.Series([10., 80., 90.], dtype = 'float') # initialize as percent to match model input
therps_empty.awc_herp_sm = therps_empty.percent_to_frac(therps_empty.awc_herp_sm) # convert to mass fraction water content
therps_empty.awc_herp_md = pd.Series([70., 85., 90.], dtype = 'float')
therps_empty.awc_herp_md = therps_empty.percent_to_frac(therps_empty.awc_herp_md)
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.eec_dose_tp(therps_empty.food_multiplier_mean_blp,
therps_empty.aw_herp_sm, therps_empty.bw_frog_prey_herp,
therps_empty.awc_herp_sm, therps_empty.awc_herp_md)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_dose_herp(self):
"""
unit test for function eec_dose_herp; amphibian Dose based eecs
internal calls to : "fi_herp";
'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
        * this test verifies that the logic & calculations performed within the 'eec_dose_herp' are correctly implemented
* methods called inside of 'eec_dose_herp' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([0.02284427, 4.089158, 0.02605842], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.aw_herp_sm = pd.Series([1.5, 40., 250.], dtype = 'float') # use values for small, medium, large in this test
therps_empty.food_multiplier_init_blp = 15.
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
therps_empty.awc_herp_sm = pd.Series([10., 80., 90.], dtype = 'float') # initialize as percent to match model input
therps_empty.awc_herp_sm = therps_empty.percent_to_frac(therps_empty.awc_herp_sm) # convert to mass fraction water content
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.eec_dose_herp(therps_empty.aw_herp_sm, therps_empty.awc_herp_sm,
therps_empty.food_multiplier_init_blp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_diet_tp(self):
"""
unit test for function eec_diet_tp; Dietary terrestrial phase based eecs
internal calls to : "fi_herp";
'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'eec_diet_tp' are correctly implemented
* methods called inside of 'eec_diet_tp' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([0.02034312, 5.601457, 0.04935233], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.food_multiplier_mean_sg = 15.
therps_empty.bw_frog_prey_herp = pd.Series([2.5, 10., 15.], dtype='float')
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
therps_empty.awc_herp_sm = pd.Series([10., 80., 90.], dtype = 'float') # initialize as percent to match model input
therps_empty.awc_herp_sm = therps_empty.percent_to_frac(therps_empty.awc_herp_sm) # convert to mass fraction water content
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.eec_diet_tp(therps_empty.food_multiplier_mean_sg,
therps_empty.bw_frog_prey_herp, therps_empty.awc_herp_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_diet_mamm(self):
"""
        unit test for function eec_diet_mamm; dietary mammal-based eecs
internal calls to : "fi_mamm";
'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'eec_diet_mamm' are correctly implemented
* methods called inside of 'eec_diet_mamm' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
therps_empty = self.create_therps_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([0.3673858, 95.771454, 0.829140], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
therps_empty.food_multiplier_mean_sg = 15.
therps_empty.bw_frog_prey_mamm = pd.Series([15., 35., 45.], dtype='float')
therps_empty.mf_w_mamm_2 = pd.Series([0.1, 0.8, 0.9], dtype='float')
therps_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype = 'float')
therps_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype = 'float')
#specifying 3 different application scenarios of 1, 4, and 2 applications
therps_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
therps_empty.day_out = pd.Series([[5], [1, 11, 21, 51], [150, 250]], dtype='object')
for i in range(len(therps_empty.app_rates)):
therps_empty.num_apps[i] = len(therps_empty.app_rates[i])
num_app_days[i] = len(therps_empty.day_out[i])
assert (therps_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
result = therps_empty.eec_diet_mamm(therps_empty.food_multiplier_mean_sg,
therps_empty.bw_frog_prey_mamm, therps_empty.mf_w_mamm_2)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
# unittest will
# 1) call the setup method,
# 2) then call every method starting with "test",
# 3) then the teardown method
if __name__ == '__main__':
unittest.main()
#pass | unlicense |
TinyOS-Camp/DDEA-DEV | Archive/[14_09_12] DDEA_example_code/radar_example.py | 10 | 1434 | import numpy as np
import matplotlib.pyplot as plt
import pprint
import mytool as mt
import radar_chart
# Load from binaries
avgsensor_names = mt.loadObjectBinary("tmp/avgsensor_names.bin")
Conditions_dict = mt.loadObjectBinary("tmp/Conditions_dict.bin")
Events_dict = mt.loadObjectBinary("tmp/Events_dict.bin")
wf_tuple_t = mt.loadObjectBinary("tmp/wf_tuple_t.bin")
wf_tuple_d = mt.loadObjectBinary("tmp/wf_tuple_d.bin")
wf_tuple_h = mt.loadObjectBinary("tmp/wf_tuple_h.bin")
wf_tuple_e = mt.loadObjectBinary("tmp/wf_tuple_e.bin")
wf_tuple_c = mt.loadObjectBinary("tmp/wf_tuple_c.bin")
sensor_no = len(avgsensor_names)
# convert 'inf' to 1
sen_t = [1 if val == float("inf") else val for val in wf_tuple_t[3]]
sen_d = [1 if val == float("inf") else val for val in wf_tuple_d[3]]
sen_h = [1 if val == float("inf") else val for val in wf_tuple_h[3]]
sen_e = [1 if val == float("inf") else val for val in wf_tuple_e[3]]
sen_c = [1 if val == float("inf") else val for val in wf_tuple_c[3]]
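# each SEN row collects the five per-factor sensitivity values (temperature, dew point,
# humidity, events, conditions) for one sensor; TOTAL_SEN sums them so the six most
# sensitive sensors can be picked out below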
SEN = [[sen_t[i], sen_d[i], sen_h[i], sen_e[i], sen_c[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
idx = np.argsort(TOTAL_SEN)[-6:] # Best 6 sensors
spoke_labels = ["Temperature", "Dew Point", "Humidity", "Events", "Conditions"]
data = [SEN[i] for i in idx]
sensor_labels = [avgsensor_names[i] for i in idx]
radar_chart.plot(data, spoke_labels, sensor_labels, saveto="radar.png")
# plt.show()
| gpl-2.0 |
theoryno3/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
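# G is the Gram matrix X.T @ X and Xy holds the correlations X.T @ y; the
# *_gram variants of OMP exercised below work from these precomputed quantities
# instead of the raw design matrix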
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
cloudera/ibis | ibis/backends/clickhouse/client.py | 1 | 12169 | import re
from collections import OrderedDict
import numpy as np
import pandas as pd
from clickhouse_driver.client import Client as _DriverClient
from pkg_resources import parse_version
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.schema as sch
import ibis.expr.types as ir
from ibis.backends.base_sqlalchemy.compiler import DDL
from ibis.client import Database, DatabaseEntity, Query, SQLClient
from ibis.config import options
from ibis.util import log
from .compiler import ClickhouseDialect, build_ast
fully_qualified_re = re.compile(r"(.*)\.(?:`(.*)`|(.*))")
base_typename_re = re.compile(r"(\w+)")
_clickhouse_dtypes = {
'Null': dt.Null,
'Nothing': dt.Null,
'UInt8': dt.UInt8,
'UInt16': dt.UInt16,
'UInt32': dt.UInt32,
'UInt64': dt.UInt64,
'Int8': dt.Int8,
'Int16': dt.Int16,
'Int32': dt.Int32,
'Int64': dt.Int64,
'Float32': dt.Float32,
'Float64': dt.Float64,
'String': dt.String,
'FixedString': dt.String,
'Date': dt.Date,
'DateTime': dt.Timestamp,
}
_ibis_dtypes = {v: k for k, v in _clickhouse_dtypes.items()}
_ibis_dtypes[dt.String] = 'String'
class ClickhouseDataType:
__slots__ = 'typename', 'nullable'
def __init__(self, typename, nullable=False):
m = base_typename_re.match(typename)
base_typename = m.groups()[0]
if base_typename not in _clickhouse_dtypes:
raise com.UnsupportedBackendType(typename)
self.typename = base_typename
self.nullable = nullable
def __str__(self):
if self.nullable:
return 'Nullable({})'.format(self.typename)
else:
return self.typename
def __repr__(self):
return '<Clickhouse {}>'.format(str(self))
@classmethod
def parse(cls, spec):
# TODO(kszucs): spare parsing, depends on clickhouse-driver#22
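        # e.g. (assumed inputs) 'Nullable(Int64)' -> cls('Int64', nullable=True)
        # and 'Int64' -> cls('Int64', nullable=False)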
if spec.startswith('Nullable'):
return cls(spec[9:-1], nullable=True)
else:
return cls(spec)
def to_ibis(self):
return _clickhouse_dtypes[self.typename](nullable=self.nullable)
@classmethod
def from_ibis(cls, dtype, nullable=None):
typename = _ibis_dtypes[type(dtype)]
if nullable is None:
nullable = dtype.nullable
return cls(typename, nullable=nullable)
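# registering the converter below lets ibis.expr.datatypes.dtype() accept
# ClickhouseDataType instances directly, e.g. (assumed usage)
# dt.dtype(ClickhouseDataType.parse('Nullable(Int64)')) -> Int64(nullable=True)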
@dt.dtype.register(ClickhouseDataType)
def clickhouse_to_ibis_dtype(clickhouse_dtype):
return clickhouse_dtype.to_ibis()
class ClickhouseDatabase(Database):
pass
class ClickhouseQuery(Query):
def _external_tables(self):
tables = []
for name, df in self.extra_options.get('external_tables', {}).items():
if not isinstance(df, pd.DataFrame):
raise TypeError(
'External table is not an instance of pandas ' 'dataframe'
)
schema = sch.infer(df)
chtypes = map(ClickhouseDataType.from_ibis, schema.types)
structure = list(zip(schema.names, map(str, chtypes)))
tables.append(
{
'name': name,
'data': df.to_dict('records'),
'structure': structure,
}
)
return tables
def execute(self):
cursor = self.client._execute(
self.compiled_sql, external_tables=self._external_tables()
)
result = self._fetch(cursor)
return self._wrap_result(result)
def _fetch(self, cursor):
data, colnames, _ = cursor
if not len(data):
# handle empty resultset
return pd.DataFrame([], columns=colnames)
df = pd.DataFrame.from_dict(OrderedDict(zip(colnames, data)))
return self.schema().apply_to(df)
class ClickhouseTable(ir.TableExpr, DatabaseEntity):
"""References a physical table in Clickhouse"""
@property
def _qualified_name(self):
return self.op().args[0]
@property
def _unqualified_name(self):
return self._match_name()[1]
@property
def _client(self):
return self.op().args[2]
def _match_name(self):
m = fully_qualified_re.match(self._qualified_name)
if not m:
raise com.IbisError(
'Cannot determine database name from {0}'.format(
self._qualified_name
)
)
db, quoted, unquoted = m.groups()
return db, quoted or unquoted
@property
def _database(self):
return self._match_name()[0]
def invalidate_metadata(self):
self._client.invalidate_metadata(self._qualified_name)
def metadata(self):
"""
Return parsed results of DESCRIBE FORMATTED statement
Returns
-------
meta : TableMetadata
"""
return self._client.describe_formatted(self._qualified_name)
describe_formatted = metadata
@property
def name(self):
return self.op().name
def _execute(self, stmt):
return self._client._execute(stmt)
def insert(self, obj, **kwargs):
from .identifiers import quote_identifier
schema = self.schema()
assert isinstance(obj, pd.DataFrame)
assert set(schema.names) >= set(obj.columns)
columns = ', '.join(map(quote_identifier, obj.columns))
query = 'INSERT INTO {table} ({columns}) VALUES'.format(
table=self._qualified_name, columns=columns
)
# convert data columns with datetime64 pandas dtype to native date
# because clickhouse-driver 0.0.10 does arithmetic operations on it
obj = obj.copy()
for col in obj.select_dtypes(include=[np.datetime64]):
if isinstance(schema[col], dt.Date):
obj[col] = obj[col].dt.date
data = obj.to_dict('records')
return self._client.con.execute(query, data, **kwargs)
class ClickhouseDatabaseTable(ops.DatabaseTable):
pass
class ClickhouseClient(SQLClient):
"""An Ibis client interface that uses Clickhouse"""
database_class = ClickhouseDatabase
query_class = ClickhouseQuery
dialect = ClickhouseDialect
table_class = ClickhouseDatabaseTable
table_expr_class = ClickhouseTable
def __init__(self, *args, **kwargs):
self.con = _DriverClient(*args, **kwargs)
def _build_ast(self, expr, context):
return build_ast(expr, context)
@property
def current_database(self):
# might be better to use driver.Connection instead of Client
return self.con.connection.database
def log(self, msg):
log(msg)
def close(self):
"""Close Clickhouse connection and drop any temporary objects"""
self.con.disconnect()
def _execute(self, query, external_tables=(), results=True):
if isinstance(query, DDL):
query = query.compile()
self.log(query)
response = self.con.execute(
query,
columnar=True,
with_column_types=True,
external_tables=external_tables,
)
if not results:
return response
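        # with columnar=True and with_column_types=True the response carries
        # column-oriented data plus (column name, type string) pairs, unpacked below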
data, columns = response
colnames, typenames = zip(*columns)
coltypes = list(map(ClickhouseDataType.parse, typenames))
return data, colnames, coltypes
def _fully_qualified_name(self, name, database):
if bool(fully_qualified_re.search(name)):
return name
database = database or self.current_database
return '{0}.`{1}`'.format(database, name)
def list_tables(self, like=None, database=None):
"""
List tables in the current (or indicated) database. Like the SHOW
TABLES command in the clickhouse-shell.
Parameters
----------
like : string, default None
e.g. 'foo*' to match all tables starting with 'foo'
database : string, default None
If not passed, uses the current/default database
Returns
-------
tables : list of strings
"""
statement = 'SHOW TABLES'
if database:
statement += " FROM `{0}`".format(database)
if like:
m = fully_qualified_re.match(like)
if m:
database, quoted, unquoted = m.groups()
like = quoted or unquoted
return self.list_tables(like=like, database=database)
statement += " LIKE '{0}'".format(like)
data, _, _ = self.raw_sql(statement, results=True)
return data[0]
def set_database(self, name):
"""
Set the default database scope for client
"""
self.con.database = name
def exists_database(self, name):
"""
Checks if a given database exists
Parameters
----------
name : string
Database name
Returns
-------
if_exists : boolean
"""
return len(self.list_databases(like=name)) > 0
def list_databases(self, like=None):
"""
List databases in the Clickhouse cluster.
Like the SHOW DATABASES command in the clickhouse-shell.
Parameters
----------
like : string, default None
e.g. 'foo*' to match all tables starting with 'foo'
Returns
-------
databases : list of strings
"""
statement = 'SELECT name FROM system.databases'
if like:
statement += " WHERE name LIKE '{0}'".format(like)
data, _, _ = self.raw_sql(statement, results=True)
return data[0]
def get_schema(self, table_name, database=None):
"""
Return a Schema object for the indicated table and database
Parameters
----------
table_name : string
May be fully qualified
database : string, default None
Returns
-------
schema : ibis Schema
"""
qualified_name = self._fully_qualified_name(table_name, database)
query = 'DESC {0}'.format(qualified_name)
data, _, _ = self.raw_sql(query, results=True)
colnames, coltypes = data[:2]
coltypes = list(map(ClickhouseDataType.parse, coltypes))
return sch.schema(colnames, coltypes)
@property
def client_options(self):
return self.con.options
def set_options(self, options):
self.con.set_options(options)
def reset_options(self):
# Must nuke all cursors
raise NotImplementedError
def exists_table(self, name, database=None):
"""
Determine if the indicated table or view exists
Parameters
----------
name : string
database : string, default None
Returns
-------
if_exists : boolean
"""
return len(self.list_tables(like=name, database=database)) > 0
def _ensure_temp_db_exists(self):
        name = options.clickhouse.temp_db
if not self.exists_database(name):
self.create_database(name, force=True)
def _get_table_schema(self, tname):
return self.get_schema(tname)
def _get_schema_using_query(self, query):
_, colnames, coltypes = self._execute(query)
return sch.schema(colnames, coltypes)
def _exec_statement(self, stmt, adapter=None):
query = ClickhouseQuery(self, stmt)
result = query.execute()
if adapter is not None:
result = adapter(result)
return result
def _table_command(self, cmd, name, database=None):
qualified_name = self._fully_qualified_name(name, database)
return '{0} {1}'.format(cmd, qualified_name)
@property
def version(self):
self.con.connection.force_connect()
try:
server = self.con.connection.server_info
vstring = '{}.{}.{}'.format(
server.version_major, server.version_minor, server.revision
)
except Exception:
self.con.connection.disconnect()
raise
else:
return parse_version(vstring)
| apache-2.0 |
IndraVikas/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
pkainz/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/test_localdot.py | 44 | 5013 | from __future__ import print_function
import nose
import unittest
import numpy as np
from theano.compat.six.moves import xrange
import theano
from .localdot import LocalDot
from ..test_matrixmul import SymbolicSelfTestMixin
class TestLocalDot32x32(unittest.TestCase, SymbolicSelfTestMixin):
channels = 3
bsize = 10 # batch size
imshp = (32, 32)
ksize = 5
nkern_per_group = 16
subsample_stride = 1
ngroups = 1
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
def setUp(self):
np.random.seed(234)
assert self.imshp[0] == self.imshp[1]
fModulesR = (self.imshp[0] - self.ksize + 1) // self.subsample_stride
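        # number of filter positions (modules) per row for a 'valid'-style
        # convolution with this kernel size and subsampling stride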
#fModulesR += 1 # XXX GpuImgActs crashes w/o this??
fModulesC = fModulesR
self.fshape = (fModulesR, fModulesC, self.channels // self.ngroups,
self.ksize, self.ksize, self.ngroups, self.nkern_per_group)
self.ishape = (self.ngroups, self.channels // self.ngroups,
self.imshp[0], self.imshp[1], self.bsize)
self.hshape = (self.ngroups, self.nkern_per_group, fModulesR, fModulesC,
self.bsize)
filters = theano.shared(self.rand(self.fshape))
self.A = LocalDot(filters, self.imshp[0], self.imshp[1],
subsample=(self.subsample_stride, self.subsample_stride))
self.xlval = self.rand((self.hshape[-1],) + self.hshape[:-1])
self.xrval = self.rand(self.ishape)
self.xl = theano.shared(self.xlval)
self.xr = theano.shared(self.xrval)
# N.B. the tests themselves come from SymbolicSelfTestMixin
class TestLocalDotLargeGray(TestLocalDot32x32):
channels = 1
bsize = 128
imshp = (256, 256)
ksize = 9
nkern_per_group = 16
subsample_stride = 2
ngroups = 1
n_patches = 3000
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
# not really a test, but important code to support
# Currently exposes error, by e.g.:
# CUDA_LAUNCH_BLOCKING=1
# THEANO_FLAGS=device=gpu,mode=DEBUG_MODE
# nosetests -sd test_localdot.py:TestLocalDotLargeGray.run_autoencoder
def run_autoencoder(
self,
n_train_iter=10000, # -- make this small to be a good unit test
rf_shape=(9, 9),
n_filters=1024,
dtype='float32',
module_stride=2,
lr=0.01,
show_filters=True,
):
if show_filters:
# import here to fail right away
import matplotlib.pyplot as plt
try:
import skdata.vanhateren.dataset
except ImportError:
raise nose.SkipTest()
# 1. Get a set of image patches from the van Hateren data set
print('Loading van Hateren images')
n_images = 50
vh = skdata.vanhateren.dataset.Calibrated(n_images)
patches = vh.raw_patches((self.n_patches,) + self.imshp,
items=vh.meta[:n_images],
rng=np.random.RandomState(123),
)
patches = patches.astype('float32')
patches /= patches.reshape(self.n_patches, self.imshp[0] * self.imshp[1])\
.max(axis=1)[:, None, None]
# TODO: better local contrast normalization
if 0 and show_filters:
plt.subplot(2, 2, 1); plt.imshow(patches[0], cmap='gray')
plt.subplot(2, 2, 2); plt.imshow(patches[1], cmap='gray')
plt.subplot(2, 2, 3); plt.imshow(patches[2], cmap='gray')
plt.subplot(2, 2, 4); plt.imshow(patches[3], cmap='gray')
plt.show()
# -- Convert patches to localdot format:
# groups x colors x rows x cols x images
patches5 = patches[:, :, :, None, None].transpose(3, 4, 1, 2, 0)
print('Patches shape', patches.shape, self.n_patches, patches5.shape)
# 2. Set up an autoencoder
print('Setting up autoencoder')
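        # tied-weights autoencoder: encode with the local filter bank A,
        # decode with its transpose (rmul_T), and minimize the squared error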
hid = theano.tensor.tanh(self.A.rmul(self.xl))
out = self.A.rmul_T(hid)
cost = ((out - self.xl) ** 2).sum()
params = self.A.params()
gparams = theano.tensor.grad(cost, params)
train_updates = [(p, p - lr / self.bsize * gp)
for (p, gp) in zip(params, gparams)]
if 1:
train_fn = theano.function([], [cost], updates=train_updates)
else:
train_fn = theano.function([], [], updates=train_updates)
theano.printing.debugprint(train_fn)
# 3. Train it
params[0].set_value(0.001 * params[0].get_value())
for ii in xrange(0, self.n_patches, self.bsize):
self.xl.set_value(patches5[:, :, :, :, ii:ii + self.bsize], borrow=True)
cost_ii, = train_fn()
print('Cost', ii, cost_ii)
if 0 and show_filters:
self.A.imshow_gray()
plt.show()
assert cost_ii < 0 # TODO: determine a threshold for detecting regression bugs
| bsd-3-clause |
fabianp/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
jrkerns/pylinac | docs/source/conf.py | 1 | 9756 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pylinac documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 10 11:56:25 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'matplotlib.sphinxext.plot_directive',
]
# put mock objects here
autodoc_mock_imports = ['_tkinter', 'tkinter']
autoclass_content = 'both'
autodoc_default_flags = ['show-inheritance', 'members']
# See: http://sphinx-doc.org/latest/ext/autodoc.html#confval-autodoc_default_flags
autodoc_member_order = 'bysource'
napoleon_include_special_with_doc = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pylinac'
copyright = '2021'
author = 'James Kerns'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0'
# The full version, including alpha/beta/rc tags.
release = '3.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pylinacdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pylinac.tex', 'pylinac Documentation',
'James', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pylinac', 'pylinac Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pylinac', 'pylinac Documentation',
author, 'pylinac', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
plot_include_source = True
| mit |
zorojean/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
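    # time Lasso (coordinate descent) against LassoLars on synthetic data
    # for every combination of sample and feature counts in the grids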
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
MTgeophysics/mtpy | tests/imaging/test_plotPhaseTensorMaps.py | 1 | 8357 | import glob
import inspect
import os.path
import unittest
import pytest
from mtpy.imaging.penetration import load_edi_files
from mtpy.imaging.phase_tensor_maps import PlotPhaseTensorMaps
from tests.imaging import ImageTestCase, ImageCompare
# configure matplotlib for testing
def _expected_compare_fail():
pytest.xfail(
"expected the image to be different on different platform, please check the image manually.")
test_params = [
("data/edifiles", 1, {"fig_size": (7, 8), "savefig_kwargs": {'dpi': 100},
"on_compare_fail": _expected_compare_fail}),
("../MT_Datasets/3D_MT_data_edited_fromDuanJM", 10, {"fig_size": (7, 8), "savefig_kwargs": {'dpi': 100}}),
("../MT_Datasets/GA_UA_edited_10s-10000s", 0.025, {"fig_size": (8, 5), "savefig_kwargs": {'dpi': 150}}),
("../MT_Datasets/GA_UA_edited_10s-10000s", 0.01, {"fig_size": (8, 7), "savefig_kwargs": {'dpi': 100}}),
("../MT_Datasets/GA_UA_edited_10s-10000s", 0.0625, {"fig_size": (8, 5), "savefig_kwargs": {'dpi': 150}}),
("../MT_Datasets/GA_UA_edited_10s-10000s", 0.0005, {"fig_size": (8, 5), "savefig_kwargs": {'dpi': 150}}),
("data/edifiles2", 1, {"fig_size": (7, 8), "savefig_kwargs": {'dpi': 100},
"on_compare_fail": _expected_compare_fail})
]
class TestPlotPhaseTensorMaps(ImageTestCase):
@classmethod
def setUpClass(cls):
super(TestPlotPhaseTensorMaps, cls).setUpClass()
# 1) Define plots params
# parameters describing ellipses, differ for different map scales: deg, m, km
# Try different size to find a suitable value for your case. as a
# guidance: 1 degree=100KM
cls.ellipse_dict = {
'size': 0.2,
'colorby': 'phimin',
'range': (
0,
90,
1),
'cmap': 'mt_bl2gr2rd'}
# adjust to suitable size: parameters describing the induction vector arrows
cls.arrow_dict = {'size': 0.5,
'lw': 0.2,
'head_width': 0.04,
'head_length': 0.04,
'threshold': 0.8,
'direction': 0}
# parameters describing the arrow legend (not necessarily used)
# self.arrow_legend_dict = {'position': 'upper right',
# 'fontpad': 0.0025,
# 'xborderpad': 0.07,
# 'yborderpad': 0.015}
@unittest.skipUnless(os.path.isdir("data/edifiles2"), "data file not found")
# @unittest.expectedFailure
@pytest.mark.skip(reason="no way of currently testing this")
def test_edifiles2_input(self):
"""
testing to use Z and tipper objects as input
        this fails because the constructor of PlotPhaseTensorMaps only initializes the Mplot object properly when reading from files
:return:
"""
edi_path = test_params[4]
freq = 1
mt_objs = load_edi_files(edi_path)
z_objs = [mt.Z for mt in mt_objs]
tipper = [mt.Tipper for mt in mt_objs]
save_figure_path = os.path.join(self._temp_dir, "%s.png" % inspect.currentframe().f_code.co_name)
save_param_path = os.path.join(self._temp_dir, "params_%s" % inspect.currentframe().f_code.co_name)
pt_obj = PlotPhaseTensorMaps(z_object_list=z_objs,
tipper_object_list=tipper,
plot_freq=freq,
ftol=0.10, # freq tolerance,which will decide how many data points included
mapscale='deg', # deg or m, or km
xpad=0.4, # plot margin; change according to lat-lon in edifiles
ypad=0.4, # ~ 2* ellipse size
# ellipse_dict=self.ellipse_dict, # not implemented
ellipse_size=.2,
ellipse_colorby= 'phimin',
ellipse_range=(0, 90, 1),
ellipse_cmap= 'mt_bl2gr2rd',
plot_tipper='yr',
# arrow_dict=self.arrow_dict, # not implemented
arrow_size=0.5,
arrow_lw=0.2,
arrow_head_width=0.04,
arrow_head_length=0.04,
arrow_direction=0,
arrow_threshold=0.8,
# arrow_legend_dict=arrow_legend_dict,
                                     # e.g. python examples/plot_phase_tensor_map.py data/edifiles/ 10 /e/MTPY2_Outputs/ptmap3deg.png
                                     # fig_size=(6, 5),
# fig_dpi=300, the default is OK. Higher dpi
# may distort figure
save_fn=save_figure_path, fig_size=(8, 6), fig_dpi=100)
path2figure = pt_obj.plot()
pt_obj.save_figure(save_figure_path)
assert (os.path.isfile(save_figure_path))
pt_obj.export_params_to_file(save_path=save_param_path)
assert (os.path.isdir(save_param_path))
def _test_gen(edi_path, freq):
def default(self):
save_figure_path = os.path.join(self._temp_dir, "%s.png" % default.__name__)
save_param_path = os.path.join(self._temp_dir, "params_%s" % default.__name__)
edi_file_list = glob.glob(os.path.join(edi_path, "*.edi"))
pt_obj = PlotPhaseTensorMaps(fn_list=edi_file_list,
plot_freq=freq,
ftol=0.10, # freq tolerance,which will decide how many data points included
mapscale='deg', # deg or m, or km
xpad=0.4, # plot margin; change according to lat-lon in edifiles
ypad=0.4, # ~ 2* ellipse size
# ellipse_dict=self.ellipse_dict, # Not implemented
ellipse_size=.2,
ellipse_colorby= 'phimin',
ellipse_range=(0, 90, 1),
ellipse_cmap= 'mt_bl2gr2rd',
plot_tipper='yr',
#arrow_dict=self.arrow_dict, # Not implemented
arrow_size=0.5,
arrow_lw=0.2,
arrow_head_width=0.04,
arrow_head_length=0.04,
arrow_direction=0,
arrow_threshold=0.8,
# arrow_legend_dict=arrow_legend_dict,
                                     # e.g. python examples/plot_phase_tensor_map.py data/edifiles/ 10 /e/MTPY2_Outputs/ptmap3deg.png
                                     # fig_size=(6, 5),
# fig_dpi=300, the default is OK. Higher dpi
# may distort figure
save_fn=save_figure_path)
# 3) do the plot and save figure - if the param save_path provided
path2figure = pt_obj.plot(show=True)
pt_obj.save_figure(save_figure_path, close_plot='n')
assert (os.path.isfile(save_figure_path))
pt_obj.export_params_to_file(save_path=save_param_path)
assert (os.path.isdir(save_param_path))
return default,
# generate tests
for edi_path, freq, img_kwargs in test_params:
if os.path.isdir(edi_path):
test_name = os.path.basename(edi_path)
for _test_func in _test_gen(edi_path, freq):
plot_name = _test_func.__name__
_test_func.__name__ = "test_{test_name}_{freq}_{plot_name}".format(
test_name=test_name, freq=str(freq).replace('.', '_'), plot_name=plot_name)
setattr(
TestPlotPhaseTensorMaps,
_test_func.__name__,
ImageCompare(**img_kwargs).__call__(_test_func))
| gpl-3.0 |
jdavidrcamacho/Tests_GP | 05 - Performance tests/tests_comparison_1spot.py | 1 | 4751 | # -*- coding: utf-8 -*-
import Gedi as gedi
import numpy as np; #np.random.seed(13042017)
import matplotlib.pylab as pl; pl.close("all")
import astropy.table as Table
### NUMBER OF SPOTS = 1 ###
#import sys
#f=open("Tests_gradient_optimization.txt","w")
#sys.stdout = f
##### Spots data preparation ##################################################
print
print "****************************************************"
print "Preparing data"
print "****************************************************"
print
spots_data= [] #to contain all data in the end
#data from .rdb file
rdb_data= Table.Table.read('soap_s1.rdb',format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y= np.array(spots_info+yerr)
time= np.array(range(1,101))
pl.figure('data')
pl.plot(time,y,'*')
pl.close('data')
print "Done."
##### Optimization ############################################################
print
print "****************************************************"
print "Running optimization algorithms"
print "****************************************************"
print
kernel1= gedi.kernel.ExpSineSquared(8.0, 1.0, 25.0)+ \
gedi.kernel.WhiteNoise(1.0)
kernel2= gedi.kernel.QuasiPeriodic(8.0, 1.0, 1.0, 25.0)+ \
gedi.kernel.WhiteNoise(1.0)
kernel3= gedi.kernel.ExpSquared(8.0,1.0)+ \
gedi.kernel.WhiteNoise(1.0)
likelihood1=gedi.kernel_likelihood.likelihood(kernel1,time,y,yerr)
likelihood2=gedi.kernel_likelihood.likelihood(kernel2,time,y,yerr)
likelihood3=gedi.kernel_likelihood.likelihood(kernel3,time,y,yerr)
print "Initial kernel # log-likelihood"
print kernel1, '#', likelihood1
print kernel2, '#', likelihood2
print kernel3, '#', likelihood3
print
optimization1=gedi.kernel_optimization.committed_optimization(kernel1, \
time,y,yerr)
#optimization1=gedi.kernel_optimization.single_optimization(kernel1, \
# time,y,yerr,method="altsda")
optimization2=gedi.kernel_optimization.committed_optimization(kernel2, \
time,y,yerr)
#optimization2=gedi.kernel_optimization.single_optimization(kernel2, \
# time,y,yerr,method="altsda")
optimization3=gedi.kernel_optimization.committed_optimization(kernel3, \
time,y,yerr)
#optimization3=gedi.kernel_optimization.single_optimization(kernel3, \
# time,y,yerr,method="altsda")
print "Final kernel # log-likelihood"
print optimization1[1], '#', optimization1[0]
print optimization2[1], '#', optimization2[0]
print optimization3[1], '#', optimization3[0]
print
print "Done."
##### Final Graphics #########################################################
print
print "****************************************************"
print "Preparing graphics"
print "****************************************************"
print
xcalc=np.linspace(0,101,500)
[mu1,std1]=gedi.kernel_likelihood.compute_kernel(optimization1[1], \
time,xcalc,y,yerr)
[mu2,std2]=gedi.kernel_likelihood.compute_kernel(optimization2[1], \
time,xcalc,y,yerr)
[mu3,std3]=gedi.kernel_likelihood.compute_kernel(optimization3[1], \
time,xcalc,y,yerr)
pl.figure('fit1') #Graphics
pl.fill_between(xcalc, mu1+std1, mu1-std1, color="k", alpha=0.1)
pl.plot(xcalc, mu1+std1, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu1-std1, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu1, color="k", alpha=1, lw=0.5)
pl.errorbar(time, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_1spot_ess.png")
pl.close('fit1')
pl.figure('fit2') #Graphics
pl.fill_between(xcalc, mu2+std2, mu2-std2, color="k", alpha=0.1)
pl.plot(xcalc, mu2+std2, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu2-std2, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu2, color="k", alpha=1, lw=0.5)
pl.errorbar(time, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_1spot_qp.png")
pl.close('fit2')
pl.figure('fit3') #Graphics
pl.fill_between(xcalc, mu3+std3, mu3-std3, color="k", alpha=0.1)
pl.plot(xcalc, mu3+std3, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu3-std3, color="k", alpha=1, lw=0.25)
pl.plot(xcalc, mu3, color="k", alpha=1, lw=0.5)
pl.errorbar(time, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$time (days)$")
pl.ylabel("$RV (m/s)$")
pl.xlim((0,101))
pl.savefig("fit_1spot_es.png")
pl.close('fit3')
print "Done." | mit |
cjubb39/SNSS | algo_src/serialize.py | 1 | 4398 | import operator
import sys
from random import sample
import networkx as nx
import numpy as np
from sklearn import svm, cross_validation, grid_search, metrics
from math import ceil
import scipy
from collections import defaultdict
import pickle
def hits(G,max_iter=100,tol=1.0e-6):
M=nx.adjacency_matrix(G,nodelist=G.nodes())
(n,m)=M.shape # should be square
A=M.T*M # authority matrix
x=scipy.ones((n,1))/n # initial guess
# power iteration on authority matrix
i=0
while True:
xlast=x
x=A*x
x=x/x.sum()
# check convergence, l1 norm
err=scipy.absolute(x-xlast).sum()
if err < tol:
break
if i>max_iter:
            raise nx.NetworkXError(
"HITS: power iteration failed to converge in %d iterations."%(i+1))
i+=1
a=np.asarray(x).flatten()
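    # hub scores follow from the authorities as h = M * a; both vectors are
    # L1-normalized before being zipped back onto the node labels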
h=np.asarray(M*a).flatten()
hubs=dict(zip(G.nodes(),h/h.sum()))
authorities=dict(zip(G.nodes(),a/a.sum()))
return hubs,authorities
def calc_ratio(G, node):
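    # ratio of weighted in-degree to weighted out-degree for `node`
    # (0.0 when the node has no outgoing edge weight)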
in_count = 0.0
out_count = 0.0
for u,v,d in G.in_edges_iter(node, data=True):
in_count += d['weight']
for u,v,d in G.out_edges_iter(node, data=True):
out_count += d['weight']
return in_count / out_count if out_count != 0.0 else 0.0
def normalize_feature(feature_dict):
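    # z-score normalization: center on the mean, scale by the standard deviation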
mean = np.mean(feature_dict.values())
std_dev = np.std(feature_dict.values())
return {k: (v - mean) / std_dev for k, v in feature_dict.iteritems()}
if __name__ == "__main__":
known_input = sys.argv[1]
goal_input = sys.argv[2]
kernel = sys.argv[3]
params_in_name = sys.argv[4]
random_input_data_name = sys.argv[5]
output_data_name = sys.argv[6]
best_params = pickle.load(open(params_in_name, 'r'))
retweet_graph = nx.DiGraph()
nx.read_weighted_edgelist('data/higgs-retweet_network.edgelist', create_using=retweet_graph)
reply_graph = nx.DiGraph()
nx.read_weighted_edgelist('data/higgs-reply_network.edgelist', create_using=reply_graph)
mention_graph = nx.DiGraph()
nx.read_weighted_edgelist('data/higgs-mention_network.edgelist', create_using=mention_graph)
graphs = [retweet_graph, reply_graph, mention_graph]
# load random N
training_set = []
with open(random_input_data_name, 'r') as f:
for line in f:
training_set.append(line.split())
# calculate h/a for each graph
features = []
for g in graphs:
hubs, authorities = hits(g)
hubs = defaultdict(lambda: 0.0, hubs)
authorities = defaultdict(lambda: 0.0, authorities)
# features.append(hubs)
features.append(authorities)
# normalize feature data
features = [normalize_feature(x) for x in features]
# get set of unique nodes in all graphs
training_nodes = set(map(lambda x: x[0], training_set))
# sample "guess" nodes from social data
all_nodes = set(retweet_graph.nodes()) | set(reply_graph.nodes()) | set(mention_graph.nodes())
testing_nodes = all_nodes - training_nodes
# convert features to dictionary
features = {node:map(lambda f: f[node] if node in f else 0.0, features) for node in all_nodes}
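    # one feature vector per node: its normalized authority score in each
    # interaction graph (retweet, reply, mention), 0.0 where the node is absent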
# populate training
training_X = np.empty([len(training_nodes), len(features.values()[0])])
training_Y = np.empty(len(training_nodes))
empty_features = [0.0] * len(features.values()[0])
for index, n_v_tuple in enumerate(training_set):
node = n_v_tuple[0]
value = n_v_tuple[1]
training_X[index] = features[node] if node in features else empty_features
training_Y[index] = value
# populate testing
X = np.zeros([len(testing_nodes), len(features.values()[0])])
ordered_test_nodes = [None] * len(testing_nodes)
for index, node in enumerate(testing_nodes):
X[index] = features[node]
ordered_test_nodes[index] = node
clf = svm.SVR(**(best_params[kernel]))
clf.fit(training_X, training_Y)
predictions = clf.predict(X)
# sort by predicted edges score
with open(output_data_name, 'w+') as f:
count = 0
for i,p in sorted(enumerate(predictions), key=operator.itemgetter(1), reverse=True):
if count >= int(goal_input):
break
f.write(str(ordered_test_nodes[i]))
f.write(' ')
f.write(str(p))
f.write("\n")
count += 1
| gpl-3.0 |
PYPIT/COS_REDUX | cosredux/science.py | 1 | 7185 | """ Utility routines
"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
import numpy as np
import os
import pdb
from matplotlib import pyplot as plt
from astropy.table import Table
from xastropy.xutils import xdebug as xdb
def set_extraction_region(obj_tr, segm, coadd_corrtag_woPHA_file, apert=25., offs1=0., offs2=0., check=False):
""" Defines extraction region
Parameters
----------
obj_tr : float, int
object trace
segm : str
segment
apert : float, int, optional
coadd_corrtag_woPHA_file : str
For wavelength info
offs1 : float, int, optional
offs2 : float, int, optional
left and right offsets from the aperture
could be used for FUVB
check : bool, optional
show extraction region
#ywidth : float, optional
Returns
-------
ex_region : dict
"""
# Load
data = Table.read(coadd_corrtag_woPHA_file)
wave = data['WAVELENGTH'].data
# Set
if segm == 'FUVA':
x1=1200.
x2=max(wave)
elif segm == 'FUVB':
x1=900.
x2=max(wave)
ex_region = {}
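    # extraction region is [wave_min, wave_max, y_low, y_high] in
    # (WAVELENGTH, YFULL) detector coordinates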
ex_region['extraction'] = [x1, x2, obj_tr - apert+offs1, obj_tr + apert+offs2]
# Write and Return
#outfile = coadd_corrtag_woPHA_file.replace('.fits', '_exregion.json')
if check:
yfull = data['YFULL']
plt.scatter(wave,yfull,s=1)
# Region
x1,x2,y1,y2 = ex_region['extraction']
plt.plot([x1,x2,x2,x1,x1],[y1,y1,y2,y2,y1],'b',linewidth=3.3)
# Axes
plt.xlim(x1-10,x2+10)
if segm == 'FUVB':
plt.xlim(50.,x2+10)
plt.ylim(min(yfull[wave > x1]),max(yfull[wave > x1]))
plt.show()
# Return
return ex_region
def coadd_exposures(x1d_files, segm, outfile, bin=None):
""" Coadd exposures (step 9 of the procedure)
Parameters
----------
x1d_files : list of str
segm : str
outfile : str
file with coadded exposures
bin : 2, 3, None
bin the output spectrum
Returns
-------
"""
from scipy.interpolate import interp1d
if segm == 'FUVA':
spec_row = 0
subseg = 'a'
elif segm == 'FUVB':
spec_row = 1
subseg = 'b'
# Load
xtbls = []
dark_files = []
for x1d_file in x1d_files:
dark_files.append(x1d_file.replace('_x1d.fits','_{:s}_bkgd.fits'.format(subseg)))
if not os.path.isfile(dark_files[-1]):
print("No background file named {:s}".format(dark_files[-1]))
raise IOError("Make it with dark_to_exposures()")
#
xtbl = Table.read(x1d_file)
xtbls.append(xtbl[spec_row:spec_row+1])
# Grab one wavelength array
wave = xtbls[0]['WAVELENGTH'][0,:].data
# Sum exposure time
total_time = np.zeros_like(wave)
for xtbl in xtbls:
total_time += xtbl['DQ_WGT'][0,:]*xtbl['EXPTIME']
# Find DQmin for all exposures -- Why are we doing this step??
dqmin = np.ones_like(wave).astype(int) * 99999
for xtbl in xtbls:
# Reset DQ
dq = xtbl['DQ'][0,:].data
reset_1024 = dq == 1024
dq[reset_1024] = 2
dqmin = np.minimum(dq, dqmin)
# Find DQ_WGT max for all exposures
DQWmax = np.zeros_like(wave)
for xtbl in xtbls:
# Reset DQ
dqw = xtbl['DQ_WGT'][0,:].data
DQWmax = np.maximum(dqw, DQWmax)
# ####################
# CALIBRATION
wave_calib, calib = [], []
for xtbl in xtbls:
gddq = (xtbl['DQ'] == 0) & (xtbl['FLUX'] > 0)
# Append
wave_calib.append(xtbl['WAVELENGTH'][gddq].data.flatten())
calib.append( (xtbl['NET'][gddq] / xtbl['FLUX'][gddq]).data)
# arrays
wave_calib = np.concatenate(wave_calib)
calib = np.concatenate(calib)
# sort
srt = np.argsort(wave_calib)
wave_calib = wave_calib[srt]
calib = calib[srt]
# Cut down
gdwv = wave_calib < 2100.
# Spline
sens_func = interp1d(wave_calib[gdwv], calib[gdwv], bounds_error=False, fill_value=0.) # cubic behaves badly
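    # sens_func(wave) converts flux to count rate, so further below
    # flux = (science counts - dark counts) / (sens_func * effective exposure time)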
# Total counts in science and background
total_counts = np.zeros_like(wave)
total_dark = np.zeros_like(wave)
for ss, xtbl in enumerate(xtbls):
# Science
dqw = xtbl['DQ_WGT'][0,:].data
total_counts += dqw * xtbl['GCOUNTS'][0,:]
# Dark
bkgd = Table.read(dark_files[ss])
total_dark += dqw * bkgd['DARK'].data
# Bin
if bin is not None:
# Check
if bin not in [2,3]:
raise IOError("Only ready for binning by 2 or 3 channels")
# Ugly for loop
nchannel = len(total_counts)
new_tot_counts, new_tot_dark, new_tot_time, new_wave, new_dqmin, new_DQW = [], [], [], [], [], []
for kk in np.arange(0, nchannel, bin):
# Simple stuff sums
new_tot_counts.append(np.sum(total_counts[kk:kk+bin]))
new_tot_dark.append(np.sum(total_dark[kk:kk+bin]))
new_tot_time.append(np.sum(total_time[kk:kk+bin]))
new_dqmin.append(np.min(dqmin[kk:kk+bin]))
new_DQW.append(np.max(DQWmax[kk:kk+bin]))
# Wavelength
new_wave.append(np.mean(wave[kk:kk+bin]))
# Turn into arrays
new_tot_counts = np.array(new_tot_counts)
new_tot_dark = np.array(new_tot_dark)
new_tot_time = np.array(new_tot_time)
new_wave = np.array(new_wave)
else:
new_tot_counts, new_tot_dark, new_tot_time, new_wave = total_counts, total_dark, total_time, wave
new_dqmin, new_DQW = dqmin, DQWmax
# Flux array
flux = np.zeros_like(new_tot_time)
calib = sens_func(new_wave)
gd_time_sens = (new_tot_time > 0.) & (calib > 0.)
flux[np.where(gd_time_sens)[0]] = (new_tot_counts[gd_time_sens]-new_tot_dark[gd_time_sens]) / (calib[gd_time_sens] * new_tot_time[gd_time_sens])
# Simple error estimate
error = np.zeros_like(new_tot_time)
gd_error = (new_tot_time > 0.) & (calib > 0.) & (new_tot_counts > 0)
error[np.where(gd_error)[0]] = np.sqrt(new_tot_counts[gd_error]) / (calib[gd_error] * new_tot_time[gd_error])
# Final spectral information
coadd = Table()
coadd['wave'] = new_wave
coadd['flux'] = flux
coadd['error'] = error
coadd['counts'] = new_tot_counts
coadd['bgkd'] = new_tot_dark
coadd['eff_time'] = new_tot_time
coadd['calib'] = calib
coadd['DQ_MIN'] = new_dqmin
coadd['DQW_max'] = new_DQW
# Write
coadd.write(outfile, overwrite=True)
print("Wrote {:s}".format(outfile))
def combinespectfiles(spfile_a, spfile_b, file_ab):
""" Coadd two spectra, and write output in a file
Parameters
----------
spfile_a : str
spfile_b : str
.fits files with spectra
file_ab : str
output .fits file with combined spectra
Returns
-------
"""
from linetools.spectra import io as tio
from linetools.spectra import utils as spltu
file_a = tio.readspec(spfile_a)
file_b = tio.readspec(spfile_b)
spliced_sp=spltu.splice_two(file_b, file_a, chk_units=False)
spliced_sp.write(file_ab)
#print("Wrote {:s}".format(file_ab))
| bsd-2-clause |
silky/sms-tools | software/models_interface/sprModel_function.py | 2 | 3433 | # function to call the main analysis/synthesis functions in software/models/sprModel.py
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.signal import get_window
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import sprModel as SPR
import stft as STFT
def main(inputFile='../../sounds/bendir.wav', window='hamming', M=2001, N=2048, t=-80,
minSineDur=0.02, maxnSines=150, freqDevOffset=10, freqDevSlope=0.001):
"""
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size
N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks
minSineDur: minimum duration of sinusoidal tracks
maxnSines: maximum number of parallel sinusoids
freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# perform sinusoidal plus residual analysis
tfreq, tmag, tphase, xr = SPR.sprModelAnal(x, fs, w, N, H, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope)
# compute spectrogram of residual
mXr, pXr = STFT.stftAnal(xr, fs, w, N, H)
# sum sinusoids and residual
y, ys = SPR.sprModelSynth(tfreq, tmag, tphase, xr, Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel_sines.wav'
outputFileResidual = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel_residual.wav'
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel.wav'
    # write sound files for sinusoidal, residual, and the sum
UF.wavwrite(ys, fs, outputFileSines)
UF.wavwrite(xr, fs, outputFileResidual)
UF.wavwrite(y, fs, outputFile)
# create figure to show plots
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the magnitude spectrogram of residual
plt.subplot(3,1,2)
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mXr[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1]))
plt.autoscale(tight=True)
# plot the sinusoidal frequencies on top of the residual spectrogram
if (tfreq.shape[1] > 0):
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k')
plt.title('sinusoidal tracks + residual spectrogram')
plt.autoscale(tight=True)
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show(block=False)
if __name__ == "__main__":
main()
| agpl-3.0 |
trankmichael/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
    Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
massmutual/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second time, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
skavulya/spark-tk | regression-tests/sparktkregtests/testcases/dicom/dicom_filter_test.py | 13 | 10428 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.filter functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import os
import dicom
import numpy
import random
from lxml import etree
import datetime
class DicomFilterTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(DicomFilterTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = "../../../datasets/dicom/dicom_uncompressed/xml/"
self.image_directory = "../../../datasets/dicom/dicom_uncompressed/imagedata/"
self.query = ".//DicomAttribute[@keyword='KEYWORD']/Value/text()"
self.count = self.dicom.metadata.count()
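    # Note on self.query above: it is a template; for example,
    # self.query.replace("KEYWORD", "PatientID") yields
    #   .//DicomAttribute[@keyword='PatientID']/Value/text()
    # which extracts the PatientID value from a metadata XML document.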
def test_filter_one_key(self):
"""test filter with basic filter function"""
# extract a key-value pair from the first row metadata for our use
first_row = self.dicom.metadata.to_pandas()["metadata"][0]
xml = etree.fromstring(first_row.encode("ascii", "ignore"))
patient_id = xml.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
# ask dicom to filter using our key-value filter function
self.dicom.filter(self._filter_key_values({ "PatientID" : patient_id }))
# we generate our own result to compare to dicom's
expected_result = self._filter({ "PatientID" : patient_id })
# ensure results match
self._compare_dicom_with_expected_result(expected_result)
def test_filter_multi_key(self):
"""test filter with basic filter function mult keyval pairs"""
# first we extract key-value pairs from the first row's metadata
# for our own use to generate a key-val dictionary
first_row = self.dicom.metadata.to_pandas()["metadata"][0]
xml = etree.fromstring(first_row.encode("ascii", "ignore"))
patient_id = xml.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
sopi_id = xml.xpath(self.query.replace("KEYWORD", "SOPInstanceUID"))[0]
key_val = { "PatientID" : patient_id, "SOPInstanceUID" : sopi_id }
# we use our filter function and ask dicom to filter
self.dicom.filter(self._filter_key_values(key_val))
# here we generate our own result
expected_result = self._filter(key_val)
# compare expected result to what dicom gave us
self._compare_dicom_with_expected_result(expected_result)
def test_filter_zero_matching_records(self):
"""test filter with filter function returns none"""
# we give dicom a filter function which filters by
# key-value and give it a key-value pair which will
# return 0 records
pandas = self.dicom.metadata.to_pandas()
self.dicom.filter(self._filter_key_values({ "PatientID" : -6 }))
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_nothing(self):
"""test filter with filter function filters nothing"""
# this filter function will return all records
self.dicom.filter(self._filter_nothing())
self.assertEqual(self.dicom.metadata.count(), self.count)
def test_filter_everything(self):
"""test filter function filter everything"""
# filter_everything filter out all of the records
self.dicom.filter(self._filter_everything())
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_timestamp_range(self):
"""test filter with timestamp range function"""
# we will test filter with a function which takes a begin and end
# date and returns all records with a study date between them
# we will set begin date to 15 years ago and end date to 5 years ago
begin_date = datetime.datetime.now() - datetime.timedelta(days=15*365)
end_date = datetime.datetime.now() - datetime.timedelta(days=5*365)
# here we will generate our own result by filtering for records
# which meet our criteria
expected_result = []
pandas = self.dicom.metadata.to_pandas()
# iterate through the rows and append all records with
# a study date between our begin and end date
for index, row in pandas.iterrows():
ascii_row = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(ascii_row)
study_date = xml_root.xpath(self.query.replace("KEYWORD", "StudyDate"))[0]
datetime_study_date = datetime.datetime.strptime(study_date, "%Y%m%d")
if datetime_study_date > begin_date and datetime_study_date < end_date:
expected_result.append(ascii_row)
# now we ask dicom to use our filter function below to return
# all records with a StudyDate within our specified range
self.dicom.filter(self._filter_timestamp_range(begin_date, end_date))
# ensure that expected result matches actual
self._compare_dicom_with_expected_result(expected_result)
def test_return_type_str(self):
"""test filter with function that returns strings"""
self.dicom.filter(self._filter_return_string())
self.assertEqual(3, self.dicom.metadata.count())
def test_return_type_int(self):
"""test filter wtih function that returns ints"""
self.dicom.filter(self._filter_return_int())
self.assertEqual(3, self.dicom.metadata.count())
def test_filter_has_bugs(self):
"""test filter with a broken filter function"""
with self.assertRaisesRegexp(Exception, "this filter is broken!"):
self.dicom.filter(self._filter_has_bugs())
self.dicom.metadata.count()
def test_filter_invalid_param(self):
"""test filter with an invalid param type"""
# should fail because filter takes a function not a keyvalue pair
with self.assertRaisesRegexp(Exception, "'dict' object is not callable"):
self.dicom.filter({ "PatientID" : "bla" })
self.dicom.metadata.count()
def test_filter_invalid_function(self):
"""test filter with function which takes more than one param"""
with self.assertRaisesRegexp(Exception, "takes exactly 2 arguments"):
self.dicom.filter(self._filter_invalid())
self.dicom.metadata.count()
def _filter_key_values(self, key_val):
"""filter by key-value"""
def _filter_key_value(row):
metadata = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(metadata)
for key in key_val:
xml_element_value = xml_root.xpath(".//DicomAttribute[@keyword='" + key + "']/Value/text()")[0]
                if xml_element_value != key_val[key]:
                    return False
            return True
return _filter_key_value
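    # The helpers in this class (such as _filter_key_values above) follow a
    # closure-factory pattern: each method returns a one-argument predicate
    # that dicom.filter() applies to every metadata row. Illustrative
    # (hypothetical) usage:
    #     keep = self._filter_key_values({"PatientID": "12345"})
    #     keep(row)  # True only if the row's PatientID equals "12345"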
def _filter_nothing(self):
"""returns all records"""
def _filter_nothing(row):
return True
return _filter_nothing
def _filter_everything(self):
"""returns no records"""
def _filter_everything(row):
return False
return _filter_everything
def _filter_timestamp_range(self, begin_date, end_date):
"""return records within studydate date range"""
def _filter_timestamp_range(row):
metadata = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(metadata)
timestamp = xml_root.xpath(".//DicomAttribute[@keyword='StudyDate']/Value/text()")[0]
timestamp = datetime.datetime.strptime(timestamp, "%Y%m%d")
if begin_date < timestamp and timestamp < end_date:
return True
else:
return False
return _filter_timestamp_range
def _filter_return_string(self):
"""filter function which returns str"""
def _filter_return_string(row):
return "True"
return _filter_return_string
def _filter_return_int(self):
"""filter function returns int"""
def _filter_return_int(row):
return -1
return _filter_return_int
def _filter_has_bugs(self):
"""broken filter function"""
def _filter_has_bugs(row):
raise Exception("this filter is broken!")
return _filter_has_bugs
def _filter_invalid(self):
"""filter function takes 2 params"""
# filter is invalid because it takes
# 2 parameters
def _filter_invalid(index, row):
return True
return _filter_invalid
def _filter(self, keywords):
"""filter records by key value pair"""
# here we are generating the expected result
matching_records = []
pandas_metadata = self.dicom.metadata.to_pandas()["metadata"]
for row in pandas_metadata:
ascii_xml = row.encode("ascii", "ignore")
xml = etree.fromstring(row.encode("ascii", "ignore"))
for keyword in keywords:
                this_row_keyword_value = xml.xpath(self.query.replace("KEYWORD", keyword))[0]
                if this_row_keyword_value == keywords[keyword]:
matching_records.append(ascii_xml)
return matching_records
def _compare_dicom_with_expected_result(self, expected_result):
"""compare expected result with actual result"""
pandas_result = self.dicom.metadata.to_pandas()["metadata"]
for expected, actual in zip(expected_result, pandas_result):
actual_ascii = actual.encode("ascii", "ignore")
self.assertEqual(actual_ascii, expected)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
mjgrav2001/scikit-learn | sklearn/utils/extmath.py | 142 | 21102 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
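# Worked example (editor's note): for x = [3, 4], norm(x) == 5.0 while
# squared_norm(x) == 25.0, i.e. squared_norm(x) equals norm(x) ** 2 up to
# floating point rounding.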
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
    It returns -Inf if det(A) is non-positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
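# Worked example (editor's note): for A = 2 * np.eye(3), det(A) = 8 and
# fast_logdet(A) == np.log(8) ~= 2.079; for a matrix whose determinant is
# non-positive the function returns -inf instead of raising.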
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
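# Worked example (editor's note): for the dense vector
# w = np.array([0., 1., 0., 2.]), two of the four entries are non-zero, so
# density(w) == 0.5; for a sparse matrix the ratio w.nnz / (n_rows * n_cols)
# is returned instead.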
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of R
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case).
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
    analysis and recommendations, The American Statistician, Vol. 37, No. 3,
    pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
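if __name__ == "__main__":
    # Editor's smoke test, not part of the library API: a few quick checks of
    # the helpers above using only numpy. The expected shapes and values
    # follow directly from the definitions in this module.
    _rng = np.random.RandomState(0)
    _M = _rng.normal(size=(40, 12))
    _U, _s, _V = randomized_svd(_M, n_components=3, n_iter=2, random_state=0)
    print("randomized_svd shapes: %s %s %s" % (_U.shape, _s.shape, _V.shape))
    print("cartesian shape: %s" % (cartesian(([1, 2], [3, 4])).shape,))
    print("logsumexp(log([1, 1, 2])) = %.4f (expected log 4 ~= 1.3863)"
          % logsumexp(np.log(np.array([1., 1., 2.]))))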
| bsd-3-clause |
rlowrance/re-avm | predict.py | 1 | 5949 | '''predict using many fitted models on the real estate data
INVOCATION
 python predict.py SAMPLES MODEL TRANSACTIONMONTH NEIGHBORHOOD
where
 SAMPLES in {train, all} specifies which data in Working/samples2 to use
 MODEL in {en, gb, rf} specifies which model to use
 TRANSACTIONMONTH like YYYYMM specifies the last month of training data
 NEIGHBORHOOD in {all, city_name} specifies whether to train a model on all cities or just the specified city
which will predict for all observations in the TRANSACTIONMONTH using all fitted models of kind MODEL
INPUTS
WORKING/samples2/train.csv or WORKING/samples2/all.csv
WORKING/fit/DATA-MODEL-{TRANSACTIONMONTH - 1}-NEIGHBHOOD/*.pickle the fitted models
OUTPUTS
WORKING/predict[-item]/SAMPLES-MODEL-TRANSACTIONMONTH-NEIGHBORHOOD/predictions.pickle a dict
'''
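# Example invocation (editor's note; the month and other argument values below
# are hypothetical):
#   python predict.py train rf 200702 all
# main() passes sys.argv whole to make_control, so the script name itself is
# consumed by the 'invocation' positional argument.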
from __future__ import division
import argparse
import collections
import cPickle as pickle
import datetime
import itertools
import numpy as np
import os
import pandas as pd
import pdb
from pprint import pprint
import random
import sklearn
import sys
import time
import arg_type
import AVM
from Bunch import Bunch
from columns_contain import columns_contain
import dirutility
from Features import Features
import HPs
import layout_transactions
from Logger import Logger
from Month import Month
from Path import Path
from Report import Report
from SampleSelector import SampleSelector
from valavmtypes import ResultKeyEn, ResultKeyGbr, ResultKeyRfr, ResultValue
from Timer import Timer
# from TimeSeriesCV import TimeSeriesCV
cc = columns_contain
def make_control(argv):
'return a Bunch'
print argv
parser = argparse.ArgumentParser()
parser.add_argument('invocation')
parser.add_argument('samples', choices=['all', 'train'])
parser.add_argument('model', choices=['en', 'gb', 'rf'])
parser.add_argument('transaction_month')
parser.add_argument('neighborhood')
parser.add_argument('--test', action='store_true')
parser.add_argument('--trace', action='store_true')
arg = parser.parse_args(argv)
arg.me = arg.invocation.split('.')[0]
if arg.trace:
pdb.set_trace()
# convert arg.neighborhood into arg.all and arg.city
arg.city = (
None if arg.neighborhood == 'all' else
arg.neighborhood.replace('_', ' ')
)
random_seed = 123
random.seed(random_seed)
prior_month = Month(arg.transaction_month).decrement().as_str()
in_dir = '%s-%s-%s-%s' % (arg.samples, arg.model, prior_month, arg.neighborhood)
out_dir = '%s-%s-%s-%s' % (arg.samples, arg.model, arg.transaction_month, arg.neighborhood)
dir_working = Path().dir_working()
output_dir = (
os.path.join(dir_working, arg.me + '-test', out_dir, '') if arg.test else
os.path.join(dir_working, arg.me, out_dir, '')
)
dirutility.assure_exists(output_dir)
return Bunch(
arg=arg,
path_in_fitted=os.path.join(dir_working, 'fit', in_dir, ''),
path_in_samples=os.path.join(dir_working, 'samples2', arg.samples + '.csv'),
path_out_file=os.path.join(output_dir, 'predictions.pickle'),
path_out_log=os.path.join(output_dir, '0log.txt'),
random_seed=random_seed,
timer=Timer(),
)
def do_work(control):
'write predictions to output csv file'
samples = pd.read_csv(
control.path_in_samples,
nrows=10 if control.arg.test else None,
usecols=None, # TODO: change to columns we actually use
low_memory=False,
)
apns = samples[layout_transactions.apn]
sale_dates = samples[layout_transactions.sale_date]
print 'read %d rows of samples from file %s' % (len(samples), control.path_in_samples)
# iterate over the fitted models
hps_predictions = {}
for root, dirnames, filenames in os.walk(control.path_in_fitted):
assert len(dirnames) == 0, dirnames
print root, len(filenames)
for filename in filenames:
suffix_we_process = '.pickle'
if not filename.endswith(suffix_we_process):
print 'skipping file without a fitted model: %s' % filename
continue
hps_string = filename[:-len(suffix_we_process)]
hps = HPs.from_str(hps_string)
path_to_file = os.path.join(root, filename)
with open(path_to_file, 'r') as f:
ok, fitted_model = pickle.load(f)
if ok:
print 'predicting samples using fitted model %s' % filename
X, y = Features().extract_and_transform(samples, hps['units_X'], hps['units_y'])
predictions = fitted_model.predict(X)
assert len(predictions) == len(samples)
assert hps_string not in hps_predictions
hps_predictions[hps_string] = predictions
else:
                print 'could not predict samples using fitted model %s; reason: %s' % (
filename,
fitted_model, # an error message
)
# have all the predictions for all filenames (= a set of hyperparameters)
print 'walked all %d files' % len(filenames)
out = {
'apns': apns,
'sale_dates': sale_dates,
'hps_predictions': hps_predictions,
}
with open(control.path_out_file, 'w') as f:
pickle.dump(out, f)
    print 'wrote results to %s' % control.path_out_file
return
def main(argv):
control = make_control(argv)
sys.stdout = Logger(control.path_out_log) # now print statements also write to the log file
print control
lap = control.timer.lap
do_work(control)
lap('work completed')
if control.arg.test:
print 'DISCARD OUTPUT: test'
print control
print 'done'
return
if __name__ == '__main__':
if False:
# avoid pyflakes warnings
pdb.set_trace()
pprint()
pd.DataFrame()
np.array()
main(sys.argv)
| bsd-3-clause |
bjlittle/iris | docs/gallery_code/oceanography/plot_orca_projection.py | 4 | 1647 | """
Tri-Polar Grid Projected Plotting
=================================
This example demonstrates cell plots of data on the semi-structured ORCA2 model
grid.
First, the data is projected into the PlateCarree coordinate reference system.
Second, four pcolormesh plots are created from this projected dataset,
using different projections for the output image.
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import iris
import iris.analysis.cartography
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# Load data
filepath = iris.sample_data_path("orca2_votemper.nc")
cube = iris.load_cube(filepath)
# Choose plot projections
projections = {}
projections["Mollweide"] = ccrs.Mollweide()
projections["PlateCarree"] = ccrs.PlateCarree()
projections["NorthPolarStereo"] = ccrs.NorthPolarStereo()
projections["Orthographic"] = ccrs.Orthographic(
central_longitude=-90, central_latitude=45
)
pcarree = projections["PlateCarree"]
# Transform cube to target projection
new_cube, extent = iris.analysis.cartography.project(
cube, pcarree, nx=400, ny=200
)
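    # project() returns the cube regridded onto the PlateCarree coordinate
    # system together with its extent; nx and ny control the resolution of the
    # target grid (400 x 200 cells here).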
# Plot data in each projection
for name in sorted(projections):
fig = plt.figure()
fig.suptitle("ORCA2 Data Projected to {}".format(name))
# Set up axes and title
ax = plt.subplot(projection=projections[name])
# Set limits
ax.set_global()
# plot with Iris quickplot pcolormesh
qplt.pcolormesh(new_cube)
# Draw coastlines
ax.coastlines()
iplt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
gxxjjj/QuantEcon.py | examples/3dvec.py | 7 | 1491 | """
QE by Tom Sargent and John Stachurski.
Illustrates the span of two vectors in R^3.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import interp2d
fig = plt.figure()
ax = fig.gca(projection='3d')
x_min, x_max = -5, 5
y_min, y_max = -5, 5
alpha, beta = 0.2, 0.1
ax.set_xlim((x_min, x_max))
ax.set_ylim((x_min, x_max))
ax.set_zlim((x_min, x_max))
# Axes
ax.set_xticks((0,))
ax.set_yticks((0,))
ax.set_zticks((0,))
gs = 3
z = np.linspace(x_min, x_max, gs)
x = np.zeros(gs)
y = np.zeros(gs)
ax.plot(x, y, z, 'k-', lw=2, alpha=0.5)
ax.plot(z, x, y, 'k-', lw=2, alpha=0.5)
ax.plot(y, z, x, 'k-', lw=2, alpha=0.5)
# Fixed linear function, to generate a plane
def f(x, y):
return alpha * x + beta * y
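# Worked example (editor's note): with alpha = 0.2 and beta = 0.1, the vector
# heads used below give f(3, 4) = 0.6 + 0.4 = 1.0 and f(3, -4) = 0.6 - 0.4 = 0.2.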
# Vector locations, by coordinate
x_coords = np.array((3, 3))
y_coords = np.array((4, -4))
z = f(x_coords, y_coords)
for i in (0, 1):
ax.text(x_coords[i], y_coords[i], z[i], r'$a_{}$'.format(i+1), fontsize=14)
# Lines to vectors
for i in (0, 1):
x = (0, x_coords[i])
y = (0, y_coords[i])
z = (0, f(x_coords[i], y_coords[i]))
ax.plot(x, y, z, 'b-', lw=1.5, alpha=0.6)
# Draw the plane
grid_size = 20
xr2 = np.linspace(x_min, x_max, grid_size)
yr2 = np.linspace(y_min, y_max, grid_size)
x2, y2 = np.meshgrid(xr2, yr2)
z2 = f(x2, y2)
ax.plot_surface(x2, y2, z2, rstride=1, cstride=1, cmap=cm.jet,
linewidth=0, antialiased=True, alpha=0.2)
plt.show()
| bsd-3-clause |
DistrictDataLabs/yellowbrick | yellowbrick/text/base.py | 1 | 4140 | # yellowbrick.text.base
# Base classes for text feature visualizers and feature selection tools.
#
# Author: Rebecca Bilbro
# Created: Sat Jan 21 09:37:01 2017 -0500
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: base.py [75d9b20] [email protected] $
"""
Base classes for text feature visualizers and text feature selection tools.
"""
##########################################################################
## Imports
##########################################################################
from yellowbrick.base import Visualizer
from sklearn.base import TransformerMixin
##########################################################################
## Text Visualizers
##########################################################################
class TextVisualizer(Visualizer, TransformerMixin):
"""
Base class for text feature visualization to investigate documents
individually or as a full corpus.
TextVisualizers are used after a text corpus has been transformed
in some way (e.g. normalized through stemming or lemmatization, via
stopwords removal, or through vectorization). Thus a TextVisualizer
is itself a transformer and can be used in a Scikit-Learn Pipeline
to perform automatic visual analysis during build.
Accepts as input a DataFrame or Numpy array.
"""
def __init__(self, ax=None, fig=None, **kwargs):
"""
These parameters can be influenced later on in the visualization
process, but can and should be set as early as possible.
Parameters
----------
ax : axes
the axis to plot the figure on
fig : matplotlib Figure, default: None
The figure to plot the Visualizer on. If None is passed in the current
plot will be used (or generated if required).
kwargs : dict
Pass generic arguments to the drawing method
"""
super(TextVisualizer, self).__init__(ax=ax, fig=fig, **kwargs)
def fit(self, X, y=None, **fit_params):
"""
This method performs preliminary computations in order to set up the
figure, compute statistics, or perform other analyses. It can also
call drawing methods in order to set up various non-instance-related
figure elements.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
fit_params: dict
keyword arguments for parameter fitting.
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
return self
def transform(self, X):
"""
Primarily a pass-through to ensure that the text visualizer will
work in a pipeline setting. This method can also call drawing methods
in order to ensure that the visualization is constructed.
Returns
-------
X : numpy array
This method must return a numpy array with the same shape as X.
"""
return X
def fit_transform_show(self, X, y=None, **kwargs):
"""
Fit to data, transform it, then visualize it.
Fits the text visualizer to X and y with optional parameters by
passing in all of kwargs, then calls show with the same kwargs.
This method must return the result of the transform method.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Pass generic arguments to the show method
Returns
-------
X : numpy array
This method must return a numpy array with the same shape as X.
"""
Xp = self.fit(X, y, **kwargs).transform(X)
self.show(**kwargs)
return Xp
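if __name__ == "__main__":
    # Editor's minimal usage sketch, not part of the library: a do-nothing
    # subclass that only exercises the pass-through fit/transform contract
    # documented above. The toy term-count matrix is made up for illustration.
    import numpy as np
    class _PassThroughTextVisualizer(TextVisualizer):
        """Adds no drawing; used only to demonstrate the transformer API."""
        pass
    docs = np.array([[1, 0, 2], [0, 3, 1]])  # two documents, three terms
    viz = _PassThroughTextVisualizer()
    print(viz.fit(docs).transform(docs).shape)  # expected: (2, 3)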
| apache-2.0 |
rohanp/scikit-learn | sklearn/model_selection/tests/test_search.py | 20 | 30855 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
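# Note on MockClassifier: the search estimators only rely on the duck-typed
# estimator API exercised here -- fit, predict (plus the predict_proba /
# decision_function / transform aliases), score, get_params and set_params --
# which is why it does not need to inherit from BaseEstimator.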
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
# Check if ValueError (when labels is None) propagates to GridSearchCV
# And also check if labels is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
labels = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, labels)
non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_label_cvs:
print(cv)
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
    # check GridSearchCV doesn't destroy pandas DataFrame/Series inputs
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def test_randomized_search_grid_scores():
    # Make a dataset with a lot of noise to get various kinds of prediction
    # errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
igabr/Metis_Projects_Chicago_2017 | 03-Project-McNulty/show_cnf_matrix.py | 1 | 3303 | #With great thanks to https://notmatthancock.github.io/2015/10/28/confusion-matrix.html
def show_confusion_matrix(C,class_labels=['PAID','CHARGED OFF']):
"""
C: ndarray, shape (2,2) as given by scikit-learn confusion_matrix function
class_labels: list of strings, default simply labels 0 and 1.
Draws confusion matrix with associated metrics.
"""
import matplotlib.pyplot as plt
import numpy as np
assert C.shape == (2,2), "Confusion matrix should be from binary classification only."
# true negative, false positive, etc...
tn = C[0,0]; fp = C[0,1]; fn = C[1,0]; tp = C[1,1];
NP = fn+tp # Num positive examples
NN = tn+fp # Num negative examples
N = NP+NN
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.imshow(C, interpolation='nearest', cmap=plt.cm.gray)
# Draw the grid boxes
ax.set_xlim(-0.5,2.5)
ax.set_ylim(2.5,-0.5)
ax.plot([-0.5,2.5],[0.5,0.5], '-k', lw=2)
ax.plot([-0.5,2.5],[1.5,1.5], '-k', lw=2)
ax.plot([0.5,0.5],[-0.5,2.5], '-k', lw=2)
ax.plot([1.5,1.5],[-0.5,2.5], '-k', lw=2)
# Set xlabels
ax.set_xlabel('Predicted Label', fontsize=16)
ax.set_xticks([0,1,2])
ax.set_xticklabels(class_labels + [''])
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
    # These coordinates might require some tinkering. Ditto for y, below.
ax.xaxis.set_label_coords(0.34,1.06)
# Set ylabels
ax.set_ylabel('True Label', fontsize=16, rotation=90)
ax.set_yticklabels(class_labels + [''],rotation=90)
ax.set_yticks([0,1,2])
ax.yaxis.set_label_coords(-0.09,0.65)
# Fill in initial metrics: tp, tn, etc...
ax.text(0,0,
'True Neg: %d\n(Num Neg: %d)'%(tn,NN),
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
ax.text(0,1,
'False Neg: %d'%fn,
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
ax.text(1,0,
'False Pos: %d'%fp,
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
ax.text(1,1,
'True Pos: %d\n(Num Pos: %d)'%(tp,NP),
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
# Fill in secondary metrics: accuracy, true pos rate, etc...
ax.text(2,0,
'False Pos Rate: %.2f'%(fp / (fp+tn+0.)),
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
ax.text(2,1,
'True Pos Rate: %.2f'%(tp / (tp+fn+0.)),
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
ax.text(2,2,
'Accuracy: %.2f'%((tp+tn+0.)/N),
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
ax.text(0,2,
            'Neg Pred Val: %.2f'%(1-fn/(fn+tn+0.)),
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
ax.text(1,2,
'Pos Pred Val: %.2f'%(tp/(tp+fp+0.)),
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
plt.tight_layout()
plt.show()
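# Illustrative usage sketch (added for clarity; not part of the original script).
# The sample labels, predictions and the scikit-learn import are assumptions made
# purely for this example; any 2x2 confusion matrix can be passed in.
if __name__ == "__main__":
    from sklearn.metrics import confusion_matrix
    y_true = [0, 0, 1, 1, 1, 0, 1, 0]  # hypothetical true outcomes
    y_pred = [0, 1, 1, 1, 0, 0, 1, 0]  # hypothetical model predictions
    show_confusion_matrix(confusion_matrix(y_true, y_pred))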
| mit |
MRN-Code/pl2mind | tools/simtb_viewer.py | 1 | 3491 | """
Module to view models trained with simTB data.
"""
from math import ceil
import logging
import matplotlib
matplotlib.use("Agg")
from matplotlib import cm
from matplotlib.patches import FancyBboxPatch
from matplotlib import pyplot as plt
from matplotlib import rc
import numpy as np
logger = logging.getLogger("pl2mind")
def make_spatial_map_image(spatial_map, out_file):
fig = plt.figure(figsize=(5, 5))
assert spatial_map.shape[2] == 1
spatial_map = spatial_map.reshape(*spatial_map.shape[:2])
imax = np.max(np.absolute(spatial_map))
plt.axis("off")
imshow_args = {'vmax': imax, 'vmin': -imax}
plt.imshow(spatial_map, **imshow_args)
if out_file is not None:
logger.info("Saving montage to %s" % out_file)
plt.savefig(out_file, bbox_inches="tight")
plt.close()
else:
plt.draw()
def montage(weights, fig=None, out_file=None,
feature_dict=None, target_stat=None, target_value=None):
features = weights.shape[0]
iscale = 1
y = 8
x = int(ceil(1.0 * features / y))
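    # grid layout: y (=8) feature maps per row, so x rows are needed for all features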
font = {'size':8}
rc('font',**font)
if fig is None:
fig = plt.figure(figsize=[iscale * y, iscale * x])
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01,
top=0.99, wspace=0.1, hspace=0)
for f in xrange(features):
logger.debug("Saving simtb montage %d" % f)
feat = weights[f]
assert feat.shape[2] == 1, feat.shape
        feat = feat.reshape(feat.shape[0], feat.shape[1])
        feat = feat / feat.std()
        imax = np.max(np.absolute(feat))
        imin = -imax
imshow_args = {'vmax': imax, 'vmin': imin}
ax = fig.add_subplot(x, y, f + 1)
plt.axis("off")
ax.imshow(feat, cmap=cm.RdBu_r, **imshow_args)
plt.text(0.05, 0.8, str(f),
transform=ax.transAxes,
horizontalalignment='center',
color="white")
pos = [(0.05, 0.05), (0.4, 0.05), (0.8, 0.05)]
colors = ["purple", "yellow", "green"]
if (feature_dict is not None and
feature_dict.get(f, None) is not None):
d = feature_dict[f]
for i, key in enumerate([k for k in d if k != "real_id"]):
plt.text(pos[i][0], pos[i][1], "%s=%.2f"
% (key, d[key]) ,transform=ax.transAxes,
horizontalalignment="left", color=colors[i])
if key == target_stat:
assert target_value is not None
if d[key] >= target_value:
p_fancy = FancyBboxPatch((0.1, 0.1), 2.5 - .1, 1 - .1,
boxstyle="round,pad=0.1",
ec=(1., 0.5, 1.),
fc="none")
ax.add_patch(p_fancy)
elif d[key] <= -target_value:
p_fancy = FancyBboxPatch((0.1, 0.1), iscale * 2.5 - .1, iscale - .1,
boxstyle="round,pad=0.1",
ec=(0., 0.5, 0.),
fc="none")
ax.add_patch(p_fancy)
logger.info("Finished processing simtb montage")
if out_file is not None:
logger.info("Saving montage to %s" % out_file)
plt.savefig(out_file)
else:
plt.draw()
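# Illustrative usage sketch (added for clarity; not part of the original module).
# The random weights below are an assumption: montage() expects an array shaped
# (n_features, x, y, 1), which this fakes with noise just to exercise the plotting.
if __name__ == "__main__":
    demo_weights = np.random.randn(16, 20, 20, 1)
    montage(demo_weights, out_file="demo_montage.png")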
| gpl-2.0 |
carlsonp/kaggle-TrulyNative | processURLS.py | 1 | 3720 | from __future__ import print_function
import re, os, sys, multiprocessing, zipfile, Queue
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from urlparse import urlparse
from py2neo import authenticate, Graph, Node, Relationship
#https://pypi.python.org/pypi/etaprogress/
from etaprogress.progress import ProgressBar
#337304 total HTML files, some are actually NOT in either the training or testing set
#process_zips = ["./data/0.zip", "./data/1.zip", "./data/2.zip", "./data/3.zip", "./data/4.zip"]
process_zips = ["./data/0.zip"]
def parseFile(contents, filename, sponsored):
nodes = [sponsored, filename]
#use lxml parser for faster speed
cleaned = BeautifulSoup(contents, "lxml")
for anchor in cleaned.findAll('a', href=True):
if anchor['href'].startswith("http"):
try:
parsedurl = urlparse(anchor['href'])
parsedurl = parsedurl.netloc.replace("www.", "", 1)
parsedurl = re.sub('[^0-9a-zA-Z\.]+', '', parsedurl) #remove non-alphanumeric and non-period literals
nodes.append(parsedurl)
except ValueError:
print("IPv6 URL?")
return nodes
def addNodes(nodes):
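	# nodes layout: nodes[0] is the sponsored flag (1 sponsored, 0 not, 2 testing),
	# nodes[1] is the HTML filename, and the remaining entries are linked domains.
	# Cypher statements are only queued here; the main loop commits them in batches.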
q.put("CREATE(f:files {filename: '"+nodes[1]+"'})")
if nodes[0] == 1:
q.put("MATCH (n:roots {type:'Sponsored'}) MATCH (f:files {filename:'"+nodes[1]+"'}) MERGE(n)-[:has]->(f)")
elif nodes[0] == 0:
q.put("MATCH (n:roots {type:'NotSponsored'}) MATCH (f:files {filename:'"+nodes[1]+"'}) MERGE(n)-[:has]->(f)")
else:
q.put("MATCH (n:roots {type:'Testing'}) MATCH (f:files {filename:'"+nodes[1]+"'}) MERGE(n)-[:has]->(f)")
for n in nodes[2:]:
q.put("MERGE (w:website {website:'"+n+"'})") #create website node if it doesn't already exist
q.put("MATCH (f:files {filename:'"+nodes[1]+"'}) MATCH (w:website {website:'"+n+"'}) MERGE(f)-[:links]->(w)")
train = pd.read_csv("./data/train.csv", header=0, delimiter=",", quoting=3)
sample = pd.read_csv("./data/sampleSubmission.csv", header=0, delimiter=",", quoting=3)
print("Starting processing...")
authenticate("localhost:7474", "neo4j", "neo4j") #username and password
graph = Graph() #by default, py2neo opens localhost
graph.delete_all() #deletes all nodes and edges (clears old data)
tx = graph.cypher.begin()
tx.append("CREATE(n:roots {type: 'Sponsored'})")
tx.append("CREATE(n:roots {type: 'NotSponsored'})")
tx.append("CREATE(n:roots {type: 'Testing'})")
tx.commit()
q = Queue.Queue()
for i, zipFile in enumerate(process_zips):
archive = zipfile.ZipFile(zipFile, 'r')
file_paths = zipfile.ZipFile.namelist(archive)
bar = ProgressBar(len(file_paths), max_width=40)
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-1 or 1)
for k, file_path in enumerate(file_paths):
data = archive.read(file_path)
openfile = file_path[2:] #filename
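		# Parsing happens in worker processes; addNodes runs back in the main process
		# as the apply_async callback, so only the main process touches the queue.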
sponsored = train.loc[train['file'] == openfile]
if not sponsored.empty:
pool.apply_async(parseFile, args = (data, openfile, int(sponsored['sponsored']), ), callback = addNodes)
testing = sample.loc[sample['file'] == openfile]
if not testing.empty:
pool.apply_async(parseFile, args = (data, openfile, 2, ), callback = addNodes)
if k > 10:
tx = graph.cypher.begin()
while not q.empty():
tx.append(q.get())
tx.commit()
bar.numerator = k
print("Folder:", i, bar, end='\r')
sys.stdout.flush()
pool.close()
pool.join()
tx = graph.cypher.begin()
while not q.empty():
tx.append(q.get())
tx.commit()
print()
#print("Sponsored pages: ", G.out_degree("SPONSORED"))
#print("Normal pages: ", G.out_degree("NOTSPONSORED"))
#if G.out_degree("TESTING") != 235917:
#print("Error, invalid number of testing nodes.")
#if G.out_degree("SPONSORED") + G.out_degree("NOTSPONSORED") != 101107:
#print("Error, invalid number of training nodes.")
| gpl-3.0 |
cython-testbed/pandas | pandas/tests/test_algos.py | 1 | 66798 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
import struct
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com.asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self, writable):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
data.setflags(write=writable)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000',
'2015-01-01T00:00:00.000000000',
'2015-01-01T00:00:00.000000000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
duplicated_items = ['a', np.nan, 'c', 'c']
result = pd.unique(duplicated_items)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
def test_obj_none_preservation(self):
# GH 20866
arr = np.array(['foo', None], dtype=object)
result = pd.unique(arr)
expected = np.array(['foo', None], dtype=object)
tm.assert_numpy_array_equal(result, expected, strict_nan=True)
def test_signed_zero(self):
# GH 21866
a = np.array([-0.0, 0.0])
result = pd.unique(a)
expected = np.array([-0.0]) # 0.0 and -0.0 are equivalent
tm.assert_numpy_array_equal(result, expected)
def test_different_nans(self):
# GH 21866
# create different nans from bit-patterns:
NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0]
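        # 0x7ff8000000000000 is the canonical quiet-NaN bit pattern for IEEE 754
        # doubles; flipping the lowest mantissa bit yields a NaN with a different
        # payload, so the two values have different bits yet both compare unequal
        # to themselves.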
assert NAN1 != NAN1
assert NAN2 != NAN2
a = np.array([NAN1, NAN2]) # NAN1 and NAN2 are equivalent
result = pd.unique(a)
expected = np.array([np.nan])
tm.assert_numpy_array_equal(result, expected)
def test_first_nan_kept(self):
# GH 22295
# create different nans from bit-patterns:
bits_for_nan1 = 0xfff8000000000001
bits_for_nan2 = 0x7ff8000000000001
NAN1 = struct.unpack("d", struct.pack("=Q", bits_for_nan1))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", bits_for_nan2))[0]
assert NAN1 != NAN1
assert NAN2 != NAN2
for el_type in [np.float64, np.object]:
a = np.array([NAN1, NAN2], dtype=el_type)
result = pd.unique(a)
assert result.size == 1
# use bit patterns to identify which nan was kept:
result_nan_bits = struct.unpack("=Q",
struct.pack("d", result[0]))[0]
assert result_nan_bits == bits_for_nan1
def test_do_not_mangle_na_values(self, unique_nulls_fixture,
unique_nulls_fixture2):
# GH 22295
if unique_nulls_fixture is unique_nulls_fixture2:
return # skip it, values not unique
a = np.array([unique_nulls_fixture,
unique_nulls_fixture2], dtype=np.object)
result = pd.unique(a)
assert result.size == 2
assert a[0] is unique_nulls_fixture
assert a[1] is unique_nulls_fixture2
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), {1})
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), {'a'})
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
def test_same_nan_is_in(self):
# GH 22160
        # nan is special: "a is b" does not imply "a == b"
        # at least, isin() should follow python's "np.nan in [np.nan] == True"
        # casting to np.float64 (i.e. to another float object) somewhere on
        # the way could jeopardize this behavior
comps = [np.nan] # could be casted to float64
values = [np.nan]
expected = np.array([True])
result = algos.isin(comps, values)
tm.assert_numpy_array_equal(expected, result)
def test_same_object_is_in(self):
# GH 22160
        # there could be special treatment for nans,
        # but the user could define a custom class
        # with similar behavior; in that case we should at least
        # fall back to the usual python behavior: "a in [a] == True"
class LikeNan(object):
def __eq__(self):
return False
def __hash__(self):
return 0
a, b = LikeNan(), LikeNan()
# same object -> True
tm.assert_numpy_array_equal(algos.isin([a], [a]), np.array([True]))
# different objects -> False
tm.assert_numpy_array_equal(algos.isin([a], [b]), np.array([False]))
def test_different_nans(self):
# GH 22160
# all nans are handled as equivalent
comps = [float('nan')]
values = [float('nan')]
assert comps[0] is not values[0] # different nan-objects
# as list of python-objects:
result = algos.isin(comps, values)
tm.assert_numpy_array_equal(np.array([True]), result)
# as object-array:
result = algos.isin(np.asarray(comps, dtype=np.object),
np.asarray(values, dtype=np.object))
tm.assert_numpy_array_equal(np.array([True]), result)
# as float64-array:
result = algos.isin(np.asarray(comps, dtype=np.float64),
np.asarray(values, dtype=np.float64))
tm.assert_numpy_array_equal(np.array([True]), result)
def test_no_cast(self):
# GH 22160
        # ensure 42 is not cast to a string
comps = ['ss', 42]
values = ['42']
expected = np.array([False, False])
result = algos.isin(comps, values)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
def test_different_nan_objects(self):
# GH 22119
comps = np.array(['nan', np.nan * 1j, float('nan')], dtype=np.object)
vals = np.array([float('nan')], dtype=np.object)
expected = np.array([False, False, True])
result = algos.isin(comps, vals)
tm.assert_numpy_array_equal(expected, result)
def test_different_nans_as_float64(self):
# GH 21866
# create different nans from bit-patterns,
# these nans will land in different buckets in the hash-table
# if no special care is taken
NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0]
assert NAN1 != NAN1
assert NAN2 != NAN2
# check that NAN1 and NAN2 are equivalent:
arr = np.array([NAN1, NAN2], dtype=np.float64)
lookup1 = np.array([NAN1], dtype=np.float64)
result = algos.isin(arr, lookup1)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
lookup2 = np.array([NAN2], dtype=np.float64)
result = algos.isin(arr, lookup2)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category')]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category')]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([pd.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([pd.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category'),
Index(case, dtype=object)]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category'),
Series(case, dtype=object)]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_unique_index(self):
cases = [Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
assert case.is_unique
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
@pytest.mark.parametrize('arr, unique', [
([(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
[(0, 0), (0, 1), (1, 0), (1, 1)]),
([('b', 'c'), ('a', 'b'), ('a', 'b'), ('b', 'c')],
[('b', 'c'), ('a', 'b')]),
([('a', 1), ('b', 2), ('a', 3), ('a', 1)],
[('a', 1), ('b', 2), ('a', 3)]),
])
def test_unique_tuples(self, arr, unique):
# https://github.com/pandas-dev/pandas/issues/16519
expected = np.empty(len(unique), dtype=object)
expected[:] = unique
result = pd.unique(arr)
tm.assert_numpy_array_equal(result, expected)
class GroupVarTestMixin(object):
def test_group_var_generic_1d(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3, )).astype('int64')
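        # labels tile 0..4 three times, so group i collects values[i], values[i+5]
        # and values[i+10]; the Fortran-order reshape below puts each group in a row
        # before taking the per-row variance.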
expected_out = (np.squeeze(values)
.reshape((5, 3), order='F')
.std(axis=1, ddof=1) ** 2)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype='int64')
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype='int64')
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.vstack([values[:, 0]
.reshape(5, 2, order='F')
.std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5)]).T.astype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, check_less_precise=6)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float64
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float32
dtype = np.float32
rtol = 1e-2
class TestHashTable(object):
def test_lookup_nan(self, writable):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
# GH 21688 ensure we can deal with readonly memory views
xs.setflags(write=writable)
m = ht.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_add_signed_zeros(self):
# GH 21866 inconsistent hash-function for float64
# default hash-function would lead to different hash-buckets
# for 0.0 and -0.0 if there are more than 2^30 hash-buckets
# but this would mean 16GB
N = 4 # 12 * 10**8 would trigger the error, if you have enough memory
m = ht.Float64HashTable(N)
m.set_item(0.0, 0)
m.set_item(-0.0, 0)
assert len(m) == 1 # 0.0 and -0.0 are equivalent
def test_add_different_nans(self):
# GH 21866 inconsistent hash-function for float64
# create different nans from bit-patterns:
NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0]
assert NAN1 != NAN1
assert NAN2 != NAN2
# default hash function would lead to different hash-buckets
# for NAN1 and NAN2 even if there are only 4 buckets:
m = ht.Float64HashTable()
m.set_item(NAN1, 0)
m.set_item(NAN2, 0)
assert len(m) == 1 # NAN1 and NAN2 are equivalent
def test_lookup_overflow(self, writable):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
# GH 21688 ensure we can deal with readonly memory views
xs.setflags(write=writable)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_get_unique(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
def test_vector_resize(self, writable):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals, safely_resizes):
vals = np.array(np.random.randn(1000), dtype=dtype)
# GH 21688 ensure we can deal with readonly memory views
vals.setflags(write=writable)
# get_labels may append to uniques
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array() sets an external_view_exists flag on uniques.
tmp = uniques.to_array()
oldshape = tmp.shape
# subsequent get_labels() calls can no longer append to it
# (for all but StringHashTables + ObjectVector)
if safely_resizes:
htable.get_labels(vals, uniques, 0, -1)
else:
with pytest.raises(ValueError) as excinfo:
htable.get_labels(vals, uniques, 0, -1)
assert str(excinfo.value).startswith('external reference')
uniques.to_array() # should not raise here
assert tmp.shape == oldshape
test_cases = [
(ht.PyObjectHashTable, ht.ObjectVector, 'object', False),
(ht.StringHashTable, ht.ObjectVector, 'object', True),
(ht.Float64HashTable, ht.Float64Vector, 'float64', False),
(ht.Int64HashTable, ht.Int64Vector, 'int64', False),
(ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)]
for (tbl, vect, dtype, safely_resizes) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0, safely_resizes)
_test_vector_resize(tbl(), vect(), dtype, 10, safely_resizes)
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
a[np.random.choice(len(a), 10)] = -1
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
class TestRank(object):
@td.skip_if_no_scipy
def test_scipy_compat(self):
from scipy.stats import rankdata
def _check(arr):
mask = ~np.isfinite(arr)
arr = arr.copy()
result = libalgos.rank_1d_float64(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = nan
assert_almost_equal(result, exp)
_check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
_check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))
def test_basic(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in np.typecodes['AllInteger']:
s = Series([1, 100], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in [np.float64, np.uint64]:
s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
with tm.assert_raises_regex(TypeError, msg):
algos.rank(arr)
def test_pad_backfill_object_segfault():
old = np.array([], dtype='O')
new = np.array([datetime(2010, 12, 31)], dtype='O')
result = libalgos.pad_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.pad_object(new, old)
expected = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(new, old)
expected = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_arrmap():
values = np.array(['foo', 'foo', 'bar', 'bar', 'baz', 'qux'], dtype='O')
result = libalgos.arrmap_object(values, lambda x: x in ['foo', 'bar'])
assert (result.dtype == np.bool_)
class TestTseriesUtil(object):
def test_combineFunc(self):
pass
def test_reindex(self):
pass
def test_isna(self):
pass
def test_groupby(self):
pass
def test_groupby_withnull(self):
pass
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([0, 0, 1, 1, 1, 1,
2, 2, 2, 2, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([1, 4])
new = Index(lrange(5, 10))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_pad(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([5, 10])
new = Index(lrange(5))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='int64'),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13,
12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10,
9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7,
6, 5,
4, 3, 2, 1, 0], dtype='int64')]
assert (not libalgos.is_lexsorted(failure))
def test_groupsort_indexer():
a = np.random.randint(0, 1000, 100).astype(np.int64)
b = np.random.randint(0, 1000, 100).astype(np.int64)
result = libalgos.groupsort_indexer(a, 1000)[0]
# need to use a stable sort
# np.argsort returns int, groupsort_indexer
# always returns int64
expected = np.argsort(a, kind='mergesort')
expected = expected.astype(np.int64)
tm.assert_numpy_array_equal(result, expected)
# compare with lexsort
# np.lexsort returns int, groupsort_indexer
# always returns int64
key = a * 1000 + b
result = libalgos.groupsort_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
expected = expected.astype(np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_infinity_sort():
# GH 13445
# numpy's argsort can be unhappy if something is less than
# itself. Instead, let's give our infinities a self-consistent
# ordering, but outside the float extended real line.
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf]
assert all(Inf >= x for x in ref_nums)
assert all(Inf > x or x is Inf for x in ref_nums)
assert Inf >= Inf and Inf == Inf
assert not Inf < Inf and not Inf > Inf
assert libalgos.Infinity() == libalgos.Infinity()
assert not libalgos.Infinity() != libalgos.Infinity()
assert all(NegInf <= x for x in ref_nums)
assert all(NegInf < x or x is NegInf for x in ref_nums)
assert NegInf <= NegInf and NegInf == NegInf
assert not NegInf < NegInf and not NegInf > NegInf
assert libalgos.NegInfinity() == libalgos.NegInfinity()
assert not libalgos.NegInfinity() != libalgos.NegInfinity()
for perm in permutations(ref_nums):
assert sorted(perm) == ref_nums
# smoke tests
np.array([libalgos.Infinity()] * 32).argsort()
np.array([libalgos.NegInfinity()] * 32).argsort()
def test_infinity_against_nan():
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
assert not Inf > np.nan
assert not Inf >= np.nan
assert not Inf < np.nan
assert not Inf <= np.nan
assert not Inf == np.nan
assert Inf != np.nan
assert not NegInf > np.nan
assert not NegInf >= np.nan
assert not NegInf < np.nan
assert not NegInf <= np.nan
assert not NegInf == np.nan
assert NegInf != np.nan
def test_ensure_platform_int():
arr = np.arange(100, dtype=np.intp)
result = libalgos.ensure_platform_int(arr)
assert (result is arr)
def test_int64_add_overflow():
# see gh-14068
msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
n = np.iinfo(np.int64).min
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), m)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), n)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
with tm.assert_produces_warning(RuntimeWarning):
algos.checked_add_with_arr(np.array([m, m]),
np.array([np.nan, m]))
# Check that the nan boolean arrays override whether or not
# the addition overflows. We don't check the result but just
# the fact that an OverflowError is not raised.
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, False]),
b_mask=np.array([False, True]))
class TestMode(object):
def test_no_mode(self):
exp = Series([], dtype=np.float64)
tm.assert_series_equal(algos.mode([]), exp)
def test_mode_single(self):
# GH 15714
exp_single = [1]
data_single = [1]
exp_multi = [1]
data_multi = [1, 1]
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1], dtype=np.int)
tm.assert_series_equal(algos.mode([1]), exp)
exp = Series(['a', 'b', 'c'], dtype=np.object)
tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp)
def test_number_mode(self):
exp_single = [1]
data_single = [1] * 5 + [2] * 3
exp_multi = [1, 3]
data_multi = [1] * 5 + [2] * 3 + [3] * 5
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_strobj_mode(self):
exp = ['b']
data = ['a'] * 2 + ['b'] * 3
s = Series(data, dtype='c')
exp = Series(exp, dtype='c')
tm.assert_series_equal(algos.mode(s), exp)
exp = ['bar']
data = ['foo'] * 2 + ['bar'] * 3
for dt in [str, object]:
s = Series(data, dtype=dt)
exp = Series(exp, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_datelike_mode(self):
exp = Series(['1900-05-03', '2011-01-03',
'2013-01-02'], dtype="M8[ns]")
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]')
s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
'2011-01-03', '2013-01-02'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_timedelta_mode(self):
exp = Series(['-1 days', '0 days', '1 days'],
dtype='timedelta64[ns]')
s = Series(['1 days', '-1 days', '0 days'],
dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_mixed_dtype(self):
exp = Series(['foo'])
s = Series([1, 'foo', 'foo'])
tm.assert_series_equal(algos.mode(s), exp)
def test_uint64_overflow(self):
exp = Series([2**63], dtype=np.uint64)
s = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1, 2**63], dtype=np.uint64)
s = Series([1, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
def test_categorical(self):
c = Categorical([1, 2])
exp = c
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 'a', 'a'])
exp = Categorical(['a'], categories=[1, 'a'])
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 1, 2, 3, 3])
exp = Categorical([1, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
def test_index(self):
idx = Index([1, 2, 3])
exp = Series([1, 2, 3], dtype=np.int64)
tm.assert_series_equal(algos.mode(idx), exp)
idx = Index([1, 'a', 'a'])
exp = Series(['a'], dtype=object)
tm.assert_series_equal(algos.mode(idx), exp)
idx = Index([1, 1, 2, 3, 3])
exp = Series([1, 3], dtype=np.int64)
tm.assert_series_equal(algos.mode(idx), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
idx = Index(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(idx), exp)
| bsd-3-clause |
Lawrence-Liu/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
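# Worked example (editor's note, not part of the original source): with 80
# inliers out of 100 samples, min_samples=2 and probability=0.99 this gives
# ceil(log(1 - 0.99) / log(1 - 0.8 ** 2)) = ceil(log(0.01) / log(0.36))
# = ceil(4.51) = 5 trials.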
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to a high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimators accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly chosen sub-samples. Consider relaxing the"
" constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
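# ---------------------------------------------------------------------------
# Editor's illustrative usage sketch -- not part of the original scikit-learn
# source. The data below (X_demo, y_demo) is made up for demonstration, and the
# block is guarded by __main__ so that importing the module is unaffected.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(0, 10, size=(100, 1))
    y_demo = 3.0 * X_demo.ravel() + 1.0 + rng.normal(scale=0.5, size=100)
    y_demo[:5] += 50.0  # inject a few gross outliers
    ransac = RANSACRegressor(random_state=0)
    ransac.fit(X_demo, y_demo)
    # the robust fit should recover a slope close to 3 and flag the 5 outliers
    print("estimated coef:", ransac.estimator_.coef_)
    print("number of inliers:", int(ransac.inlier_mask_.sum()))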
| bsd-3-clause |
costypetrisor/scikit-learn | sklearn/datasets/__init__.py | 74 | 3616 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
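# Editor's note -- illustrative usage sketch, not part of the original module.
# The load_*/fetch_* helpers return Bunch objects exposing `data` and `target`,
# while the make_* functions generate artificial data, e.g.:
#
#   from sklearn.datasets import load_iris, make_blobs
#   iris = load_iris()
#   X, y = iris.data, iris.target
#   X_synth, y_synth = make_blobs(n_samples=100, centers=3, random_state=0)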
| bsd-3-clause |
luoshao23/ML_algorithm | Naive_Bayes/naive_bayes.py | 1 | 5550 | from abc import abstractclassmethod
from collections import defaultdict
import warnings
import numpy as np
from scipy.special import logsumexp
from sklearn.utils import check_X_y
from sklearn.preprocessing import LabelBinarizer
class Naive_Bayes(object):
"""Naive Bayes classifier
Neither the input nor the target needs to be binarized.
"""
def __init__(self, alpha=0):
self._unique_labels = None
self._alpha = alpha
def fit(self, X, y):
X, y = check_X_y(X, y)
self._n_feature = X.shape[1]
self._unique_labels, self._class_counts = np.unique(
y, return_counts=True)
# smoothed class priors: (count + alpha) / (n_samples + alpha * n_classes)
self._class_prior = (self._class_counts + self._alpha) / (self._class_counts.sum() + self._alpha * len(self._unique_labels))
self._count(X, y)
def _count(self, X, y):
self._matrix = [defaultdict(int) for _ in range(self._n_feature)]
for j in range(self._n_feature):
labels, counts = np.unique(
np.c_[X[:, j], y], return_counts=True, axis=0)
s = len(set(X[:, j]))
for l, c in zip(labels, counts):
# additive (Lidstone) smoothing: (count + alpha) / (class_count + alpha * n_values)
self._matrix[j][tuple(l)] = (c + self._alpha) / (self._class_counts[self._unique_labels == l[1]][0] + self._alpha * s)
def predict(self, X, y=None):
n_samples, n_features = X.shape
assert n_features == self._n_feature
res = np.empty(n_samples)
for i in range(n_samples):
proba = self._class_prior.copy()  # copy so repeated predictions do not mutate the shared prior
for j in range(n_features):
for c in range(len(self._unique_labels)):
proba[c] *= self._matrix[j][(X[i, j],
self._unique_labels[c])]
res[i] = self._unique_labels[np.argmax(proba)]
return res
class BaseNB(object):
@abstractclassmethod
def _joint_log_likelihood(self, X):
"""log P(c) + log P(X|c)
"""
def predict(self, X):
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
jll = self._joint_log_likelihood(X)
return jll - logsumexp(jll, axis=1, keepdims=True)
def predict_proba(self, X):
return np.exp(self.predict_log_proba(X))
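# Editor's note (worked example, not part of the original source): the two
# predict_*_proba helpers above normalize the joint log-likelihood with
# log-sum-exp, i.e.
#     log P(c | X) = jll_c - log(sum_c' exp(jll_c'))
# For instance, if jll = [log 0.3, log 0.1] for a sample, the normalized
# posteriors are 0.3 / 0.4 = 0.75 and 0.1 / 0.4 = 0.25.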
_ALPHA_MIN = 1e-10
class BaseDiscreteNB(BaseNB):
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError('Number of priors must match the number of classes.')
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
log_class_count = np.log(self.class_count_)
self.class_log_prior_ = log_class_count - np.log(self.class_count_.sum())
else:
self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
def _check_alpha(self):
if np.min(self.alpha) < 0:
raise ValueError('Alpha should be >= 0.')
if isinstance(self.alpha, np.ndarray):
if not self.alpha.shape[0] == self.feature_count_.shape[1]:
raise ValueError("alpha should be a scalar or an array with shape [n_features]")
if np.min(self.alpha) < _ALPHA_MIN:
warnings.warn("alpha too small, setting alpha = %.1e" % _ALPHA_MIN)
return np.maximum(self.alpha, _ALPHA_MIN)
return self.alpha
def fit(self, X, y):
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
# in case of binary labels, turn Y into shape [n_samples, 2]
if Y.shape[1] == 1:
Y = np.concatenate([1 - Y, Y], axis=1)
class_prior = self.class_prior
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros(
(n_effective_classes, n_features), dtype=np.float64)
self._count(X, Y)
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
class MultinomialNB(BaseDiscreteNB):
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
# self.feature_count with shape [n_classes, n_features]
# self.class_count_ with shape [n_classes]
if np.any(X < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += np.dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
return np.dot(X, self.feature_log_prob.T) + self.class_log_prior_
if __name__ == "__main__":
# x1 = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3]
# x2 = [3, 5, 5, 3, 3, 3, 5, 5, 7, 7, 7, 5, 5, 7, 7]
# y = [-1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1]
x1 = [3, 2, 5]
x2 = [4, 1, 8]
y = [-1, 1, -1]
X = np.vstack([x1, x2]).T
# print(X)
nb = Naive_Bayes(1)
nb.fit(X, y)
print(nb.predict(np.array([[2, 3], [3, 5]])))
mnb = MultinomialNB()
mnb.fit(X, y)
print(mnb.predict(np.array([[8, 3], [3, 5]])))
| mit |
awanke/bokeh | bokeh/crossfilter/models.py | 40 | 30635 | from __future__ import absolute_import
import logging
import six
import pandas as pd
import numpy as np
from ..plotting import curdoc
from ..models import ColumnDataSource, GridPlot, Panel, Tabs, Range
from ..models.widgets import Select, MultiSelect, InputWidget
# crossfilter plotting utilities
from .plotting import make_histogram_source, make_histogram, cross, hide_axes
from .plugins import CrossScatterPlugin, CrossBarPlugin, CrossLinePlugin
# bokeh plotting functions
from ..plot_object import PlotObject
from ..properties import Dict, Enum, Instance, List, String, Any, Int
logger = logging.getLogger(__name__)
class DiscreteFacet(object):
"""Pairing of a field and a unique value, representing a subset of the
total data."""
def __init__(self, field, value, label=None):
"""Sets object properties and creates label if not provided.
Args:
field (str): name of the column
value: unique value defined for the column
label (str, optional): string representation of the value
"""
if label is None:
label = str(value)
self.field = field
self.label = label
self._value = value
def __repr__(self):
return "%s:%s"%(self.field, self.label)
def filter(self, df):
"""Filters the provided DataFrame to the subset corresponding to value.
Args:
df (DataFrame): contains a column of ``field``
Returns:
DataFrame: filtered to rows, where column ``field`` has values
equal to ``_value``.
"""
return df[df[self.field] == self._value]
class ContinuousFacet(DiscreteFacet):
"""Represents a range of values for a field in a DataFrame."""
def __init__(self, field, value, bins, label=None):
"""Calls parent ``DiscreteFacet`` and stores bins for later filtering.
Args:
field (str): name of the column
value (str): center of range of values in the column
bins (list[float]): start and inclusive stop value for the bin
label (str, optional): string representation
"""
super(ContinuousFacet, self).__init__(field, value, label=label)
self.bins = bins
def filter(self, df):
"""Filters the provided DataFrame to the subset corresponding to bins.
Args:
df (DataFrame): contains a column of ``field``
Returns:
DataFrame: filtered to rows, where column ``field`` has values
within the bounds of ``bins``.
"""
if self.bins[0] is not None:
df = df[df[self.field] > self.bins[0]]
if self.bins[1] is not None:
df = df[df[self.field] <= self.bins[1]]
return df
class CrossFilter(PlotObject):
"""Interactive filtering and faceting application with multiple plot types"""
# identify properties for the data
columns = List(Dict(String, Any))
data = Instance(ColumnDataSource)
filtered_data = Instance(ColumnDataSource)
# list of datasources to use for filtering widgets
filter_sources = Dict(String, Instance(ColumnDataSource))
# list of columns we are filtering
filtering_columns = List(String)
# dict of column name to filtering widgets
filter_widgets = Dict(String, Instance(PlotObject))
# dict which aggregates all the selections from the different filtering
# widgets
filtered_selections = Dict(String, Dict(String, Any))
# list of facet vars
facet_x = List(String, default=[])
facet_y = List(String, default=[])
facet_tab = List(String, default=[])
# the displayed plot object
plot = Instance(PlotObject)
x_range = Instance(Range)
y_range = Instance(Range)
# configuration properties for the plot
plot_type = Enum("line", "scatter", "bar")
plot_map = {'line': CrossLinePlugin,
'scatter': CrossScatterPlugin,
'bar': CrossBarPlugin}
x = String
y = String
agg = String
color = String
title = String
height = Int()
width = Int()
# identify the selector/drop-down properties
plot_selector = Instance(Select)
x_selector = Instance(Select)
y_selector = Instance(Select)
agg_selector = Instance(Select)
def __init__(self, *args, **kwargs):
"""Creates original and filtered ColumnDataSource and handles defaults.
The df and starting configuration are only provided the first time
init is called, within the create method.
Kwargs:
df (DataFrame): the data to use in the crossfilter app
plot_type (str, optional): starting plot type
agg (str, optional): starting aggregation type
"""
if 'df' in kwargs:
self._df = kwargs.pop('df')
# initialize a "pure" and filtered data source based on df
kwargs['data'] = ColumnDataSource(data=self.df)
kwargs['filtered_data'] = ColumnDataSource(data=self.df)
# default plot type
if 'plot_type' not in kwargs:
kwargs['plot_type'] = "scatter"
# default aggregation type
if 'agg' not in kwargs:
kwargs['agg'] = 'sum'
if 'plot_map' in kwargs:
self.plot_map = kwargs.pop('plot_map')
super(CrossFilter, self).__init__(**kwargs)
@classmethod
def create(cls, **kwargs):
"""Performs all one-time construction of bokeh objects.
This classmethod is required due to the way that bokeh handles the
python and javascript components. The initialize method will be
called each additional time the app is updated (including once in
the create method), but the PlotObject infrastructure will find that
the object already exists in any future calls, and will not create a
new object.
Kwargs:
df (DataFrame): the data to use in the crossfilter app
plot_type (str, optional): starting plot type
agg (str, optional): starting aggregation type
"""
obj = cls(**kwargs)
obj.set_metadata()
choices = obj.make_plot_choices()
obj.update_plot_choices(choices)
obj.set_plot()
obj.set_input_selector()
return obj
def set_input_selector(self):
"""Creates and configures each selector (drop-down menu)."""
col_names = [x['name'] for x in self.columns]
col_names.append('None')
self.plot_selector = Select.create(
title="PlotType",
name="plot_type",
value=self.plot_type,
options=["line", "scatter", "bar"],
)
self.x_selector = Select.create(
name="x",
value=self.x,
options=col_names,
)
self.y_selector = Select.create(
name="y",
value=self.y,
options=col_names,
)
self.agg_selector = Select.create(
name='agg',
value=self.agg,
options=['sum', 'mean', 'last', 'count', 'percent'],
)
def update_plot_choices(self, input_dict):
"""Sets object attributes corresponding to input_dict's values.
Args:
input_dict (dict): dict with x, y, and plot_type keys
"""
for k, v in input_dict.items():
if getattr(self, k) is None:
setattr(self, k, v)
def get_plot_class(self):
"""Return the class for the current plot selection."""
return self.plot_map[self.plot_type]
def column_descriptor_dict(self):
"""Creates column stats dict with keys of column names.
Returns:
dict: dict with key per column in data, where values are column stats
"""
column_descriptors = {}
for x in self.columns:
column_descriptors[x['name']] = x
return column_descriptors
@property
def continuous_columns(self):
"""Returns list of column descriptors for the non-Discrete columns.
Returns:
list(dict): list of dicts, containing metadata about columns
"""
return [x for x in self.columns if x['type'] != 'DiscreteColumn']
@property
def discrete_columns(self):
"""Returns list of column descriptors for the Discrete columns.
Returns:
list(dict): list of dicts, containing metadata about columns
"""
return [x for x in self.columns if x['type'] == 'DiscreteColumn']
def make_plot_choices(self):
"""Selects first two continuous columns for x,y during initial setup
Returns:
dict: x, y, and plot_type keys and values for initial setup
"""
# prefer continuous columns to initialize with, otherwise use what we have
if len(self.continuous_columns) > 1:
x, y = [x['name'] for x in self.continuous_columns[:2]]
else:
x, y = [x['name'] for x in self.columns[:2]]
return {'x': x, 'y': y, 'plot_type': 'scatter'}
def set_plot(self):
"""Makes and sets the plot based on the current configuration of app."""
self.update_xy_ranges(source=self.df)
plot = self.make_plot()
self.plot = plot
curdoc()._add_all()
def make_plot(self):
"""Makes the correct plot layout type, based on app's current config.
Returns:
PlotObject: one plot, grid of plots, or tabs of plots/grids of plots
"""
if self.facet_tab:
facets = self.make_facets(dimension='tab')
# generate a list of panels, containing plot/plots for each facet
tabs = [self.make_tab(content=self.create_plot_page(
tab_facet=facet), tab_label=self.facet_title(facet)) for facet
in facets]
return Tabs(tabs=tabs)
else:
return self.create_plot_page()
def create_plot_page(self, tab_facet=None):
"""Generates a single visible page of a plot or plots.
Args:
tab_facet (DiscreteFacet or ContinuousFacet): a facet to filter on
Returns:
PlotObject: a single or grid of plots
"""
# no faceting
if all([len(self.facet_x) == 0,
len(self.facet_y) == 0]):
plot_page = self.make_single_plot(facet=tab_facet)
# x xor y faceting
if all([(len(self.facet_x) != 0) ^ (len(self.facet_y) != 0)]):
plot_page = self.make_1d_facet_plot(facet=tab_facet)
# x and y faceting
if all([len(self.facet_x) != 0,
len(self.facet_y) != 0]):
plot_page = self.make_2d_facet_plot(facet=tab_facet)
if isinstance(plot_page, GridPlot):
self.apply_grid_style(plot_page)
return plot_page
@staticmethod
def make_tab(content, tab_label):
"""Creates a container for the contents of a tab.
Args:
content (PlotObject): the primary content of the tab
tab_label (str): the text to place in the tab
Returns:
Panel: represents a single tab in a group of tabs
"""
return Panel(child=content, title=tab_label)
def make_facets(self, dimension):
"""Creates combination of all facets for the provided dimension
Args:
dimension (str): name of the dimension to create facets for
Returns:
list(list(DiscreteFacet or ContinuousFacet)): list of list of
unique facet combinations
"""
if dimension == 'x':
facets = self.facet_x
elif dimension == 'y':
facets = self.facet_y
else:
facets = self.facet_tab
# create facets for each column
column_descriptor_dict = self.column_descriptor_dict()
all_facets = [[]]
for field in facets:
# create facets from discrete columns
if column_descriptor_dict[field]['type'] == 'DiscreteColumn':
field_facets = [DiscreteFacet(field, val) for val in
np.unique(self.df[field].values)]
# combine any facets as required
all_facets = cross(all_facets, field_facets)
else:
# create quantile based discrete data and pairs of bins
categorical, bins = pd.qcut(self.df[field], 4, retbins=True)
cats = categorical.cat.categories
bins = [[bins[idx], bins[idx + 1]] for idx in
range(len(bins) - 1)]
bins[0][0] = None
# create list of facets
field_facets = [ContinuousFacet(field, value, bin) for
bin, value in zip(bins, cats)]
# combine any facets as required
all_facets = cross(all_facets, field_facets)
return all_facets
@staticmethod
def facet_title(facets):
"""Joins list of facets by commas.
Args:
facets (list(DiscreteFacet or ContinuousFacet)): list of facets,
which are a combination of column and unique value within it
Returns:
str: string representation of the combination of facets
"""
title = ",".join([str(x) for x in facets])
return title
def facet_data(self, facets, df=None):
"""Filters data to the rows associated with the given facet.
Args:
facets (list(DiscreteFacet or ContinuousFacet)): list of facets,
which are a combination of column and unique value within it
df (DataFrame, optional): data to be filtered on
Returns:
DataFrame: filtered DataFrame based on provided facets
"""
if df is None:
df = self.filtered_df
for f in facets:
df = f.filter(df)
return df
def make_1d_facet_plot(self, facet=None):
"""Creates the faceted plots when a facet is added to the x axis.
Returns:
GridPlot: a grid of plots, where each plot has subset of data
"""
if self.facet_x:
all_facets = self.make_facets('x')
else:
all_facets = self.make_facets('y')
plots = []
# loop over facets and create single plots for data subset
for facets in all_facets:
title = self.facet_title(facets)
if facet:
facets += facet
df = self.facet_data(facets, self.filtered_df)
plot = self.make_single_plot(
df=df, title=title, plot_height=200, plot_width=200,
tools="pan,wheel_zoom,reset", facet=facets
)
# append single plot to list of plots
plots.append(plot)
# create squarish grid based on number of plots
chunk_size = int(np.ceil(np.sqrt(len(plots))))
# create list of lists of plots, where each list of plots is a row
grid_plots = []
for i in range(0, len(plots), chunk_size):
chunk = plots[i:i + chunk_size]
grid_plots.append(chunk)
self.hide_internal_axes(grid_plots)
# return the grid as the plot
return GridPlot(children=grid_plots, plot_width=200*chunk_size)
def make_2d_facet_plot(self, facet=None):
"""Creates the grid of plots when there are both x and y facets.
Returns:
GridPlot: grid of x and y facet combinations
"""
# ToDo: gracefully handle large combinations of facets
all_facets_x = self.make_facets('x')
all_facets_y = self.make_facets('y')
grid_plots = []
# y faceting down column
for facets_y in all_facets_y:
# x faceting across row
row = []
for facets_x in all_facets_x:
# build the facets and title
facets = facets_x + facets_y
title = self.facet_title(facets)
# must filter by any extra facets provided for facet tab
if facet:
filter_facets = facets + facet
else:
filter_facets = facets
df = self.facet_data(filter_facets, self.filtered_df)
plot = self.make_single_plot(
df=df, title=title, plot_height=200, plot_width=200,
tools="pan,wheel_zoom,reset", facet=facets
)
row.append(plot)
# append the row to the list of rows
grid_plots.append(row)
self.hide_internal_axes(grid_plots)
# return the grid of plots as the plot
return GridPlot(children=grid_plots, plot_width=200*len(all_facets_x))
@staticmethod
def apply_facet_style(plot):
"""Applies facet-specific style for a given plot.
Override this method to modify the look of a customized CrossFilter
for all plugins. Or, apply custom styles in the plugin, since the
plugin will be told if it is currently being faceted.
"""
plot.title_text_font_size = "9pt"
plot.min_border = 0
def apply_single_plot_style(self, plot):
"""Applies styles when we have only one plot.
Override this method to modify the look of a customized CrossFilter
for all plugins.
"""
plot.min_border_left = 60
def apply_grid_style(self, grid_plot):
"""Applies facet-specific style for the grid of faceted plots.
Override this method to modify the look of a customized CrossFilter
for all plugins. Or, apply custom styles in the plugin, since the
plugin will be told if it is currently being faceted.
"""
grid_plot.title_text_font_size = "12pt"
grid_plot.title_text_font_style = "bold"
grid_plot.title = self.title
@staticmethod
def hide_internal_axes(grid_plots):
"""Hides the internal axes for a grid of plots.
Args:
grid_plots (list(list(Figure))): list of rows (list), containing plots
"""
for i, row in enumerate(grid_plots):
is_bottom = i + 1 == len(grid_plots)
for j, plot in enumerate(row):
if j != 0:
if is_bottom:
hide_axes(plot, axes='y')
else:
hide_axes(plot)
elif j == 0 and not is_bottom:
hide_axes(plot, axes='x')
def make_single_plot(self, df=None, title=None,
plot_width=700,
plot_height=680,
tools="pan,wheel_zoom,box_zoom,save,resize,"
"box_select,reset",
facet=None):
"""Creates a plot based on the current app configuration.
Args:
df (DataFrame, optional): data to use for the plot
title (str, optional): plot title
plot_width (float, optional): width of plot in pixels
plot_height (float, optional): height of plot in pixels
tools (str, optional): comma separated string of tool names
Returns:
PlotObject: the generated plot
"""
faceting = False
# df is not provided when we are not faceting
if df is None:
source = self.filtered_data
else:
df = self.facet_data(facets=facet, df=df)
# create column data source with filtered df
source = ColumnDataSource(data=df)
faceting = True
# check for tab faceting and filter if provided
if facet:
df = self.facet_data(facets=facet, df=df)
source = ColumnDataSource(data=df)
# get the helper class for the plot type selected
plot_class = self.get_plot_class()
# initialize the plugin class
plugin = plot_class(source=source,
title_text_font_size="12pt",
title_text_font_style = "bold",
plot_height=plot_height,
plot_width=plot_width,
tools=tools,
title=title,
x_range=self.x_range,
y_range=self.y_range,
facet=faceting,
crossfilter=self)
# generate plot
plot = plugin.get_plot()
# apply faceting-specific styling if required
if facet:
self.apply_facet_style(plot)
self.title = plugin.title
else:
self.apply_single_plot_style(plot)
self.title = plot.title
return plot
def update_xy_ranges(self, source):
"""Updates common x_range, y_range to use for creating figures.
Args:
source (ColumnDataSource): the source to return correct range for
"""
plt_cls = self.get_plot_class()
x_range, y_range = plt_cls.make_xy_ranges(cf=self)
# store x and y range from the plot class
self.x_range = x_range
self.y_range = y_range
def plot_attribute_change(self, obj, attrname, old, new):
"""Updates app's attribute and plot when view configuration changes.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
setattr(self, obj.name, new)
self.set_plot()
def facet_change(self, obj, attrname, old, new):
"""Updates plot when any facet configuration changes.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
self.set_plot()
@property
def df(self):
"""The core data that is used by the app for plotting.
Returns:
DataFrame: the original data structure
"""
if hasattr(self, '_df'):
return self._df
else:
if self.data:
return self.data.to_df()
@property
def filtered_df(self):
"""The subset of the data to use for plotting.
Returns:
DataFrame: the original data structure
"""
if hasattr(self, '_df'):
return self._df
else:
if self.filtered_data:
return self.filtered_data.to_df()
def update(self, **kwargs):
"""Updates CrossFilter attributes each time the model changes.
The events are setup each time so that we can add event handlers to
the selection/filtering widgets as they are added.
"""
super(CrossFilter, self).update(**kwargs)
self.setup_events()
def setup_events(self):
"""Registers events each time the app changes state."""
# watch the app's filtering_columns attribute to setup filters
self.on_change('filtering_columns', self, 'setup_filter_widgets')
# register any available filter widget
for obj in self.filter_widgets.values():
if isinstance(obj, InputWidget):
obj.on_change('value', self, 'handle_filter_selection')
# watch app column data source attribute for changes
for obj in self.filter_sources.values():
obj.on_change('selected', self, 'handle_filter_selection')
# selector event registration
if self.plot_selector:
self.plot_selector.on_change('value', self, 'plot_attribute_change')
if self.x_selector:
self.x_selector.on_change('value', self, 'plot_attribute_change')
if self.y_selector:
self.y_selector.on_change('value', self, 'plot_attribute_change')
if self.agg_selector:
self.agg_selector.on_change('value', self, 'plot_attribute_change')
# register to watch the app's facet attributes
self.on_change('facet_x', self, 'facet_change')
self.on_change('facet_y', self, 'facet_change')
self.on_change('facet_tab', self, 'facet_change')
def handle_filter_selection(self, obj, attrname, old, new):
"""Filters the data source whenever a filter widget changes.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
df = self.df
# loop over the column metadata
for descriptor in self.columns:
colname = descriptor['name']
# handle discrete selections
if descriptor['type'] == 'DiscreteColumn' and \
colname in self.filter_widgets:
selected = self.filter_widgets[colname].value
if not selected:
continue
if isinstance(selected, six.string_types):
df = df[df[colname] == selected]
else:
df = df[np.in1d(df[colname], selected)]
# handle time or continuous selections
elif descriptor['type'] in ('TimeColumn', 'ContinuousColumn') and \
colname in self.filter_widgets:
obj = self.filter_sources[colname]
# hack because we don't have true range selection
if not obj.selected:
continue
# TODO: (bev) This works until CF selections are not made on
# [multi]lines and [multi]patches
min_idx = np.min(obj.selected['1d']['indices'])
max_idx = np.max(obj.selected['1d']['indices'])
min_val = obj.data['centers'][min_idx]
max_val = obj.data['centers'][max_idx]
df = df[(df[colname] >= min_val) & (df[colname] <= max_val)]
# update filtered data and force plot update
for colname in self.data.column_names:
self.filtered_data.data[colname] = df[colname]
self.filtered_data._dirty = True
self.set_plot()
def clear_selections(self, obj, attrname, old, new):
"""Updates filter widgets and sources as they are removed.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
diff = set(old) - set(new)
column_descriptor_dict = self.column_descriptor_dict()
# delete any removed filter widgets
if len(diff) > 0:
for col in diff:
metadata = column_descriptor_dict[col]
if metadata['type'] != 'DiscreteColumn':
del self.filter_sources[col]
del self.filter_widgets[col]
# update the data based on latest changes
if diff:
self.handle_filter_selection(obj, attrname, old, new)
def setup_filter_widgets(self, obj, attrname, old, new):
"""Creates new filter widget each time a new column is added to filters.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
self.clear_selections(obj, attrname, old, new)
# add new widget as required for each column set to filter on
column_descriptor_dict = self.column_descriptor_dict()
for col in self.filtering_columns:
metadata = column_descriptor_dict[col]
            if col not in self.filter_widgets:
# discrete
if metadata['type'] == 'DiscreteColumn':
select = MultiSelect.create(
name=col,
options=self.df[col].unique().tolist())
self.filter_widgets[col] = select
# continuous
else:
source = make_histogram_source(self.df[col])
self.filter_sources[col] = source
hist_plot = make_histogram(self.filter_sources[col],
plot_width=200, plot_height=100,
title_text_font_size='8pt',
tools='box_select'
)
hist_plot.title = col
self.filter_widgets[col] = hist_plot
curdoc()._add_all()
def set_metadata(self):
"""Creates a list of dicts, containing summary info for each column.
The descriptions are stored in the ``columns`` property.
"""
descriptors = []
columns = self.df.columns
for c in columns:
# get description for column from pandas DataFrame
desc = self.df[c].describe()
# DiscreteColumn
if self.df[c].dtype == object:
descriptors.append({
'type': "DiscreteColumn",
'name': c,
'count': desc['count'],
'unique': desc['unique'],
'top': desc['top'],
'freq': desc['freq'],
})
# TimeColumn
elif self.df[c].dtype == np.datetime64:
descriptors.append({
'type': "TimeColumn",
'name': c,
'count': desc['count'],
'unique': desc['unique'],
'first': desc['first'],
'last': desc['last'],
})
# ContinuousColumn
else:
descriptors.append({
'type': "ContinuousColumn",
'name': c,
'count': desc['count'],
'mean': "%.2f"%desc['mean'],
'std': "%.2f"%desc['std'],
'min': "%.2f"%desc['min'],
'max': "%.2f"%desc['max'],
})
self.columns = descriptors
| bsd-3-clause |
lamastex/scalable-data-science | db/week9/18_sparklingTensorFlow/034_SampleML_SparkTensorFlow.py | 2 | 18640 | # Databricks notebook source exported at Tue, 28 Jun 2016 09:51:56 UTC
# MAGIC %md
# MAGIC
# MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
# MAGIC
# MAGIC
# MAGIC ### prepared by [Paul Brouwers](https://www.linkedin.com/in/paul-brouwers-5365117a), [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
# MAGIC
# MAGIC *supported by* [](https://databricks.com/)
# MAGIC and
# MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome)
# COMMAND ----------
# MAGIC %md
# MAGIC The [html source url](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/db/week9/18_sparklingTensorFlow/034_SampleML_SparkTensorFlow.html) of this databricks notebook and its recorded Uji :
# MAGIC
# MAGIC [](https://www.youtube.com/v/iDyeK3GvFpo?rel=0&autoplay=1&modestbranding=1&start=4844)
# COMMAND ----------
# MAGIC %md
# MAGIC # Distributed labeling of images using TensorFlow
# MAGIC
# MAGIC **Home work** notebook for week 9.
# MAGIC
# MAGIC This is essentially a tested copy of the databricks community edition notebook:
# MAGIC
# MAGIC * [https://docs.cloud.databricks.com/docs/latest/sample_applications/index.html#Sample%20ML/SparkTensorFlow.html](https://docs.cloud.databricks.com/docs/latest/sample_applications/index.html#Sample%20ML/SparkTensorFlow.html)
# MAGIC
# MAGIC This tutorial shows how to run TensorFlow models using Spark and Databricks. At the end of this tutorial, you will be able to classify images on a Spark cluster, using a neural network.
# MAGIC
# MAGIC TensorFlow is a new framework released by Google for numerical computations and neural networks. TensorFlow models can directly be embedded within pipelines to perform complex recognition tasks on datasets. This tutorial shows how to label a set of images, from a stock neural network model that was already trained.
# MAGIC
# MAGIC If the **classClusterTensorFlow** cluster is running already then you can just attach this notebook to it and start carrying on with this tutorial.
# MAGIC
# MAGIC > This notebook should work on the cluster named **classClusterTensorFlow** on this shard (either attach your notebook to this cluster or create and attach to a cluster named classClusterTensorFlow as instructed in the companion notebook `033_SetupCluster_SparkTensorFlow`). If you want to run this script on a larger cluster, you need to follow the [setup instructions in this notebook](https://databricks-staging-cloudfront.staging.cloud.databricks.com/public/c65da9a2fa40e45a2028cddebe45b54c/8637560089690848/619805605040471/6977722904629137/d77d0d1390.html).
# MAGIC
# MAGIC This tutorial is adapted from the tutorial published by Google on the [official TensorFlow website](http://www.tensorflow.org).
# COMMAND ----------
# MAGIC %md
# MAGIC Let's sky-dive into [official TensorFlow website](http://www.tensorflow.org) to get a view from the "stratosphere" with enough oxygen :)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Installing TensorFlow
# MAGIC
# MAGIC The TensorFlow library needs to be installed directly on the nodes of the cluster. Running the next cell installs it on your cluster if it is not there already. Running this command may take one minute or more.
# COMMAND ----------
try:
import tensorflow as tf
print "TensorFlow is already installed"
except ImportError:
print "Installing TensorFlow"
import subprocess
subprocess.check_call(["/databricks/python/bin/pip", "install", "https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.6.0-cp27-none-linux_x86_64.whl"])
print "TensorFlow has been installed on this cluster"
# COMMAND ----------
# MAGIC %md
# MAGIC TensorFlow runs as a regular Python library. The following command runs a very simple TensorFlow program.
# COMMAND ----------
def my_function(i):
import tensorflow as tf
with tf.Session():
return tf.constant("Hello, TensorFlow!").eval()
print sc.parallelize(range(5)).map(my_function).collect()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Labeling images
# MAGIC
# MAGIC We are now going to take an existing neural network model that has already been trained on a large corpus (the Inception V3 model), and we are going to apply it to images downloaded from the internet.
# MAGIC
# MAGIC The code in the next cell contains some utility functions to download this model from the internet. For the purpose of this notebook it is not critical to understand what it is doing.
# COMMAND ----------
# Imports:
import numpy as np
import tensorflow as tf
import os
from tensorflow.python.platform import gfile
import os.path
import re
import sys
import tarfile
from subprocess import Popen, PIPE, STDOUT
from six.moves import urllib
def run(cmd):
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
return p.stdout.read()
from PIL import Image
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# All the constants to run this notebook.
model_dir = '/tmp/imagenet'
image_file = ""
num_top_predictions = 5
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz' # ?modify this to
IMAGES_INDEX_URL = 'http://image-net.org/imagenet_data/urls/imagenet_fall11_urls.tgz' # ? modify this
# The number of images to process.
image_batch_size = 10
max_content = 5000L
# Downloading functions:
def read_file_index():
"""Reads the index file from ImageNet (up to a limit),
and returns the content (pairs of image id, image url) grouped in small batches.
"""
from six.moves import urllib
content = urllib.request.urlopen(IMAGES_INDEX_URL)
data = content.read(max_content)
tmpfile = "/tmp/imagenet.tgz"
with open(tmpfile, 'wb') as f:
f.write(data)
run("tar -xOzf %s > /tmp/imagenet.txt" % tmpfile)
with open("/tmp/imagenet.txt", 'r') as f:
lines = [l.split() for l in f]
input_data = [tuple(elts) for elts in lines if len(elts) == 2]
return [input_data[i:i+image_batch_size] for i in range(0,len(input_data), image_batch_size)]
def load_lookup():
"""Loads a human readable English name for each softmax node.
Returns:
dict from integer node ID to human-readable string.
"""
label_lookup_path = os.path.join(model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
uid_lookup_path = os.path.join(model_dir, 'imagenet_synset_to_human_label_map.txt')
if not gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def maybe_download_and_extract():
"""Download and extract model tar file."""
from six.moves import urllib
dest_directory = model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
filepath2, _ = urllib.request.urlretrieve(DATA_URL, filepath)
print("filepath2", filepath2)
statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
else:
print('Data already downloaded:', filepath, os.stat(filepath))
def display_image(url):
"""Downloads an image from a URL and displays it in Databricks."""
filename = url.split('/')[-1]
filepath = os.path.join(model_dir, filename)
urllib.request.urlretrieve(url, filepath)
image = os.path.join(model_dir, filename)
image_png = image.replace('.jpg','.png')
Image.open(image).save(image_png,'PNG')
img = mpimg.imread(image_png)
plt.imshow(img)
display()
# COMMAND ----------
# MAGIC %md
# MAGIC The following cell downloads the data from the internet and loads the model in memory:
# COMMAND ----------
maybe_download_and_extract()
node_lookup = load_lookup()
model_path = os.path.join(model_dir, 'classify_image_graph_def.pb')
with gfile.FastGFile(model_path, 'rb') as f:
model_data = f.read()
# COMMAND ----------
# MAGIC %md
# MAGIC We are now going to download some image URLs from the [ImageNet](http://image-net.org) project. ImageNet is a large collection of images from the internet that is commonly used as a benchmark in image recognition tasks.
# COMMAND ----------
batched_data = read_file_index()
num_images = sum([len(batch) for batch in batched_data])
print "There are %d images grouped in %d batches" % (num_images, len(batched_data))
# COMMAND ----------
# MAGIC %md
# MAGIC The labeling process can now start. We are going to use Spark to schedule the labeling of the images across our cluster, using TensorFlow.
# MAGIC
# MAGIC The neural network model is quite large (250MB), so we will share it across the cluster using Spark's broadcasting mechanism: once it is loaded onto a machine, it will not be loaded again.
# COMMAND ----------
node_lookup_bc = sc.broadcast(node_lookup)
model_data_bc = sc.broadcast(model_data)
# COMMAND ----------
# MAGIC %md
# MAGIC We can now write the code that runs on each executor. It is split into two methods:
# MAGIC - the function `run_image`, which takes a TensorFlow session already containing the graph of computations as well as a URL. This function fetches the image from the internet, passes it to the neural network and returns the list of predictions for this image
# MAGIC - the function `apply_batch`, which takes a batch of URLs and returns predictions for each of them. This is the function called by Spark. For efficiency reasons, it loads the graph of computations once before running the whole batch of images sequentially.
# COMMAND ----------
# Functions: run_image and apply_batch
def run_image(sess, img_id, img_url, node_lookup):
"""Fetches an image from the web and uses the trained neural network to infer the topics of this image."""
from six.moves import urllib
from urllib2 import HTTPError
try:
image_data = urllib.request.urlopen(img_url, timeout=1.0).read()
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
except HTTPError:
return (img_id, img_url, None)
except:
# a) The data returned may be invalid JPEG
# b) The download may time out
return (img_id, img_url, None)
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-num_top_predictions:][::-1]
scores = []
for node_id in top_k:
if node_id not in node_lookup:
human_string = ''
else:
human_string = node_lookup[node_id]
score = predictions[node_id]
scores.append((human_string, score))
return (img_id, img_url, scores)
def apply_batch(batch):
with tf.Graph().as_default() as g:
graph_def = tf.GraphDef()
graph_def.ParseFromString(model_data_bc.value)
tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
labelled = [run_image(sess, img_id, img_url, node_lookup_bc.value) for (img_id, img_url) in batch]
return [tup for tup in labelled if tup[2] is not None]
# COMMAND ----------
# MAGIC %md
# MAGIC Let us see how the function `run_image` performs with a portrait of [Grace Hopper](https://en.wikipedia.org/wiki/Grace_Hopper), one of the most famous women in Computer Sciences:
# COMMAND ----------
url = "https://upload.wikimedia.org/wikipedia/commons/5/55/Grace_Hopper.jpg"
display_image(url)
# COMMAND ----------
# MAGIC %md
# MAGIC Here are the inference results we get for this image, which are quite accurate:
# COMMAND ----------
with tf.Graph().as_default() as g:
graph_def = tf.GraphDef()
graph_def.ParseFromString(model_data)
tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
res = run_image(sess, None, url, node_lookup)[-1]
for (keyword, weight) in res:
print '{:.8}: {}'.format(str(weight), keyword)
# COMMAND ----------
# MAGIC %md
# MAGIC This code is now going to be run on the dataset using Spark:
# COMMAND ----------
# MAGIC %md
# MAGIC Two runs with just 8 nodes, each with 6 GB of RAM.
# COMMAND ----------
urls = sc.parallelize(batched_data, numSlices=len(batched_data))
labelled_images = urls.flatMap(apply_batch)
local_labelled_images = labelled_images.collect()
# COMMAND ----------
urls = sc.parallelize(batched_data, numSlices=len(batched_data))
labelled_images = urls.flatMap(apply_batch)
local_labelled_images = labelled_images.collect()
# COMMAND ----------
# MAGIC %md
# MAGIC Now let us change the cluster settings (`Clusters -> Configure ...`, then use the dropdown menu to change the number of nodes to 4).
# COMMAND ----------
# MAGIC %md
# MAGIC Run with just 4 nodes.
# COMMAND ----------
urls = sc.parallelize(batched_data, numSlices=len(batched_data))
labelled_images = urls.flatMap(apply_batch)
local_labelled_images = labelled_images.collect()
# COMMAND ----------
urls = sc.parallelize(batched_data, numSlices=len(batched_data))
labelled_images = urls.flatMap(apply_batch)
local_labelled_images = labelled_images.collect()
# COMMAND ----------
# MAGIC %md
# MAGIC Let us have a look at one of the images we just classified:
# COMMAND ----------
(_, url, tags) = local_labelled_images[5]
display_image(url)
# COMMAND ----------
tags
# COMMAND ----------
# MAGIC %md
# MAGIC **You Try** changing one of the images by changing '15' below to some other index between 0 and `len(local_labelled_images) - 1`.
# COMMAND ----------
len(local_labelled_images)
# COMMAND ----------
(_, url, tags) = local_labelled_images[15]
display_image(url)
# COMMAND ----------
tags
# COMMAND ----------
# MAGIC %md This is the end of this tutorial. You can clone this tutorial and modify it to suit your needs. Enjoy!
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ***
# MAGIC ***
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC # Scalable Object Recognition in the Cloud for a Swarm of Robots
# MAGIC ### Date: 9:45 am - 5:00 pm, Tuesday June 21, 2016
# MAGIC ### Co-organization: IEEE RAS NZ Chapter, CLAWAR, University of Canterbury, University of Lincoln
# MAGIC ### Venue: Room KF7, Kirkwood Village, University of Canterbury, Kirkwood Avenue, Christchurch, New Zealand
# MAGIC #### by Raazesh Sainudiin done as a student project (near live)
# COMMAND ----------
# MAGIC %md
# MAGIC * Google used GPUs to train the model we saw earlier
# MAGIC * Use the best tool for the job but use Spark to integrate predictions from multiple images streaming in from a swarm of robots, for example.
# COMMAND ----------
# MAGIC %md
# MAGIC #### Can we load the pre-trained model into flying drones to let them scout out which trees have fruits ready for picking?
# MAGIC
# MAGIC Let's get some apple images (and try other fruits too) to test model identification capabilities from a google search on images for:
# MAGIC * "fruit trees", "unripe fruit trees", etc.
# MAGIC * finding the url of the image and feeding it to the pre-trained model,
# MAGIC * and seeing how well the pre-trained model does.
# COMMAND ----------
url = "http://static1.squarespace.com/static/548b6971e4b0af3bfe38cd6f/t/56a7d76c42f5526d030146c8/1462916503492/Fruit-Tree-Apple-Tree.jpg"
display_image(url)
# COMMAND ----------
with tf.Graph().as_default() as g:
graph_def = tf.GraphDef()
graph_def.ParseFromString(model_data)
tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
res = run_image(sess, None, url, node_lookup)[-1]
for (keyword, weight) in res:
print '{:.8}: {}'.format(str(weight), keyword)
# COMMAND ----------
# MAGIC %md
# MAGIC The results don't look too great at identifying the apples.
# MAGIC
# MAGIC You can train your own model with your own training data for a more specific machine vision / object-identification task. See for example:
# MAGIC * [https://github.com/tensorflow/models/tree/master/inception](https://github.com/tensorflow/models/tree/master/inception).
# MAGIC
# MAGIC One can even combine this with the map-matching done in Week 10 to build an atlas of identified objects, for instance when these pre-trained models run inside a swarm of flying drones whose GPS locations are map-matched.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
# MAGIC
# MAGIC
# MAGIC ### prepared by [Paul Brouwers](https://www.linkedin.com/in/paul-brouwers-5365117a), [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
# MAGIC
# MAGIC *supported by* [](https://databricks.com/)
# MAGIC and
# MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome) | unlicense |
yxiong/xy_python_utils | xy_python_utils/quaternion.py | 1 | 4627 | #!/usr/bin/env python
#
# Author: Ying Xiong.
# Created: Mar 18, 2014.
"""Utility functions for quaternion and spatial rotation.
A quaternion is represented by a 4-vector `q` as::
q = q[0] + q[1]*i + q[2]*j + q[3]*k.
The validity of input to the utility functions is not explicitly checked, for
efficiency reasons.
======== ================================================================
Abbr. Meaning
======== ================================================================
quat Quaternion, 4-vector.
vec Vector, 3-vector.
ax, axis Axis, 3- unit vector.
ang Angle, in unit of radian.
rot Rotation.
rotMatx Rotation matrix, 3x3 orthogonal matrix.
HProd Hamilton product.
conj Conjugate.
recip Reciprocal.
======== ================================================================
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def quatConj(q):
"""Return the conjugate of quaternion `q`."""
return np.append(q[0], -q[1:])
def quatHProd(p, q):
"""Compute the Hamilton product of quaternions `p` and `q`."""
r = np.array([p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3],
p[0]*q[1] + p[1]*q[0] + p[2]*q[3] - p[3]*q[2],
p[0]*q[2] - p[1]*q[3] + p[2]*q[0] + p[3]*q[1],
p[0]*q[3] + p[1]*q[2] - p[2]*q[1] + p[3]*q[0]])
return r
def quatRecip(q):
"""Compute the reciprocal of quaternion `q`."""
return quatConj(q) / np.dot(q,q)
def quatFromAxisAng(ax, theta):
"""Get a quaternion that performs the rotation around axis `ax` for angle
`theta`, given as::
q = (r, v) = (cos(theta/2), sin(theta/2)*ax).
Note that the input `ax` needs to be a 3x1 unit vector."""
return np.append(np.cos(theta/2), np.sin(theta/2)*ax)
def quatFromRotMatx(R):
"""Get a quaternion from a given rotation matrix `R`."""
q = np.zeros(4)
q[0] = ( R[0,0] + R[1,1] + R[2,2] + 1) / 4.0
q[1] = ( R[0,0] - R[1,1] - R[2,2] + 1) / 4.0
q[2] = (-R[0,0] + R[1,1] - R[2,2] + 1) / 4.0
q[3] = (-R[0,0] - R[1,1] + R[2,2] + 1) / 4.0
    q[q<0] = 0  # Avoid complex numbers caused by numerical error.
q = np.sqrt(q)
q[1] *= np.sign(R[2,1] - R[1,2])
q[2] *= np.sign(R[0,2] - R[2,0])
q[3] *= np.sign(R[1,0] - R[0,1])
return q
def quatToRotMatx(q):
"""Get a rotation matrix from the given unit quaternion `q`."""
R = np.zeros((3,3))
R[0,0] = 1 - 2*(q[2]**2 + q[3]**2)
R[1,1] = 1 - 2*(q[1]**2 + q[3]**2)
R[2,2] = 1 - 2*(q[1]**2 + q[2]**2)
R[0,1] = 2 * (q[1]*q[2] - q[0]*q[3])
R[1,0] = 2 * (q[1]*q[2] + q[0]*q[3])
R[0,2] = 2 * (q[1]*q[3] + q[0]*q[2])
R[2,0] = 2 * (q[1]*q[3] - q[0]*q[2])
R[1,2] = 2 * (q[2]*q[3] - q[0]*q[1])
R[2,1] = 2 * (q[2]*q[3] + q[0]*q[1])
return R
def rotVecByQuat(u, q):
"""Rotate a 3-vector `u` according to the quaternion `q`. The output `v` is
also a 3-vector such that::
[0; v] = q * [0; u] * q^{-1}
with Hamilton product."""
v = quatHProd(quatHProd(q, np.append(0, u)), quatRecip(q))
return v[1:]
def rotVecByAxisAng(u, ax, theta):
"""Rotate the 3-vector `u` around axis `ax` for angle `theta` (radians),
    counter-clockwise when looking along the inverse axis direction. Note that the
input `ax` needs to be a 3x1 unit vector."""
q = quatFromAxisAng(ax, theta)
return rotVecByQuat(u, q)
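# A small self-check example (added for illustration, not part of the original
# module): rotating the x-axis by +90 degrees about the z-axis yields the
# y-axis, and the quaternion and rotation-matrix representations agree.
def _rotationExample():
    ax = np.array([0.0, 0.0, 1.0])
    theta = np.pi / 2
    u = np.array([1.0, 0.0, 0.0])
    v = rotVecByAxisAng(u, ax, theta)
    assert np.allclose(v, [0.0, 1.0, 0.0])
    q = quatFromAxisAng(ax, theta)
    R = quatToRotMatx(q)
    assert np.allclose(R.dot(u), v)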
def quatDemo():
# Rotation axis.
ax = np.array([1.0, 1.0, 1.0])
ax = ax / np.linalg.norm(ax)
# Rotation angle.
theta = -5*np.pi/6
# Original vector.
    u = [0.5, 0.6, np.sqrt(3)/2]
u /= np.linalg.norm(u)
# Draw the circle frame.
nSamples = 1000
t = np.linspace(-np.pi, np.pi, nSamples)
z = np.zeros(t.shape)
fig = plt.figure()
fig_ax = fig.add_subplot(111, projection="3d", aspect="equal")
fig_ax.plot(np.cos(t), np.sin(t), z, 'b')
fig_ax.plot(z, np.cos(t), np.sin(t), 'b')
fig_ax.plot(np.cos(t), z, np.sin(t), 'b')
# Draw rotation axis.
fig_ax.plot([0, ax[0]*2], [0, ax[1]*2], [0, ax[2]*2], 'r')
# Rotate the `u` vector and draw results.
fig_ax.plot([0, u[0]], [0, u[1]], [0, u[2]], 'm')
v = rotVecByAxisAng(u, ax, theta)
fig_ax.plot([0, v[0]], [0, v[1]], [0, v[2]], 'm')
# Draw the circle that is all rotations of `u` across `ax` with different
# angles.
v = np.zeros((3, len(t)))
for i,theta in enumerate(t):
v[:,i] = rotVecByAxisAng(u, ax, theta)
fig_ax.plot(v[0,:], v[1,:], v[2,:], 'm')
fig_ax.view_init(elev=8, azim=80)
plt.show()
if __name__ == "__main__":
quatDemo()
| mit |
mblondel/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 3 | 44257 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_warns_message
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
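    # The helper mirrors the plain averaged-SGD recurrence used below: per
    # sample,  w <- (1 - eta*alpha)*w - eta*(w.x + b - y)*x  and
    #          b <- b - eta*(w.x + b - y)*decay,
    # with the returned weights/intercept being running means over samples.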
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
"""Input format tests. """
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
"""Test whether clone works ok. """
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
"""Check that SGD gives any results :-)"""
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
"""Check whether expected ValueError on bad l1_ratio"""
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
"""Check whether expected ValueError on bad learning_rate"""
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
"""Check whether expected ValueError on bad eta0"""
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
"""Check whether expected ValueError on bad alpha"""
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
"""Test parameter validity check"""
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
"""Test parameter validity check"""
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
"""Checks coef_init not allowed as model argument (only fit)"""
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
"""Checks coef_init shape for the warm starts"""
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
"""Checks intercept_ shape for the warm starts"""
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
"""Checks intercept_ shape for the warm starts in binary case"""
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
"""Checks the SGDClassifier correctly computes the average weights"""
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
"""Checks intercept_ shape consistency for the warm starts"""
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
"""Target must have at least two labels"""
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_auto(self):
"""partial_fit with class_weight='auto' not supported"""
assert_raises_regexp(ValueError,
"class_weight 'auto' is not supported for "
"partial_fit. In order to use 'auto' weights, "
"use compute_class_weight\('auto', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='auto').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
        """Multi-class average test case"""
        eta = .001
        alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
"""Multi-class test case with multi-core support"""
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
"""Checks coef_init and intercept_init shape for for multi-class
problems"""
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
"""Check SGD.predict_proba"""
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
"""Test L1 regularization"""
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
"""Test if equal class weights approx. equals no class weights. """
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
"""ValueError due to not existing class label."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
"""ValueError due to wrong class_weight argument type."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_class_weight_warning(self):
"""Tests that class_weight passed through fit raises warning.
This test should be removed after deprecating support for this"""
clf = self.factory()
warning_message = ("You are trying to set class_weight through the fit "
"method, which is deprecated and will be removed in"
"v0.17 of scikit-learn. Pass the class_weight into "
"the constructor instead.")
assert_warns_message(DeprecationWarning,
warning_message,
clf.fit, X4, Y4,
class_weight=1)
def test_weights_multiplied(self):
"""Tests that class_weight and sample_weight are multiplicative"""
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_array_equal(clf1.coef_, clf2.coef_)
def test_auto_weight(self):
"""Test class weights for imbalanced data"""
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001, n_iter=1000,
class_weight="auto", shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
        # build a very, very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000, class_weight="auto", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="auto", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
"""Test weights on individual samples"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give small weights to the samples of class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
"""Test if ValueError is raised if sample_weight has wrong shape"""
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
"""Partial_fit should work after initial fit in the multiclass case.
Non-regression test for #2496; fit would previously produce a
Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
"""
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
"""Test multiple calls of fit w/ different shaped inputs."""
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
"""Check that SGD gives any results."""
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
"""Tests the average regressor matches the naive implementation"""
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
"""Tests whether the partial fit yields the same average as the fit"""
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
"""Checks the average weights on data with 0s"""
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
"""Check that the SGD output is consistent with coordinate descent"""
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground-truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
        assert_equal(clf.loss_functions['huber'][1], 0.1)
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDRegressor
def test_l1_ratio():
"""Test if l1 ratio extremes match L1 and L2 penalty settings. """
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
nicholsn/ncanda-data-integration | scripts/reporting/check_gradient_tables.py | 4 | 6786 | #!/usr/bin/env python
import os
import sys
import glob
import json
import numpy as np
import pandas as pd
from lxml import objectify
def read_xml_sidecar(filepath):
"""
Read a CMTK xml sidecar file.
Returns
=======
lxml.objectify
"""
abs_path = os.path.abspath(filepath)
with open(abs_path, 'rb') as fi:
lines = fi.readlines()
lines.insert(1, '<root>')
lines.append('</root>')
string = ''.join(lines)
strip_ge = string.replace('dicom:GE:', '')
strip_dicom = strip_ge.replace('dicom:','')
result = objectify.fromstring(strip_dicom)
return result
def get_array(array_string):
"""
Parse an array from XML string
Returns
=======
np.array
"""
l = array_string.text.split(' ')
return np.fromiter(l, np.float)
def get_gradient_table(parsed_sidecar, decimals=None):
"""
Get the bvector table for a single image
Returns
=======
np.array (rounded to 1 decimal)
"""
b_vector = get_array(parsed_sidecar.mr.dwi.bVector)
b_vector_image = get_array(parsed_sidecar.mr.dwi.bVectorImage)
b_vector_standard = get_array(parsed_sidecar.mr.dwi.bVectorStandard)
if not decimals:
decimals = 1
return np.around([b_vector,
b_vector_image,
b_vector_standard],
decimals=decimals)
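# Hypothetical usage sketch (the file name below is illustrative only): the two
# helpers above chain together to pull one rounded gradient table per sidecar:
#   sidecar = read_xml_sidecar('NCANDA_S00001-frame-000.xml')
#   table = get_gradient_table(sidecar, decimals=3)
#   # table stacks bVector, bVectorImage and bVectorStandard as its three rows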
def get_cases(cases_root, case=None):
"""
Get a list of cases from root dir, optionally for a single case
"""
match = 'NCANDA_S*'
if case:
match = case
return glob.glob(os.path.join(cases_root, match))
def get_dti_stack(case, arm=None, event=None):
if arm:
path = os.path.join(case, arm)
else:
path = os.path.join(case, '*')
if event:
path = os.path.join(path, event)
else:
path = os.path.join(path,'*')
path = os.path.join(path, 'diffusion/native/dti60b1000/*.xml')
return glob.glob(path)
def get_all_gradients(dti_stack, decimals=None):
"""
Parses a list of dti sidecar files for subject.
Returns
=======
list of np.array
"""
    gradients_per_frame = list()
    for xml in dti_stack:
        sidecar = read_xml_sidecar(xml)
        gradients_per_frame.append(get_gradient_table(sidecar,
                                                      decimals=decimals))
    return gradients_per_frame
def get_site_scanner(site):
"""
Returns the "ground truth" case for gradients.
"""
site_scanner = dict(A='Siemens',
B='GE',
C='GE',
D='Siemens',
E='GE')
return site_scanner.get(site)
def get_ground_truth_gradients(args=None):
"""
    Return a dictionary mapping scanner to its ground-truth gradient tables
"""
# Choose arbitrary cases for ground truth
test_path = '/fs/ncanda-share/pipeline/cases'
scanner_subject = dict(Siemens='NCANDA_S00061',
GE='NCANDA_S00033')
# Paths to scanner specific gradients
siemens_path = os.path.join(test_path, scanner_subject.get('Siemens'))
ge_path = os.path.join(test_path, scanner_subject.get('GE'))
# Get ground truth for standard baseline
test_arm = 'standard'
test_event = 'baseline'
# Gets files for each scanner
siemens_stack = get_dti_stack(siemens_path, arm=test_arm, event=test_event)
ge_stack = get_dti_stack(ge_path, arm=test_arm, event=test_event)
siemens_stack.sort()
ge_stack.sort()
# Parse the xml files to get scanner specific gradients per frame
siemens_gradients = get_all_gradients(siemens_stack, decimals=args.decimals)
ge_gradients = get_all_gradients(ge_stack, decimals=args.decimals)
return dict(Siemens=siemens_gradients, GE=ge_gradients)
def main(args=None):
# Get the gradient tables for all cases and compare to ground truth
cases = get_cases(args.base_dir, case=args.case)
# Demographics from pipeline to grab case to scanner mapping
demo_path = '/fs/ncanda-share/pipeline/summaries/demographics.csv'
demographics = pd.read_csv(demo_path, index_col=['subject',
'arm',
'visit'])
gradient_map = get_ground_truth_gradients(args=args)
for case in cases:
if args.verbose:
print("Processing: {}".format(case))
# Get the case's site
sid = os.path.basename(case)
site = demographics.loc[sid, args.arm, args.event].site
scanner = get_site_scanner(site)
gradients = gradient_map.get(scanner)
case_dti = os.path.join(args.base_dir, case)
case_stack = get_dti_stack(case_dti, arm=args.arm, event=args.event)
case_stack.sort()
case_gradients = get_all_gradients(case_stack, decimals=args.decimals)
errors = list()
for idx, frame in enumerate(case_gradients):
# if there is a frame that doesn't match, report it.
if not (gradients[idx]==frame).all():
errors.append(idx)
if errors:
key = os.path.join(case, args.arm, args.event, 'diffusion/native/dti60b1000')
result = dict(subject_site_id=key,
frames=errors,
error="Gradient tables do not match for frames.")
print(json.dumps(result, sort_keys=True))
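# Example invocation (subject ID and paths are illustrative only; the flags
# match the argparse definitions below):
#   python check_gradient_tables.py -a standard -e baseline -c NCANDA_S00001 -d 3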
if __name__ == '__main__':
import argparse
formatter = argparse.RawDescriptionHelpFormatter
default = 'default: %(default)s'
parser = argparse.ArgumentParser(prog="check_gradient_tables.py",
description=__doc__,
formatter_class=formatter)
parser.add_argument('-a', '--arm', dest="arm",
help="Study arm. {}".format(default),
default='standard')
parser.add_argument('-b', '--base-dir', dest="base_dir",
help="Study base directory. {}".format(default),
default='/fs/ncanda-share/pipeline/cases')
    parser.add_argument('-d', '--decimals', dest="decimals", type=int,
                        help="Number of decimals. {}".format(default),
                        default=3)
parser.add_argument('-e', '--event', dest="event",
help="Study event. {}".format(default),
default='baseline')
parser.add_argument('-c', '--case', dest="case",
help="Study case. {}".format(default),
default=None)
parser.add_argument('-v', '--verbose', dest="verbose",
help="Turn on verbose", action='store_true')
argv = parser.parse_args()
sys.exit(main(args=argv))
| bsd-3-clause |
liangz0707/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
Roboticmechart22/sms-tools | lectures/06-Harmonic-model/plots-code/f0Twm-piano.py | 19 | 1261 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackman
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import stft as STFT
import sineModel as SM
import harmonicModel as HM
(fs, x) = UF.wavread('../../../sounds/piano.wav')
w = np.blackman(1501)
N = 2048
t = -90
minf0 = 100
maxf0 = 300
f0et = 1
maxnpeaksTwm = 4
H = 128
x1 = x[int(1.5*fs):int(1.8*fs)]
plt.figure(1, figsize=(9, 7))
mX, pX = STFT.stftAnal(x, fs, w, N, H)
f0 = HM.f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et)
f0 = UF.cleaningTrack(f0, 5)
yf0 = UF.sinewaveSynth(f0, .8, H, fs)
f0[f0==0] = np.nan
maxplotfreq = 800.0
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:int(N*maxplotfreq/fs)+1]))
plt.autoscale(tight=True)
plt.plot(frmTime, f0, linewidth=2, color='k')
plt.autoscale(tight=True)
plt.title('mX + f0 (piano.wav), TWM')
plt.tight_layout()
plt.savefig('f0Twm-piano.png')
UF.wavwrite(yf0, fs, 'f0Twm-piano.wav')
plt.show()
| agpl-3.0 |
caryan/PyBio | OrthoGroups/make_matrix.py | 1 | 2302 | import glob
from Bio import SeqIO
import pandas as pd
from progressbar import Bar, Percentage, ProgressBar, RotatingMarker, ETA
MICdf = pd.read_csv('antibiogram_july.csv', index_col=0)
fileNames = glob.glob('*.afa')
matrixDFs = {}
#This can take a long time so put up a progress bar
widgets = ['Working: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
' ', ETA(), ' ']
pbar = ProgressBar(widgets=widgets, maxval=len(fileNames))
pbar.start()
for ct, fileName in enumerate(fileNames):
pbar.update(ct)
with open(fileName, 'r') as FID:
seqs = list(SeqIO.parse(FID, 'fasta'))
#Find the reference record first
refName = 'PAO1'
## seqNames = []
## for seq in seqs:
## seqNames.append(seq.id.split('|')[0])
## if seqNames[-1] == refName:
## refSeq = seq.seq.tostring()
for seq in seqs:
if seq.id.split('|')[0] == refName:
refSeq = seq.seq.tostring()
break
#Now loop through again and output the matrix
## with open(fileName.split('.')[0] + '.matrix', 'w') as FID:
## for seq in MICdf.index:
## FID.write(seq + ', ')
## FID.write(str(MICdf['meropenem'][seq])+', ')
## if seq in seqNames:
## FID.write(', '.join(['0' if (c1 == c2) else '1' for c1,c2 in zip(seqs[seqNames.index(seq)].seq.tostring(), refSeq)]))
## else:
## FID.write(', '.join(['2']*len(refSeq)))
## FID.write('\n')
#Create a DF comparing each to the reference strain
groupName = fileName.split('.')[0]
matrixDFs[groupName] = pd.DataFrame(
{
seq.id.split('|')[0]:
pd.Series([0 if (c1 == c2) else 1
for c1,c2 in zip(seq.seq.tostring(), refSeq)],
index = ['V' + str(ct+1) + '_' + groupName for ct in range(len(refSeq))]
)
for seq in seqs })
#Concatentate into one DF
totMatrix = pd.concat(matrixDFs.values(), axis=0)
#We now sum along the rows and discard values close to zero or close to the total number
#in order to only look at the interesting ones
counts = totMatrix.sum(axis=1)
#Plot the histogram of counts (close the figure window to continue)
import matplotlib.pyplot as plt
plt.hist(counts.values, 100)
plt.show()
cutOff = 10
interestingMatrix = totMatrix.loc[(counts>cutOff) & (counts < totMatrix.shape[1]-cutOff), :]
#Replace the NaN's with 2
interestingMatrix.fillna(2, inplace=True)
#Push the result out to file
interestingMatrix.to_csv('BigMatrix2.tsv', sep='\t')
| gpl-3.0 |
mhdella/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
allenlavoie/tensorflow | tensorflow/contrib/timeseries/examples/predict_test.py | 80 | 2487 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the prediction example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from tensorflow.contrib.timeseries.examples import predict
from tensorflow.python.platform import test
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/period_trend.csv")
class PeriodTrendExampleTest(test.TestCase):
def test_shapes_and_variance_structural(self):
(times, observed, all_times, mean, upper_limit, lower_limit
) = predict.structural_ensemble_train_and_predict(_DATA_FILE)
# Just check that plotting will probably be OK. We can't actually run the
# plotting code since we don't want to pull in matplotlib as a dependency
# for this test.
self.assertAllEqual([500], times.shape)
self.assertAllEqual([500], observed.shape)
self.assertAllEqual([700], all_times.shape)
self.assertAllEqual([700], mean.shape)
self.assertAllEqual([700], upper_limit.shape)
self.assertAllEqual([700], lower_limit.shape)
# Check that variance hasn't blown up too much. This is a relatively good
# indication that training was successful.
self.assertLess(upper_limit[-1] - lower_limit[-1],
1.5 * (upper_limit[0] - lower_limit[0]))
def test_ar(self):
(times, observed, all_times, mean,
upper_limit, lower_limit) = predict.ar_train_and_predict(_DATA_FILE)
self.assertAllEqual(times.shape, observed.shape)
self.assertAllEqual(all_times.shape, mean.shape)
self.assertAllEqual(all_times.shape, upper_limit.shape)
self.assertAllEqual(all_times.shape, lower_limit.shape)
self.assertLess((upper_limit - lower_limit).mean(), 4.)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ZenDevelopmentSystems/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to the sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
KasperPRasmussen/bokeh | bokeh/core/compat/mplexporter/renderers/vega_renderer.py | 54 | 5284 | import warnings
import json
import random
from .base import Renderer
from ..exporter import Exporter
class VegaRenderer(Renderer):
def open_figure(self, fig, props):
self.props = props
self.figwidth = int(props['figwidth'] * props['dpi'])
self.figheight = int(props['figheight'] * props['dpi'])
self.data = []
self.scales = []
self.axes = []
self.marks = []
def open_axes(self, ax, props):
if len(self.axes) > 0:
warnings.warn("multiple axes not yet supported")
self.axes = [dict(type="x", scale="x", ticks=10),
dict(type="y", scale="y", ticks=10)]
self.scales = [dict(name="x",
domain=props['xlim'],
type="linear",
range="width",
),
dict(name="y",
domain=props['ylim'],
type="linear",
range="height",
),]
def draw_line(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'line',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"stroke": {"value": style['color']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['linewidth']},
}
}
})
def draw_markers(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'symbol',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"fill": {"value": style['facecolor']},
"fillOpacity": {"value": style['alpha']},
"stroke": {"value": style['edgecolor']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['edgewidth']},
}
}
})
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
if text_type == 'xlabel':
self.axes[0]['title'] = text
elif text_type == 'ylabel':
self.axes[1]['title'] = text
class VegaHTML(object):
def __init__(self, renderer):
self.specification = dict(width=renderer.figwidth,
height=renderer.figheight,
data=renderer.data,
scales=renderer.scales,
axes=renderer.axes,
marks=renderer.marks)
def html(self):
"""Build the HTML representation for IPython."""
id = random.randint(0, 2 ** 16)
html = '<div id="vis%d"></div>' % id
html += '<script>\n'
html += VEGA_TEMPLATE % (json.dumps(self.specification), id)
html += '</script>\n'
return html
def _repr_html_(self):
return self.html()
def fig_to_vega(fig, notebook=False):
"""Convert a matplotlib figure to vega dictionary
if notebook=True, then return an object which will display in a notebook
otherwise, return an HTML string.
"""
renderer = VegaRenderer()
Exporter(renderer).run(fig)
vega_html = VegaHTML(renderer)
if notebook:
return vega_html
else:
return vega_html.html()
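# Hypothetical usage sketch (the figure built here is illustrative only):
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.plot([0, 1, 2], [0, 1, 4], '-o')
#   html = fig_to_vega(fig)                  # standalone HTML string
#   obj = fig_to_vega(fig, notebook=True)    # renders inline in an IPython notebook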
VEGA_TEMPLATE = """
( function() {
var _do_plot = function() {
if ( (typeof vg == 'undefined') && (typeof IPython != 'undefined')) {
$([IPython.events]).on("vega_loaded.vincent", _do_plot);
return;
}
vg.parse.spec(%s, function(chart) {
chart({el: "#vis%d"}).update();
});
};
_do_plot();
})();
"""
| bsd-3-clause |
airanmehr/bio | Scripts/TimeSeriesPaper/Simulation/msmsSelection.py | 1 | 6914 | '''
Copyleft May 27, 2016 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected]
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import seaborn as sns
import pylab as plt;
import os;
home = os.path.expanduser('~') + '/'
import Utils.Util as utl
import Utils.Estimate as est
import Utils.Simulation as Simulation
import Scripts.Miscellaneous.RNN.Evaluate as evl
import Utils.Plots as pplt
reload(Simulation)
comaleName = r'\sc{Clear}'
sns.set_style("whitegrid", {"grid.color": ".9", 'axes.linewidth': .5, "grid.linewidth": ".09"})
def SFS():
suffix = 'sweep'
for suffix in ('sweep', 'finale'):
df = pd.read_pickle(utl.outpath + 'msmsSelection/{}.df'.format(suffix))
sfs = []
for i, x in df.iteritems():
zz = pd.concat([est.Estimate.getAllEstimatesX(y.dropna()).set_index('method') for t, y in x.iterrows()],
axis=1).T
zz.index = pd.MultiIndex.from_product([[i[0]], [i[1]], x.index], names=['s', 'i', 'gen'])
sfs += [zz]
a = pd.concat(sfs).sort_index()
df = a.groupby(level=[0, 1]).sum()
df.columns = map(lambda x: x + '(Dynamic)', df.columns)
dfl = a.groupby(level=[0, 1]).apply(lambda x: x.iloc[-1])
dfl.columns = map(lambda x: x + '(Static)', dfl.columns)
pd.concat([df, dfl], axis=1).to_pickle(utl.outpath + 'ROC/SFS.{}.msms.df'.format(suffix))
def loadNu(suffix):
df = pd.read_pickle(utl.outpath + 'msmsSelection/{}.df'.format(suffix))
x = df.apply(lambda x: pd.Series(x[25000].sort_index().values)).applymap(lambda x: (x, 1 - x)[x < 0.5]).round(3);
x.columns = x.columns * 10
return x
def comale():
for suffix in ('sweep', 'finale'):
x = loadNu(suffix)
res = []
for s in np.arange(0, 0.50001, 0.01):
T = pd.read_pickle(utl.outpath + 'transition/simulation/S{:02.0f}.df'.format(s * 100))
res += [x.apply(lambda y: sum([T.loc[y.values[i], y.values[i + 1]] for i in range(y.size - 1)]), axis=1)]
res = pd.concat(res, axis=1);
res.columns = np.arange(0, 0.50001, 0.01)
df = pd.concat([res[0], res.idxmax(1), res.max(1)], axis=1)
df.columns = ['null', 's', 'alt']
df.to_pickle(utl.outpath + 'ROC/COMALE.{}.msms.df'.format(suffix))
return df
def FIT():
for suffix in ('sweep', 'finale'):
X = loadNu(suffix)
import scipy.stats as sc
MAF = 1. / (1000 * 2)
MIN_VAR = MAF * (1 - MAF)
n = X.shape[1]
def FITsite(xx):
x = pd.DataFrame(xx.values, index=xx.index)
increments = x.diff().iloc[1:]
std = 2 * pd.DataFrame(((x * (1 - x)).iloc[:-1]).applymap(np.sqrt).values, index=increments.index).applymap(
lambda x: max(x, MIN_VAR)) * 10
y = increments / std
statistic = y.mean() / y.std() / np.sqrt(n)
return -np.log((sc.t.sf(np.abs(statistic), n - 1) * 2).mean())
df = pd.DataFrame(X.apply(FITsite, axis=1).fillna(0), columns=['FIT'])
df.to_pickle(utl.outpath + 'ROC/FIT.{}.msms.df'.format(suffix))
def GP():
suffix = 'sweep'
for suffix in ('sweep', 'finale'):
df = loadNu(suffix)
import Scripts.Miscellaneous.GaussianProcess.Estimate as GP
        df = df.apply(
            lambda x: GP.singleLocus(
                x.values[:, None, None],
                np.append(np.ones(int(x.values[0] * 200)),
                          np.zeros(int((1 - x.values[0]) * 200)))[:, None],
                1000, 2e-8, np.array([0, 10, 20, 30, 40, 50]), 0),
            axis=1)
df.to_pickle('{}ROC/GP.{}.msms.df'.format(utl.outpath, suffix))
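# getPower treats the s == 0 rows as the null (negative) set and, for each
# non-zero selection coefficient, labels the corresponding scores as positives
# and computes detection power via evl.Power at a 5% false-positive threshold
# (FPth=0.05).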
def getPower(df):
neg = pd.DataFrame(df.loc[0])
neg['label'] = -1;
def f(x):
pos = pd.DataFrame(x.loc[x.name])
pos.columns = [df.name]
pos['label'] = 1;
return evl.Power(pd.concat([pos, neg]), FPth=0.05)
sel = df[df.index.get_level_values('s') != 0]
return sel.groupby(level=0).apply(f)
def plotPowerSFS(fontsize=6):
dpi = pplt.PLOS.dpi
fig, axes = plt.subplots(1, 2, sharey=True, sharex=True, figsize=(4, 2), dpi=pplt.PLOS.dpi);
pplt.setStyle(lw=1);
print (axes)
suffix = 'finale'
i = 0
titles = {0: '(A)', 1: '(B)'}
for i, suffix in enumerate(['sweep', 'finale']):
X = pd.read_pickle(utl.outpath + 'msmsSelection/finale.df').apply(
lambda x: pd.Series(x[25000].sort_index().values));
X.columns = X.columns * 10
gp = pd.read_pickle('{}ROC/GP.{}.msms.df'.format(utl.outpath, suffix))['LR'];
gp.name = 'GP'
gp[0.005] -= 5e-2
# b=50
# gp[0.005].hist(alpha=0.5,color='r',bins=b);gp[0].hist(bins=b)
a = pd.read_pickle('{}ROC/COMALE.{}.msms.df'.format(utl.outpath, suffix));
a = (a.alt - a.null) + a.index.get_level_values('s') * (0, 20)[suffix == 'sweep'];
a.name = comaleName
a.fillna(0, inplace=True)
a = pd.concat([a, gp,
pd.read_pickle('{}ROC/FIT.{}.msms.df'.format(utl.outpath, suffix)),
pd.read_pickle('{}ROC/SFS.{}.msms.df'.format(utl.outpath, suffix))[
['SFSelect(Dynamic)', 'TajimaD(Dynamic)', 'SFSelect(Static)', 'TajimaD(Static)']]], axis=1)
a[['TajimaD(Dynamic)', 'TajimaD(Static)']] *= -1
df = a.apply(getPower)
df
color = list(np.array(pplt.getColorMap(df.shape[1]))[[1, 0] + range(2, df.shape[1])])
markers = list(np.array(pplt.getMarker(10))[[0, 2, 7, 1, 3, 4, 5]])
xticks = df.index.values
df.index = range(1, df.shape[0] + 1)
df.plot(ax=axes[i], legend=False, color=color, grid=True, markersize=6, style=markers);
axes[i].set_xticks(df.index)
axes[i].set_xticklabels(xticks);
# df.plot(ax=plt.gca(),kind='bar', legend=False, color=color, grid=True)
axes[i].axhline(y=5, color='k');
axes[i].set_xlabel(r'$s$');
axes[i].set_yticks(np.sort(np.append(np.arange(20, 101, 20), [5])))
axes[i].set_ylim([-5, 110])
axes[i].set_xlim([0.75, 4.25])
if not i:
plt.ylabel('Power');
handles, labels = axes[i].get_legend_handles_labels()
axes[i].legend(handles[::-1], labels[::-1], loc='upper left', fontsize=fontsize)
axes[i].set_title(titles[i])
pplt.setSize(axes[i], fontsize)
plt.gcf().subplots_adjust(bottom=0.2)
pplt.savefig('naturalee', dpi=dpi)
plt.show()
# GP()
# FIT()
# comale()
# SFS()
# plotPowerSFS()
| mit |
liebermeister/flux-enzyme-cost-minimization | scripts/plot_supp_figures.py | 1 | 40078 | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 3 17:32:45 2016
@author: eladn
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from mpl_toolkits.mplot3d import Axes3D # NOTE!!! keep this for the 3D plots
import zipfile
import definitions as D
from prepare_data import get_concatenated_raw_data, get_df_from_pareto_zipfile
from sensitivity_analysis import Sensitivity
from monod_surface import plot_surface, \
plot_surface_diff, \
plot_monod_surface, \
plot_conc_versus_uptake_figure, \
plot_glucose_dual_pareto, \
plot_growth_rate_hist, \
interpolate_single_condition, \
plot_glucose_sweep, \
SweepInterpolator, \
get_glucose_sweep_df, \
get_anaerobic_glucose_sweep_df
from monod_curve import plot_monod_scatter, calculate_monod_parameters
from epistasis import Epistasis
from tsne import plot_tsne_figure
import tempfile
import shutil
import pareto_sampling
import seaborn as sns
figure_data = D.get_figure_data()
if __name__ == '__main__':
# %% Figure S1 - same as 3c, but compared to the biomass rate
# instead of growth rate
figS1, axS1 = plt.subplots(1, 2, figsize=(9, 4.5))
data = figure_data['standard']
# remove oxygen-sensitive EFMs
data.loc[data[D.STRICTLY_ANAEROBIC_L], D.GROWTH_RATE_L] = 0
D.plot_basic_pareto(data, axS1[0], x=D.YIELD_L, y=D.BIOMASS_PROD_PER_ENZ_L,
facecolors=D.PARETO_NEUTRAL_COLOR, edgecolors='none')
axS1[0].set_ylabel('enzyme-specific biomass production\n$r_{BM} = v_{BM}/E_{met}$ [gr dw h$^{-1}$ / gr enz]')
axS1[0].set_xlim(-1e-3, 1.1*data[D.YIELD_L].max())
axS1[0].set_ylim(-1e-3, 1.15*data[D.BIOMASS_PROD_PER_ENZ_L].max())
axS1[0].set_title('glucose = 100 mM, O$_2$ = 3.7 mM')
axS1[0].annotate('c', xy=(0.02, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20)
for y in range(0, 14, 2):
axS1[0].plot([-1e-3, 1.1*data[D.YIELD_L].max()], [y, y], 'k-',
alpha=0.2)
D.plot_basic_pareto(data, axS1[1], x=D.YIELD_L, y=D.GROWTH_RATE_L,
facecolors=D.PARETO_NEUTRAL_COLOR, edgecolors='none')
axS1[1].set_xlim(-1e-3, 1.1*data[D.YIELD_L].max())
axS1[1].set_ylim(-1e-3, 1.15*data[D.GROWTH_RATE_L].max())
axS1[1].set_title('glucose = 100 mM, O$_2$ = 3.7 mM')
axS1[1].annotate('d', xy=(0.02, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20)
for y in map(D.GR_FUNCTION, range(0, 18, 2)):
axS1[1].plot([-1e-3, 1.1*data[D.YIELD_L].max()], [y, y], 'k-',
alpha=0.2)
figS1.tight_layout()
D.savefig(figS1, 'S1')
# %%
# SI Figure 3: comparing sum of flux/SA to the total enzyme cost
figS3, axS3 = plt.subplots(1, 1, figsize=(5, 5))
data = figure_data['standard']
inds_to_remove = data.isnull().any(axis=1) | data[D.STRICTLY_ANAEROBIC_L]
data = data.loc[~inds_to_remove, :]
D.plot_basic_pareto(data, x=D.TOT_FLUX_SA_L, y=D.TOT_ENZYME_L,
ax=axS3, edgecolors='none',
facecolors=D.PARETO_NEUTRAL_COLOR)
axS3.set_xscale('log')
axS3.set_yscale('log')
minval, maxval = (1e-2, 40)
# draw the x=y line
axS3.plot([minval, maxval], [minval, maxval], '-',
color='k', linewidth=1)
# draw the two bounding diagonal lines (which bound the function x-y)
# from above and below
min_ratio = (data[D.TOT_ENZYME_L] / data[D.TOT_FLUX_SA_L]).min()
max_ratio = (data[D.TOT_ENZYME_L] / data[D.TOT_FLUX_SA_L]).max()
color_min = (1, 0, 0)
color_max = (0, 0, 1)
axS3.plot([minval, maxval/min_ratio], [minval*min_ratio, maxval], '-',
color=color_min, linewidth=1)
axS3.annotate(xy=(10, 10*min_ratio),
s='y = %.1f x' % min_ratio,
xycoords='data', xytext=(10, 3),
va='top', ha='center', color=color_min,
arrowprops=dict(color=color_min,
shrink=0.02, width=1, headwidth=3))
axS3.plot([minval, maxval/max_ratio], [minval*max_ratio, maxval], '-',
color=color_max, linewidth=1, alpha=1)
axS3.annotate(xy=(20/max_ratio, 20),
s='y = %.1f x' % max_ratio,
xycoords='data', xytext=(1, 20),
va='center', ha='right', color=color_max,
arrowprops=dict(color=color_max,
shrink=0.02, width=1, headwidth=3))
# mark the two extreme points (the ones with the minimal x and minimal
# y values)
min_x = data[D.TOT_FLUX_SA_L].min()
min_y = data[D.TOT_ENZYME_L].min()
for efm, row in data.iterrows():
x = row[D.TOT_FLUX_SA_L]
y = row[D.TOT_ENZYME_L]
if x == min_x:
axS3.annotate(xy=(x, y), s='min. ideal\ncost = %.3f [h$^{-1}$]' % min_x,
xycoords='data', fontsize=12, ha='center',
xytext=(x*1.4, y*20), rotation=0,
arrowprops=dict(color='black',
shrink=0.02, width=1, headwidth=3))
if y == min_y:
axS3.annotate(xy=(x, y), s='min. actual\ncost = %.3f [h$^{-1}$]' % min_y,
xycoords='data', fontsize=12, va='center',
xytext=(x*5, y), rotation=0,
arrowprops=dict(color='black',
shrink=0.02, width=1, headwidth=3))
axS3.set_xlim(minval, maxval)
axS3.set_ylim(minval, maxval)
axS3.set_xlabel(r'ideal cost [gr enz / gr dw h$^{-1}$]')
axS3.set_ylabel(r'actual cost [gr enz / gr dw h$^{-1}$]')
figS3.tight_layout()
D.savefig(figS3, 'S3')
# %% SI Figure 5 - sweep for the kcat values of biomass reaction
figS5, axS5 = plt.subplots(1, 1, figsize=(5, 5))
D.plot_sweep(figure_data['sweep_kcat_r70'],
r'$k_{cat}$ of biomass reaction [s$^{-1}$]',
efm_dict=D.efm_dict, ax=axS5, legend_loc='lower center')
axS5.set_xscale('log')
maxy = axS5.axes.yaxis.get_view_interval()[1]
axS5.plot([100, 100], [0.0, maxy], '--', color='grey', linewidth=1)
axS5.text(100, maxy, r'default $k_{cat}$',
va='bottom', ha='center', color='grey')
D.savefig(figS5, 'S5')
# %% SI Figure 6 - t-SNE projections
figS6 = plot_tsne_figure(figure_data)
D.savefig(figS6, 'S6')
# %% SI Figure 7
# make bar plots for each reaction, counting how many EFMs it participates
figS7, axS7 = plt.subplots(2, 2, figsize=(15, 12))
for i, ax in enumerate(axS7.flat):
ax.annotate(chr(ord('a')+i), xy=(0.04, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20)
rates1_df, _, _, _ = get_concatenated_raw_data('standard')
rates2_df, _, _, _ = get_concatenated_raw_data('anaerobic')
rates_df = pd.concat([rates1_df, rates2_df]).drop_duplicates()
reaction_counts = 100 * (rates_df.abs() > 1e-8).sum(0) / rates_df.shape[0]
plt.subplots_adjust(hspace=0.3)
reaction_counts.sort_values(inplace=True)
reaction_counts.plot(kind='bar', ax=axS7[0, 0], color=D.BAR_COLOR, linewidth=0)
axS7[0, 0].set_ylim(0, 100)
axS7[0, 0].set_ylabel('\% of EFMs using this reaction')
axS7[0, 0].set_xticklabels(map(D.GET_REACTION_NAME, reaction_counts.index))
rates_df = rates_df.drop(9999) # remove "exp" which is the last index
efm_counts = (rates_df.abs() > 1e-8).sum(1)
efm_counts.hist(bins=np.arange(20, 40)-0.5, ax=axS7[0, 1],
color=D.BAR_COLOR, rwidth=0.4)
axS7[0, 1].set_xlabel('no. of active reactions')
axS7[0, 1].set_ylabel('no. of EFMs')
axS7[0, 1].set_xlim(22, 36)
# Figure that calculates the correlation between each EFM and
# the "experimental" flow, and overlays that information on the
# standard "Pareto" plot
data = figure_data['standard'].copy()
data.loc[data[D.STRICTLY_ANAEROBIC_L], D.GROWTH_RATE_L] = 0
CORR_FLUX_L = 'Flux Spearman correlation'
CORR_ENZ_L = 'Enzyme Spearman correlation'
# read the measured fluxes
exp_flux_df = D.get_projected_exp_fluxes()
# remove the exchange reactions (xchg_*)
exp_flux_df = exp_flux_df.loc[exp_flux_df.index.str.find('xchg') != 0, :]
exp_flux_df.index = map(D.FIX_REACTION_ID, exp_flux_df.index)
rates_df, params_df, km_df, enzyme_abundance_df = \
get_concatenated_raw_data('standard')
# calculate correlation coefficients between the enzyme abundances and
# the measured abundances (from Schmidt et al. 2015, glucose batch)
X = enzyme_abundance_df.transpose()
# in order to convert the enzyme abundances to realistic values, we need
# to scale by a factor of 0.004 (see SI text, section S2.5)
X *= 0.004
y = map(D.PROTEOME_DICT.get, enzyme_abundance_df.columns)
X['measured'] = pd.Series(index=enzyme_abundance_df.columns, data=y)
X_pred = X.iloc[:, 0:-1].as_matrix()
X_meas = X.iloc[:, -1].as_matrix()
data[CORR_FLUX_L] = rates_df.transpose().corr('spearman').loc[9999]
data[CORR_ENZ_L] = X.corr('spearman').loc['measured']
# Pareto plot of correlation between predicted and measured fluxes
axS7[1, 0].set_title('Match with measured fluxes')
D.plot_basic_pareto(data, axS7[1, 0], x=D.YIELD_L, y=D.GROWTH_RATE_L,
c=CORR_FLUX_L, cmap='copper_r',
vmin=0, vmax=1, linewidth=0, s=30, edgecolor='k')
# Pareto plot of correlation between predicted and measured enzyme levels
axS7[1, 1].set_title('Match with measured enzyme abundance')
D.plot_basic_pareto(data, axS7[1, 1], x=D.YIELD_L, y=D.GROWTH_RATE_L,
c=CORR_ENZ_L, cmap='copper_r',
vmin=0, vmax=1, linewidth=0, s=30, edgecolor='k')
annot_color = (0.1, 0.1, 0.8)
for ax in axS7[1, :]:
ax.set_xlim(-1e-3, 1.1*data[D.YIELD_L].max())
ax.set_ylim(-1e-3, 1.15*data[D.GROWTH_RATE_L].max())
for efm in D.efm_dict.keys():
xy = np.array(data.loc[efm, [D.YIELD_L, D.GROWTH_RATE_L]].tolist())
xytext = xy.copy()
xytext[0] = 0.1 * ax.get_xlim()[1] + 0.8 * xy[0]
xytext[1] += 0.07
ax.annotate(xy=xy, s=D.efm_dict[efm]['label'],
xycoords='data', xytext=xytext, ha='left', va='bottom',
color=annot_color, fontsize=16,
arrowprops=dict(facecolor=annot_color,
shrink=0.1, width=3, headwidth=6))
figS7.tight_layout()
D.savefig(figS7, 'S7')
# %% SI Figure 8 - pareto plot with 4 alternative EFM features
figS8, axS8 = plt.subplots(2, 3, figsize=(13, 8), sharex=True, sharey=True)
plot_parameters = [{'title': 'succinate:fumarate cycling',
'c': D.SUC_FUM_CYCLE_L},
{'title': 'ammonia uptake', 'c': D.NH3_L},
{'title': 'ED pathway', 'c': D.ED_L},
{'title': 'pentose phosphate pathway', 'c': D.PPP_L},
{'title': 'upper glycolysis', 'c': D.UPPER_GLYCOLYSIS_L, 'cmap': 'coolwarm'},
{'title': 'pyruvate dehydrogenase', 'c': D.PDH_L}
]
data = figure_data['standard']
for i, (ax, d) in enumerate(zip(list(axS8.flat), plot_parameters)):
ax.annotate(chr(ord('a')+i), xy=(0.04, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20)
D.plot_basic_pareto(data, ax=ax,
x=D.YIELD_L, y=D.GROWTH_RATE_L,
c=d['c'], cmap=d.get('cmap', 'copper_r'),
linewidth=0, s=20)
ax.set_title(d['title'])
ax.set_xlim(-1e-3, 1.05*data[D.YIELD_L].max())
ax.set_ylim(-1e-3, 1.05*data[D.GROWTH_RATE_L].max())
figS8.tight_layout()
D.savefig(figS8, 'S8')
# %% SI Figure 9 - comparing yield to other EFM parameters
figS9, axS9 = plt.subplots(2, 2, figsize=(7, 7))
plot_parameters = [{'y': D.ACE_L, 'ymin': -0.001, 'ax': axS9[0, 0]},
{'y': D.OXYGEN_L, 'ymin': -0.001, 'ax': axS9[0, 1]},
{'y': D.N_REACTION_L, 'ymin': 20.0, 'ax': axS9[1, 0]},
{'y': D.TOT_FLUX_L, 'ymin': 0.0, 'ax': axS9[1, 1]}]
data = pd.concat([figure_data['standard'], figure_data['anaerobic']])
data = data.reset_index().groupby('EFM').first()
data.fillna(0, inplace=True)
for i, d in enumerate(plot_parameters):
d['ax'].annotate(chr(ord('a')+i), xy=(0.04, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20)
D.plot_basic_pareto(data, ax=d['ax'],
x=D.YIELD_L, y=d['y'], efm_dict=D.efm_dict,
edgecolors='none',
facecolors=(0.85, 0.85, 0.85),
show_efm_labels=True)
d['ax'].set_xlim(-0.1, None)
d['ax'].set_ylim(d['ymin'], None)
figS9.tight_layout()
D.savefig(figS9, 'S9')
# %% SI figure 10 - histogram of all different EFM growth
# rates in a specific condition
figS10, (axS10a, axS10b) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)
plot_growth_rate_hist(ax=axS10a)
plot_growth_rate_hist(oxygen=D.LOW_CONC['oxygen'], ax=axS10b)
axS10b.set_ylabel('')
figS10.tight_layout()
D.savefig(figS10, 'S10')
# %% SI Figure 11
figS11, axS11 = plt.subplots(1, 1, figsize=(5, 5))
plot_glucose_dual_pareto(figure_data['standard'], axS11,
draw_lines=False)
axS11.set_xlim(-1e-3, None)
axS11.set_ylim(-1e-3, None)
D.savefig(figS11, 'S11')
# %% SI Figure 12
figS12 = plot_monod_surface(figure_data)
figS12.tight_layout(pad=0.1)
D.savefig(figS12, 'S12')
# %% SI figure 13 - scatter 3D plot of the glucose uptake, oxygen uptake,
# growth rate
figS13 = plot_conc_versus_uptake_figure(figure_data)
figS13.tight_layout(w_pad=3.5, h_pad=2)
D.savefig(figS13, 'S13')
# %% SI figure 14 - scatter plots in different environmental conditions
figS14, axS14 = plt.subplots(2, 2, figsize=(8, 8),
sharex=True, sharey=True)
params = [{'glucose': D.STD_CONC['glucoseExt'],
'oxygen': D.STD_CONC['oxygen'],
'ax': axS14[0, 0]},
{'glucose': D.STD_CONC['glucoseExt'],
'oxygen': D.LOW_CONC['oxygen'],
'ax': axS14[0, 1]},
{'glucose': D.LOW_CONC['glucoseExt'],
'oxygen': D.STD_CONC['oxygen'],
'ax': axS14[1, 0]}]
data = figure_data['standard']
plot_list = [("require oxygen", (0.95, 0.7, 0.7),
~data[D.STRICTLY_ANAEROBIC_L] & data[D.STRICTLY_AEROBIC_L]),
("oxygen sensitive", (0.4, 0.7, 0.95),
data[D.STRICTLY_ANAEROBIC_L] & ~data[D.STRICTLY_AEROBIC_L]),
("facultative", (0.9, 0.5, 0.9),
~data[D.STRICTLY_ANAEROBIC_L] & ~data[D.STRICTLY_AEROBIC_L])]
x = D.YIELD_L
y = D.GROWTH_RATE_L
for d in params:
ax = d['ax']
ax.set_title('glucose = %g mM, O$_2$ = %g mM' %
(d['glucose'], d['oxygen']))
gr = interpolate_single_condition(glucose=d['glucose'],
oxygen=d['oxygen'])
for label, color, efms in plot_list:
xdata = data.loc[efms, x]
ydata = gr[efms]
ax.scatter(xdata, ydata, s=12, marker='o', alpha=1,
edgecolors='none', color=color,
label=label)
for efm, (col, lab) in D.efm_dict.items():
if efm in data.index:
ax.plot(data.at[efm, x], gr[efm], markersize=5,
marker='o', color=col, label=None)
ax.annotate(lab, xy=(data.at[efm, x], gr[efm]),
xytext=(0, 5), textcoords='offset points',
ha='center', va='bottom', color=col)
# plot the anaerobic condition data
ax = axS14[1, 1]
ax.set_title('glucose = %g mM, no O$_2$' % D.STD_CONC['glucoseExt'])
data = figure_data['anaerobic'].copy().drop(9999)
plot_list = [("require oxygen", (0.95, 0.7, 0.7), []),
("oxygen sensitive", (0.4, 0.7, 0.95),
data[D.STRICTLY_ANAEROBIC_L] & ~data[D.STRICTLY_AEROBIC_L]),
("facultative", (0.9, 0.5, 0.9),
~data[D.STRICTLY_ANAEROBIC_L] & ~data[D.STRICTLY_AEROBIC_L])]
for label, color, efms in plot_list:
xdata = data.loc[efms, D.YIELD_L]
ydata = data.loc[efms, D.GROWTH_RATE_L]
ax.scatter(xdata, ydata, s=12, marker='o', alpha=1,
edgecolors='none', color=color,
label=label)
for efm, (col, lab) in D.efm_dict.items():
if efm in data.index:
ax.plot(data.at[efm, x], gr[efm], markersize=5,
marker='o', color=col, label=None)
ax.annotate(lab, xy=(data.at[efm, x], gr[efm]),
xytext=(0, 5), textcoords='offset points',
ha='center', va='bottom', color=col)
leg = ax.legend(loc='lower right', frameon=True)
leg.get_frame().set_facecolor('#EEEEEE')
for i, (d, ax) in enumerate(zip(plot_parameters, axS14.flat)):
ax.annotate(chr(ord('a')+i), xy=(0.04, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20)
ax.set_xlim(-1e-3, None)
ax.set_ylim(-1e-3, None)
ax.set_ylabel(D.GROWTH_RATE_L)
ax.set_xlabel(D.YIELD_L)
figS14.tight_layout()
D.savefig(figS14, 'S14')
# %% SI Figure 15 - create protein allocation pie charts of selected EFMs
# focus only on the selected EFMs, and rename the columns according
# to the 3-letter acronyms
efms = D.efm_dict.keys()
_, efm_names = zip(*map(D.efm_dict.get, efms))
# load data for the pie charts from the pareto plot
rates_df, params_df, km_df, enzyme_abundance_df = \
get_concatenated_raw_data('standard')
# calculate the total cost of metabolic enzymes
    # it is given in hours, i.e. the time required for the biomass reaction
    # to produce a mass equal to the mass of the metabolic enzymes
E_i = enzyme_abundance_df.loc[efms, :].mul(params_df['weight'].fillna(0))
E_i.rename(index=dict(zip(efms, efm_names)), inplace=True)
E_met = E_i.sum(1) # total metabolic enzyme in grams per EFM
v_BM = D.BIOMASS_MW * rates_df.loc[efms, D.R_BIOMASS] * D.SECONDS_IN_HOUR
v_BM.rename(index=dict(zip(efms, efm_names)), inplace=True)
# the growth rate in [1/h] if the biomass was 100% metabolic enzymes
r_BM = v_BM / E_met
n_fig_rows = int(np.ceil((len(D.efm_dict))/2.0))
figS15, axS15 = plt.subplots(n_fig_rows, 2, figsize=(10, 5 * n_fig_rows))
for ax, efm in zip(axS15.flat, efm_names):
E_i_efm = E_i.loc[efm, :].sort_values(ascending=False)
E_i_efm = E_i_efm / E_i_efm.sum()
E_lumped = E_i_efm.drop(E_i_efm[E_i_efm.cumsum() > 0.95].index)
E_lumped.loc[D.REMAINDER_L] = E_i_efm[E_i_efm.cumsum() > 0.95].sum()
E_lumped.name = ''
E_lumped.plot.pie(colors=list(map(D.reaction_to_rgb, E_lumped.index)),
labels=list(map(D.GET_REACTION_NAME, E_lumped.index)),
ax=ax)
ax.set_title(r'\textbf{%s}' % efm + '\n' +
D.TOT_ENZYME_L + ' = %.2f' % (1.0/r_BM[efm]))
D.savefig(figS15, 'S15')
# %% SI Figure 16 - allocation area plots for glucose sweep
rates_df, full_df = get_concatenated_raw_data('sweep_glucose')
efms = D.efm_dict.keys()
figS16, axS16 = plt.subplots(len(efms), 4, figsize=(20, 4 * len(efms)))
for i, efm in enumerate(efms):
df = full_df[full_df['efm'] == efm]
if df.shape[0] == 0:
continue
v_BM = D.BIOMASS_MW * D.SECONDS_IN_HOUR * rates_df.at[efm, D.R_BIOMASS]
# make a new DataFrame where the index is the glucose concentration
# and the columns are the reactions and values are the costs.
absol = full_df[full_df['efm'] == efm].pivot(index=full_df.columns[1],
columns='reaction',
values='E_i')
D.allocation_area_plot(absol/v_BM, axS16[i, 0], axS16[i, 1],
xlabel='external glucose level [mM]')
axS16[i, 0].annotate(D.efm_dict[efm][1], xy=(0.04, 0.95),
xycoords='axes fraction', ha='left', va='top',
size=20)
# allocation area plots for oxygen sweep
rates_df, full_df = get_concatenated_raw_data('sweep_oxygen')
efms = D.efm_dict.keys()
reactions = list(rates_df.columns)
for i, efm in enumerate(efms):
df = full_df[full_df['efm'] == efm]
if df.shape[0] == 0:
continue
v_BM = D.BIOMASS_MW * D.SECONDS_IN_HOUR * rates_df.at[efm, D.R_BIOMASS]
# make a new DataFrame where the index is the glucose concentration
# and the columns are the reactions and values are the costs.
absol = full_df[full_df['efm'] == efm].pivot(index=full_df.columns[1],
columns='reaction',
values='E_i')
D.allocation_area_plot(absol/v_BM, axS16[i, 2], axS16[i, 3],
xlabel='O$_2$ level [mM]')
axS16[i, 2].annotate(D.efm_dict[efm][1], xy=(0.04, 0.95),
xycoords='axes fraction', ha='left', va='top',
size=20)
axS16[0, 1].set_title('Varying glucose levels', fontsize=25,
ha='right', va='bottom')
axS16[0, 2].set_title('Varying oxygen levels', fontsize=25,
ha='left', va='bottom')
figS16.tight_layout(h_pad=2.0)
D.savefig(figS16, 'S16')
# %% SI Figure 17 - Monod figure
monod_dfs = calculate_monod_parameters(figure_data)
figS17 = plot_monod_scatter(monod_dfs)
D.savefig(figS17, 'S17')
# %% SI Figure 18 - sensitivity to kcat of tpi (R6r)
figS18, axS18 = plt.subplots(1, 3, figsize=(12, 5), sharey=True)
for i, ax in enumerate(axS18):
ax.annotate(chr(ord('a')+i), xy=(0.04, 0.98), xycoords='axes fraction',
fontsize=20, ha='left', va='top')
axS18[0].set_title(r'effect of the $k_{cat}$ of \emph{tpi}')
D.plot_dual_pareto(figure_data['standard'],
'std. $k_{cat}$ (7800 [$s^{-1}$])',
figure_data['low_kcat_r6r'],
'low $k_{cat}$ (7.8 [$s^{-1}$])',
s=8,
ax=axS18[0], x=D.YIELD_L, y=D.GROWTH_RATE_L,
draw_lines=False)
axS18[0].set_xlim(0, None)
axS18[0].legend(loc='upper center', fontsize=12)
s = Sensitivity.from_figure_name('standard')
s.write_sensitivity_tables()
s.plot_sensitivity_as_errorbar(axS18[1], 'R6r', foldchange=2)
axS18[1].set_xlim(0, None)
axS18[1].set_title(r'sensitivity to 2-fold change in $k_{cat}$')
maxy = figure_data['sweep_kcat_r6r'].max().max() * 1.2
D.plot_sweep(figure_data['sweep_kcat_r6r'], r'$k_{cat}$ [$s^{-1}$]',
efm_dict=D.efm_dict, ax=axS18[2], legend_loc='center left',
legend_fontsize=10)
axS18[2].set_xscale('log')
axS18[2].set_ylim(0, maxy)
axS18[2].fill_between([7837/2.0, 7837*2.0], 0, maxy,
color=(0.9, 0.9, 0.9))
axS18[2].plot([7837, 7837], [0.0, maxy], '--',
color='grey', linewidth=1)
axS18[2].text(7837, maxy*1.01, r'std. $k_{cat}$', ha='center',
color='grey')
axS18[2].plot([7.837, 7.837], [0.0, maxy], '--',
color='grey', linewidth=1)
axS18[2].text(7.837, maxy*1.01, r'low $k_{cat}$', ha='center',
color='grey')
figS18.tight_layout()
D.savefig(figS18, 'S18')
# %%
figS19 = plt.figure(figsize=(10, 10))
axS19a = figS19.add_subplot(2, 2, 1, projection='3d')
axS19b = figS19.add_subplot(2, 2, 2, projection='3d')
axS19c = figS19.add_subplot(2, 2, 3, projection='3d')
axS19d = figS19.add_subplot(2, 2, 4, projection='3d')
plot_surface(axS19a, figure_data['standard'], c=D.GROWTH_RATE_L,
cmap='Oranges', vmax=0.7,
sweep_cache_fname='sweep2d_win_200x200.csv')
plot_surface_diff(axS19b, ko_cache_fname='sweep2d_edko_win_200x200.csv')
plot_surface_diff(axS19c, ko_cache_fname='sweep2d_empko_win_200x200.csv')
plot_surface_diff(axS19d, ko_cache_fname='sweep2d_oxphosko_win_200x200.csv')
axS19a.set_title('wild-type')
axS19b.set_title('ED knockout')
axS19c.set_title('EMP knockout')
axS19d.set_title('OxPhos knockout')
axS19a.set_zlim(0, 1)
axS19b.set_zlim(0, 1)
axS19c.set_zlim(0, 1)
axS19d.set_zlim(0, 1)
figS19.tight_layout(h_pad=2)
D.savefig(figS19, 'S19')
# %% S20 and S21 - epistasis plots
e = Epistasis(figure_data)
figS20 = e.plot_gr_epistasis()
figS20.savefig(os.path.join(D.OUTPUT_DIR, 'FigS20.pdf'))
figS21 = e.plot_yield_epistasis()
D.savefig(figS21, 'S21')
# %% convert EFM visualization flux plots from SVG to EPS
with zipfile.ZipFile(D.ZIP_SVG_FNAME, 'r') as z:
for efm, (_, efm_name) in D.efm_dict.items():
with tempfile.NamedTemporaryFile(delete=True, suffix='.svg') as tmp_fp:
# extract the SVG file from the ZIP file and save it as a temp
# file.
with z.open('efm%04d.svg' % efm, 'r') as svg_fp:
shutil.copyfileobj(svg_fp, tmp_fp)
# then use inkscape to convert the SVG to EPS
eps_fname = os.path.join(D.OUTPUT_DIR, 'FigS22_%s.eps' % efm_name)
os.system('inkscape %s -E %s' % (tmp_fp.name, eps_fname))
# %% fig S25 - glucose sweep at anaerobic conditions
# find the "winning" EFM for each glucose level and make a color-coded
# plot like the 3D surface plots
def plot_1D_sweep(interpolated_df, ax0, ax1, ax2, color_func=None):
best_df = pd.DataFrame(index=interpolated_df.index,
columns=[D.GROWTH_RATE_L, 'best_efm', 'hexcolor'])
best_df[D.GROWTH_RATE_L] = interpolated_df.max(axis=1)
best_df['best_efm'] = interpolated_df.idxmax(axis=1)
best_efms = sorted(best_df['best_efm'].unique())
if color_func is None:
color_dict = dict(zip(best_efms, D.cycle_colors(len(best_efms),
h0=0.02, s=1)))
color_func = color_dict.get
best_df['hexcolor'] = best_df['best_efm'].apply(color_func)
ax0.plot(interpolated_df.index, interpolated_df, '-',
linewidth=1, alpha=0.2, color=(0.5, 0.5, 0.8))
ax0.set_xscale('log')
ax0.set_xlim(0.6e-4, 1.5e4)
ax0.set_ylim(1e-3, 0.86)
ax0.set_xlabel(D.GLU_COL)
ax0.set_ylabel(D.GROWTH_RATE_L)
d = list(zip(best_df.index, best_df[D.GROWTH_RATE_L]))
segments = zip(d[:-1], d[1:])
colors = list(best_df['hexcolor'].iloc[1:].values)
for efm in best_efms:
if efm in D.efm_dict:
label = D.efm_dict[efm]['label']
else:
label = 'EFM %04d' % efm
ax1.plot(interpolated_df.index, interpolated_df[efm],
label='_nolegend_', linestyle='-',
color=D.efm_to_hex(efm), linewidth=0.5, alpha=0.5)
ax1.plot([0, 1], [-1, -1],
label=label,
color=color_func(efm), linewidth=2)
coll = LineCollection(segments, colors=colors, linewidths=2)
ax1.add_collection(coll)
ax1.legend(loc='best', fontsize=12)
ax1.set_xscale('log')
ax1.set_xlim(0.6e-4, 1.5e4)
ax1.set_ylim(1e-3, 0.86)
ax1.set_xlabel(D.GLU_COL)
ax1.set_ylabel(D.GROWTH_RATE_L)
ax2.plot(best_df[D.GROWTH_RATE_L], best_df.index,
'-', color=(0.5, 0.5, 0.8), linewidth=2)
ax2.set_xlim(0, 0.7)
ax2.set_ylim(0, 0.1)
ax2.set_xlabel(D.GROWTH_RATE_L)
ax2.set_ylabel('residual ' + D.GLU_COL)
low_o2 = 0.0115
figS25, axs25 = plt.subplots(3, 3, figsize=(10, 10))
axs25[0, 1].set_title('anaerobic')
axs25[1, 1].set_title(r'low $O_2$ (%.1f $\mu$M)' % (low_o2*1e3))
axs25[2, 1].set_title(r'std. $O_2$ (%g mM)' % D.STD_CONC['oxygen'])
interpolated_df = get_anaerobic_glucose_sweep_df(figure_data)
plot_1D_sweep(interpolated_df, axs25[0, 0], axs25[0, 1], axs25[0, 2], None)
interpolated_df = get_glucose_sweep_df(oxygen_conc=low_o2)
plot_1D_sweep(interpolated_df, axs25[1, 0], axs25[1, 1], axs25[1, 2], D.efm_to_hex)
interpolated_df = get_glucose_sweep_df(oxygen_conc=D.STD_CONC['oxygen'])
plot_1D_sweep(interpolated_df, axs25[2, 0], axs25[2, 1], axs25[2, 2], D.efm_to_hex)
for i, ax in enumerate(axs25.flat):
ax.annotate(chr(ord('a')+i), xy=(0.02, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20)
figS25.tight_layout()
D.savefig(figS25, 'S25')
# %% measured protein abundances (if available)
# S26 - correlation between EFM protein abundance predictions and measured abundances
rates_df, params_df, km_df, enzyme_abundance_df = \
get_concatenated_raw_data('standard')
# calculate correlation coefficients between the enzyme abundances and
# the measured abundances (from Schmidt et al. 2015, glucose batch)
X = enzyme_abundance_df.transpose()
# in order to convert the enzyme abundances to realistic values, we need
# to scale by a factor of 0.004 (see SI text, section S2.5)
X *= 0.004
y = map(D.PROTEOME_DICT.get, enzyme_abundance_df.columns)
X['measured'] = pd.Series(index=enzyme_abundance_df.columns, data=y)
X_pred = X.iloc[:, 0:-1].as_matrix()
X_meas = X.iloc[:, -1].as_matrix()
data = figure_data['standard'].copy()
CORR_ENZ_L = 'enzyme abundance correlation'
data[CORR_ENZ_L] = X.corr('spearman').loc['measured']
# replace all zeros with a minimum protein level of 1 nM,
# which represents the noise level (~1 molecule per cell)
RMSE_ENZ_L = 'enzyme abundance RMSE'
y = np.tile(X_meas, (X_pred.shape[1], 1))
data[RMSE_ENZ_L] = np.sqrt(np.square(X_pred.T - y).mean(1))
figS26 = plt.figure(figsize=(12, 5))
axS26a = figS26.add_subplot(1, 2, 1)
axS26a.set_title('Spearman correlation')
axS26b = figS26.add_subplot(1, 2, 2)
axS26b.set_title('exp')
D.plot_basic_pareto(data, axS26a, x=D.YIELD_L, y=D.GROWTH_RATE_L,
c=CORR_ENZ_L, cmap='copper_r')
for efm in D.efm_dict.keys():
xy = np.array(data.loc[efm, [D.YIELD_L, D.GROWTH_RATE_L]].tolist())
xytext = xy + np.array((0, 0.07))
axS26a.annotate(xy=xy, s=D.efm_dict[efm]['label'],
xycoords='data', xytext=xytext, ha='center',
arrowprops=dict(facecolor='black',
shrink=0.05, width=2, headwidth=4))
axS26a.set_xlim(-1e-3, 1.1*data[D.YIELD_L].max())
axS26a.set_ylim(-1e-3, 1.15*data[D.GROWTH_RATE_L].max())
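# floor zero/missing predicted abundances at 1e-5 mM so that they remain
# visible on the log-log comparison axes below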
X[X == 0] = 1e-5
X.fillna(1e-5, inplace=True)
axS26b.plot(X.loc[:, 'measured'], X.loc[:, 9999], 'o', alpha=0.3)
axS26b.plot([1e-5, 1], [1e-5, 1], 'b--')
for i in X.index:
xy = np.array(X.loc[i, ['measured', 9999]].tolist())
axS26b.text(xy[0], xy[1], i, fontsize=8, ha='center', va='bottom')
axS26b.set_xscale('log')
axS26b.set_yscale('log')
axS26b.set_ylabel('predicted enzyme abundance [mM]')
axS26b.set_xlabel('measured enzyme abundance [mM]')
D.savefig(figS26, 'S26')
# %% glucose sweeps for low and high oxygen levels
figS27, axs27 = plt.subplots(1, 3, figsize=(12, 4))
low_o2 = 0.009
std_o2 = D.STD_CONC['oxygen']
axs27[0].set_title('low $O_2$ (%g mM)' % low_o2)
axs27[1].set_title('std. $O_2$ (%g mM)' % std_o2)
plot_glucose_sweep(axs27[0], oxygen_conc=low_o2,
ylim=(0, 0.86), legend_loc='upper center',
mark_glucose=False)
plot_glucose_sweep(axs27[1], oxygen_conc=std_o2,
ylim=(0, 0.86), legend_loc=None,
mark_glucose=False)
glu_grid = np.logspace(-4, -1, 200)
interp_data_df = pd.DataFrame(index=glu_grid, columns=D.efm_dict.keys())
interpolator = SweepInterpolator.interpolate_2D_sweep(D.efm_dict.keys())
for efm in [1565]:
gr = [interpolator.calc_gr(efm, g, std_o2)
for g in glu_grid]
axs27[2].plot(gr, glu_grid, '-', color=D.efm_dict[efm]['color'])
axs27[2].set_xlabel(D.GROWTH_RATE_L)
axs27[2].set_ylabel('residual ' + D.GLU_COL)
axs27[2].set_title(r'std. $O_2$, only \emph{max-gr}')
for i, ax in enumerate(axs27):
ax.annotate(chr(ord('a')+i), xy=(0.02, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20)
figS27.tight_layout()
D.savefig(figS27, 'S27')
# %% A Pareto figure focusing only on the Pareto-optimal EFMs and
# neighboring points (generated by random sampling)
figS28, axs28 = plt.subplots(1, 1, figsize=(5, 5))
sampled_data = pd.read_pickle(pareto_sampling.PICKLE_FNAME)
xdata = sampled_data[D.YIELD_L]
ydata = sampled_data[D.GROWTH_RATE_L]
sampled_data.plot.scatter(x=D.YIELD_L, y=D.GROWTH_RATE_L,
marker='.', c=(.9, .9, .9), s=10,
edgecolors=None, alpha=0.5, ax=axs28, zorder=1)
axs28.set_xlabel(D.YIELD_L)
axs28.set_ylabel(D.GROWTH_RATE_L)
pareto_df = D.get_pareto(sampled_data, D.YIELD_L, D.GROWTH_RATE_L)
pareto_df.plot.scatter(x=D.YIELD_L, y=D.GROWTH_RATE_L, marker='s',
color='k', s=15,
ax=axs28, legend=None, zorder=2)
efm_data = figure_data['standard']
efm_data.plot.scatter(x=D.YIELD_L, y=D.GROWTH_RATE_L,
marker='.', c=(1, 0.6, 0.6), s=15,
edgecolors=None, alpha=1, ax=axs28, zorder=3)
pareto_df = D.get_pareto(efm_data, D.YIELD_L, D.GROWTH_RATE_L)
pareto_df.plot.scatter(x=D.YIELD_L, y=D.GROWTH_RATE_L,
marker='s', c='r', s=30,
edgecolors=None, alpha=1, ax=axs28, zorder=4)
for efm, (col, lab) in D.efm_dict.items():
if efm in pareto_df.index:
axs28.annotate(lab, xy=(pareto_df.at[efm, D.YIELD_L], pareto_df.at[efm, D.GROWTH_RATE_L]),
xytext=(5, 6), textcoords='offset points',
ha='left', va='bottom', color='r')
axs28.set_xlim(18.5, 23.3)
axs28.set_ylim(0.3, 0.8)
D.savefig(figS28, 'S28')
# %% A box plot of the capacity utilization across EFMs (grouped by reactions)
figS29, axS29 = plt.subplots(2, 1, figsize=(10, 10))
fig_name_and_titles = [('standard', 'glucose = %g mM, O$_2$ = %g mM' %
(D.STD_CONC['glucoseExt'], D.STD_CONC['oxygen']),
axS29[0], True),
('anaerobic', 'glucose = %g mM, no O$_2$' %
(D.STD_CONC['glucoseExt']),
axS29[1], False)]
for i, (fig_name, fig_title, ax, show_exp) in enumerate(fig_name_and_titles):
ax.set_title(fig_title)
ax.annotate(chr(ord('a')+i), xy=(0.01, 0.98), xycoords='axes fraction',
fontsize=20, ha='left', va='top')
# read the raw files again, now including all kinetic parameters
# and individual enzyme abundances at the ECM optimum
zip_fnames, regex = D.DATA_FILES[fig_name]
rates, params, kms, enzymes = get_df_from_pareto_zipfile(zip_fnames[0])
rates.round(3).to_csv(
os.path.join(D.OUTPUT_DIR, '%s_rates.csv' % fig_name))
enzymes.round(3).to_csv(
os.path.join(D.OUTPUT_DIR, '%s_enzyme_abundance.csv' % fig_name))
# calculate the reverse kcats for the reversible reactions
kms['s_i * K_i'] = np.log(kms['Km']) * kms['coefficient']
pi_km = kms.groupby(['reaction']).sum()['s_i * K_i']
pi_km = pi_km.apply(np.exp)
params['prod(Km)'] = pi_km
params.rename(columns={'kcat': 'kcat_fwd [1/s]'}, inplace=True)
params['kcat_rev [1/s]'] = \
params['kcat_fwd [1/s]'] * params['prod(Km)'] / params['Keq']
# enzyme abundances in the result files are given in moles, and
# are the optimal amounts that enable the catalysis of the reaction
    # according to the rates given in the "rates" table. Multiplying the abundance
# by the kcat values would give us the maximal capacity, which is
# higher than the actual rate (given in "rates")
rates = rates.reset_index().melt(
id_vars='efm', var_name='reaction', value_name='rate [mM/s]')
enzymes = enzymes.reset_index().melt(
id_vars='efm', var_name='reaction', value_name='enzyme [mM]')
caputil_df = pd.merge(rates, enzymes, on=['efm', 'reaction'])
# drop the cases where the enzyme levels were 0
caputil_df = caputil_df[caputil_df['enzyme [mM]'] > 0]
caputil_df['kapp [1/s]'] = \
caputil_df['rate [mM/s]'] / caputil_df['enzyme [mM]']
# to calculate the capacity usage,
# we need to divide each kapp by the kcat, which
# is tricky, because it depends on the flux direction.
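    # e.g. (hypothetical numbers): a flux of -2 mM/s carried by 1 mM of enzyme
    # gives kapp = -2 1/s; the flux is negative, so kcat_rev is the relevant
    # ceiling, and with kcat_rev = 4 1/s the capacity utilization comes out as
    # 2/4 = 0.5 after the sign flip applied below.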
caputil_df = caputil_df.join(
params[['kcat_fwd [1/s]', 'kcat_rev [1/s]']], on='reaction')
caputil_df['kcat [1/s]'] = caputil_df['kcat_fwd [1/s]']
# for all cases where the flux is negative
rev_idx = caputil_df[caputil_df['rate [mM/s]'] < 0].index
caputil_df.loc[rev_idx, 'kcat [1/s]'] = caputil_df.loc[rev_idx, 'kcat_rev [1/s]']
caputil_df.loc[rev_idx, 'kapp [1/s]'] = -caputil_df.loc[rev_idx, 'kapp [1/s]']
caputil_df['capacity utilization'] = \
caputil_df['kapp [1/s]'] / caputil_df['kcat [1/s]']
caputil_exp = caputil_df[caputil_df['efm'] == 9999].set_index('reaction')
caputil_df = caputil_df[caputil_df['efm'] != 9999]
cap_util_median = caputil_df.groupby('reaction').median()
order = cap_util_median.sort_values(by='capacity utilization').index
# TODO: add the cap. util. of the "exp" flux mode in a different color
# and also the measured one for glucose media (get from Dan)
ax_box = sns.boxplot(x='reaction', y='capacity utilization',
data=caputil_df,
order=order, ax=ax)
ax_box.set_xticklabels(order, rotation=90)
ax_box.set_xlabel('')
boxes = ax_box.artists
for i, r in enumerate(order):
boxes[i].set_facecolor(D.reaction_to_rgb(r))
if show_exp:
if r in caputil_exp.index:
ax.plot(i, caputil_exp.at[r, 'capacity utilization'],
markerfacecolor=(1, 0, 0), marker='o')
else:
ax.plot(i, -0.05, markerfacecolor=(1, 0, 0), marker='.')
figS29.tight_layout()
D.savefig(figS29, 'S29')
| gpl-2.0 |
peterwilletts24/Python-Scripts | EMBRACE/Plot_Time_Variablity_408_flymake.py | 2 | 13444 | """
Load npy xy, plot and save
"""
import os, sys
import matplotlib
#matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
import pdb
import linecache
import datetime
def PrintException():
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rc('font', family = 'serif', serif = 'cmr10')
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 14}
matplotlib.rc('font', **font)
import numpy as np
from datetime import timedelta
import datetime
import math
import imp
import re
from textwrap import wrap
import iris.analysis.geometry
from shapely.geometry import Polygon
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
imp.load_source('IrisFunctions', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/IrisFunctions.py')
from IrisFunctions import *
#unrotate = imp.load_source('util', '/home/pwille/python_scripts/modules/unrotate_pole.py')
utc_to_local=datetime.timedelta(hours=5, minutes=30)
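# presumably the UTC -> Indian Standard Time offset (UTC+05:30)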
###############
# Things to change
top_dir='/nfs/a90/eepdw/Data/EMBRACE'
save_dir='/nfs/a90/eepdw/Figures/EMBRACE/TimeVariability'
file_name = 'TimeVar_408_land_domain_constrain__and_oro_not_greater_than_data_monsoon_trough.npz'
diag_title = 'Area Averaged Rainfall'
area='monsoon_trough'
types_of_plot=['large_domain_only', '8_and_12_km_only', 'all']
#types_of_plot=['8_and_12_km_only', 'all']
types_of_plot=['all']
types_of_plot=['large_domain_only', '8_and_12_km_only', 'all', '8_and_12_km_plus', '8_and_12_km_par_only', '8_and_12_km_exp_only']
formatter = matplotlib.dates.DateFormatter('%d %b')
# lon_max = 101.866
# lon_min = 64.115
# lat_max= 33.
# lat_min=-6.79
#trmm_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/'
#trmm_file = "trmm_diurnal_average_lat_%s_%s_lon_%s_%s.npz" % (lat_min,lat_max, lon_min, lon_max)
def main():
for type_of_plot in types_of_plot:
if type_of_plot=='large_domain_only':
experiment_ids_p = [ 'djznw', 'dkjxq', 'djznq' ] # Params
experiment_ids_e = ['dkhgu', 'dkbhu'] # Explicit
if type_of_plot=='8_and_12_km_par_only':
experiment_ids_p = [ 'dkmbq', 'dklzq' ] # Params
experiment_ids_e = [] # Explicit
if type_of_plot=='8_and_12_km_exp_only':
experiment_ids_p = [] # Params
experiment_ids_e = ['dklyu', 'dklwu', 'dkmgw'] # Explicit
if type_of_plot=='8_and_12_km_plus':
experiment_ids_p = [ 'djznw' ,'dkmbq', 'dklzq' ] # Params
experiment_ids_e = ['dklyu', 'dkmgw', 'dklwu', 'dkbhu'] # Explicit
if type_of_plot=='8_and_12_km_only':
experiment_ids_p = [ 'djznw', 'dkmbq', 'dklzq' ] # Params
experiment_ids_e = ['dklyu', 'dkmgw', 'dklwu', 'dkbhu'] # Explicit
if type_of_plot=='all':
experiment_ids_p = ['djznw', 'djzny', 'djznq', 'dklzq', 'dkmbq', 'dkjxq' ] # Most of Params
experiment_ids_e = ['dklwu', 'dklyu', 'dkmgw', 'djzns', 'dkbhu', 'djznu', 'dkhgu'] # Most of Explicit
NUM_COLOURS = 16
cmap=cm.get_cmap(cm.Set1, NUM_COLOURS)
#cgen = (cmap(1.*i/NUM_COLORS) for i in range(NUM_COLORS))
#for ls in ['land', 'sea', 'total']:
for ls in ['land']:
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
#legendEntries=[]
#legendtext=[]
if ls=='sea':
bbox_anchor=-0.25
else:
bbox_anchor=0
# Change the legend label colors to almost black
#texts = l0.texts
#for t in texts:
#t.set_color('#262626')
legendEntries=[]
legendtext=[]
for c, experiment_id in enumerate(experiment_ids_p):
expmin1 = experiment_id[:-1]
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*3/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*7/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*9/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
if (experiment_id=='djzns'):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=1.6
linestylez='-'
if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
print experiment_id
colour = cmap(1.*13/NUM_COLOURS)
linewidth=1.9
if (experiment_id=='dkbhu'):
linestylez='-'
if (experiment_id=='dkhgu'):
linestylez=':'
if (experiment_id=='djznu'):
print experiment_id
colour = cmap(1.*15/NUM_COLOURS)
linewidth=2.
linestylez='-'
try:
plotnp = np.load('%s/%s/%s/%s' % (top_dir, expmin1, experiment_id, file_name))
#if (ls != 'total'):
# Make own time x-axis (local)
#pdb.set_trace()
time_arg_sort=np.argsort(plotnp['time_coords'])
#time_sort = plotnp[1][hour_arg_sort]
try:
data_sort = plotnp['data'][:,-3][time_arg_sort]
except Exception:
data_sort = plotnp['data'][time_arg_sort]
#minute_min,hour_min = math.modf(plotnp['time_coords'].points.min())
#minute_max,hour_max = math.modf(plotnp['time_coords'].points.max())
plot_dates = ConvertHoursSince1970ToDatetime(plotnp['time_coords'][time_arg_sort])
#pdb.set_trace()
l, = plt.plot_date(plot_dates, data_sort, label=model_name_convert_legend.main(experiment_id), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
#else:
# l, = plt.plot_date(d, plotnp*3600, label=model_name_convert_legend.main(experiment_id), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
PrintException()
pass
l1=plt.legend(legendEntries, legendtext, title='Parametrised', loc=9, frameon=False, prop={'size':12},
bbox_to_anchor=(0+bbox_anchor, 0,1, 1))
# Change the legend label colors to almost black
texts = l1.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
c1=0
for c, experiment_id in enumerate(experiment_ids_e):
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*3/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*7/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*9/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
if (experiment_id=='djzns'):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=1.6
linestylez='-'
if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
print experiment_id
colour = cmap(1.*13/NUM_COLOURS)
linewidth=1.9
if (experiment_id=='dkbhu'):
linestylez='-'
if (experiment_id=='dkhgu'):
linestylez=':'
if (experiment_id=='djznu'):
print experiment_id
colour = cmap(1.*15/NUM_COLOURS)
linewidth=2.
linestylez='-'
expmin1 = experiment_id[:-1]
try:
plotnp = np.load('%s/%s/%s/%s' % (top_dir, expmin1, experiment_id, file_name))
time_arg_sort=np.argsort(plotnp['time_coords'])
#time_sort = plotnp[1][hour_arg_sort]
try:
data_sort = plotnp['data'][:,-3][time_arg_sort]
except Exception:
data_sort = plotnp['data'][time_arg_sort]
#minute_min,hour_min = math.modf(plotnp['time_coords'].points.min())
#minute_max,hour_max = math.modf(plotnp['time_coords'].points.max())
#pdb.set_trace()
plot_dates = ConvertHoursSince1970ToDatetime(plotnp['time_coords'][time_arg_sort])
l, = plt.plot_date(plot_dates, data_sort, label=model_name_convert_legend.main(experiment_id), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
#else:
#l, = plt.plot_date(d, plotnp*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
PrintException()
pass
l2=plt.legend(legendEntries, legendtext, title='Explicit', loc=9, frameon=False, prop={'size':12},
bbox_to_anchor=(0.155+bbox_anchor, 0,1, 1))
plt.gca().add_artist(l1)
#plt.gca().add_artist(l0)
plt.gca().xaxis.set_major_formatter(formatter)
# Change the legend label colors to almost black
texts = l2.texts
for t in texts:
t.set_color('#262626')
plt.xlabel('Time (Local)')
            plt.ylabel(r'$mm\ h^{-1}$')
title='Domain Averaged %s - %s' % (diag_title, ls)
t=re.sub('(.{68} )', '\\1\n', str(title), 0, re.DOTALL)
t = re.sub(r'[(\']', ' ', t)
t = re.sub(r'[\',)]', ' ', t)
pp_filenodot= file_name.replace(".", "")
# Bit of formatting
# Set colour of axis lines
spines_to_keep = ['bottom', 'left']
for spine in spines_to_keep:
ax.spines[spine].set_linewidth(0.5)
ax.spines[spine].set_color('#262626')
# Remove top and right axes lines ("spines")
spines_to_remove = ['top', 'right']
for spine in spines_to_remove:
ax.spines[spine].set_visible(False)
# Get rid of ticks. The position of the numbers is informative enough of
# the position of the value.
#ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
# Change the labels to the off-black
ax.xaxis.label.set_color('#262626')
ax.yaxis.label.set_color('#262626')
if not os.path.exists(save_dir): os.makedirs(save_dir)
plt.savefig('%s/EMBRACE_%s_%s_%s_%s_notitle.png' % (save_dir, area, pp_filenodot, ls, type_of_plot),
format='png', bbox_inches='tight')
fig.autofmt_xdate()
#plt.title('\n'.join(wrap('%s' % (t.title()), 1000,replace_whitespace=False)), fontsize=16)
#plt.show()
#plt.savefig('%s/EMBRACE_Diurnal__monsoon_trough_%s_%s_%s.png' % (save_dir, pp_filenodot, ls, type_of_plot),
# format='png', bbox_inches='tight')
#plt.close()
#except Exception, e:
#print e
if __name__ == '__main__':
main()
| mit |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/indexing/test_indexing.py | 2 | 36653 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
""" test fancy indexing & misc """
import pytest
from warnings import catch_warnings
from datetime import datetime
from pandas.core.dtypes.common import (
is_integer_dtype,
is_float_dtype)
from pandas.compat import range, lrange, lzip, StringIO
import numpy as np
import pandas as pd
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas import NaT, DataFrame, Index, Series, MultiIndex
import pandas.util.testing as tm
from pandas.tests.indexing.common import Base, _mklbl
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy(Base):
""" pure get/set item & fancy indexing """
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
pytest.raises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
pytest.raises(ValueError, f)
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = pd.DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = pd.Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
# Test with np.inf in columns
df = pd.DataFrame()
df.loc[0, 0] = 1
df.loc[1, 1] = 2
df.loc[0, np.inf] = 3
result = df.columns
expected = pd.Float64Index([0, 1, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
assert df['c'].dtype == np.float64
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left['foo'])
assert is_integer_dtype(left['baz'])
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
assert is_float_dtype(left['foo'])
assert is_float_dtype(left['baz'])
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
tm.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(np.random.randn(4, 3), index=list('ABCD'))
expected = df.reindex(['E'])
dfnu = DataFrame(np.random.randn(5, 3), index=list('AABCD'))
with catch_warnings(record=True):
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.loc[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.loc[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.loc[idx, 'test'] = temp
assert df.iloc[0, 2] == '-----'
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with pytest.raises(KeyError):
df[[22, 26, -8]]
assert df[21].shape[0] == df.shape[0]
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: np.nan,
21: np.nan,
22: np.nan,
23: np.nan,
24: 1.0,
25: np.nan,
26: np.nan,
27: np.nan,
28: np.nan,
29: np.nan,
30: np.nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
        # GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isna()
cols = ['col1', 'col2']
dft = df2 * 2
dft.iloc[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
# coerces to float64 because values has float64 dtype
# GH 14001
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': [0., 1., 4., 6., 8., 10.],
'col2': [12, 7, 16, np.nan, 20, 22]})
df2 = df.copy()
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
with catch_warnings(record=True):
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
with catch_warnings(record=True):
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
with catch_warnings(record=True):
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
with catch_warnings(record=True):
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
with catch_warnings(record=True):
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
assert df.index.is_all_dates
with pytest.raises(KeyError):
df['2011']
with pytest.raises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
assert not df.index.is_all_dates
with pytest.raises(KeyError):
df['2011']
with pytest.raises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
tm.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
@pytest.mark.parametrize("index,val", [
(pd.Index([0, 1, 2]), 2),
(pd.Index([0, 1, '2']), '2'),
(pd.Index([0, 1, 2, np.inf, 4]), 4),
(pd.Index([0, 1, 2, np.nan, 4]), 4),
(pd.Index([0, 1, 2, np.inf]), np.inf),
(pd.Index([0, 1, 2, np.nan]), np.nan),
])
def test_index_contains(self, index, val):
assert val in index
@pytest.mark.parametrize("index,val", [
(pd.Index([0, 1, 2]), '2'),
(pd.Index([0, 1, '2']), 2),
(pd.Index([0, 1, 2, np.inf]), 4),
(pd.Index([0, 1, 2, np.nan]), 4),
(pd.Index([0, 1, 2, np.inf]), np.nan),
(pd.Index([0, 1, 2, np.nan]), np.inf),
# Checking if np.inf in Int64Index should not cause an OverflowError
# Related to GH 16957
(pd.Int64Index([0, 1, 2]), np.inf),
(pd.Int64Index([0, 1, 2]), np.nan),
(pd.UInt64Index([0, 1, 2]), np.inf),
(pd.UInt64Index([0, 1, 2]), np.nan),
])
def test_index_not_contains(self, index, val):
assert val not in index
def test_index_type_coercion(self):
with catch_warnings(record=True):
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but is actually, not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)),
Series(range(5), index=range(1, 6))]:
assert s.index.is_integer()
for indexer in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)['0'] = 0
assert s2.index.is_object()
for s in [Series(range(5), index=np.arange(5.))]:
assert s.index.is_floating()
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
idxr(s2)[0.1] = 0
assert s2.index.is_floating()
assert idxr(s2)[0.1] == 0
s2 = s.copy()
idxr(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
idxr(s2)['0'] = 0
assert s2.index.is_object()
class TestMisc(Base):
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df['a'] = 10
tm.assert_frame_equal(DataFrame({0.0: df[0.0],
1.0: df[1.0],
'a': [10] * 10}),
df)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df.loc[df.index[:2]] = 1
expected = DataFrame({'a': [1, 1, 3], 'b': [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_float_index_at_iat(self):
s = pd.Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in s.iteritems():
assert s.at[el] == item
for i in range(len(s)):
assert s.iat[i] == i + 1
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right):
# label, index, slice
r, i, s = list('bcd'), [1, 2, 3], slice(1, 4)
c, j, l = ['joe', 'jolie'], [1, 2], slice(1, 3)
left = df.copy()
left.loc[r, c] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.iloc[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
with catch_warnings(record=True):
left.ix[s, l] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
with catch_warnings(record=True):
left.ix[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
with catch_warnings(record=True):
left.ix[r, c] = rhs
tm.assert_frame_equal(left, right)
xs = np.arange(20).reshape(5, 4)
cols = ['jim', 'joe', 'jolie', 'joline']
df = pd.DataFrame(xs, columns=cols, index=list('abcde'))
        # right hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right = df.copy()
right.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right)
# make frames multi-type & re-run tests
for frame in [df, rhs, right]:
frame['joe'] = frame['joe'].astype('float64')
frame['jolie'] = frame['jolie'].map('@{0}'.format)
run_tests(df, rhs, right)
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
            if not idx.is_integer():
# For integer indices, ix and plain getitem are position-based.
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
for idx in [_mklbl('A', 20), np.arange(20) + 100,
np.linspace(100, 150, 20)]:
idx = Index(idx)
s = Series(np.arange(20), index=idx)
assert_slices_equivalent(SLC[idx[9]::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:idx[9]:-1], SLC[:8:-1])
assert_slices_equivalent(SLC[idx[13]:idx[9]:-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[idx[9]:idx[13]:-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
s = Series(np.arange(20), index=_mklbl('A', 20))
tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
lambda: s[::0])
tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
lambda: s.loc[::0])
with catch_warnings(record=True):
tm.assert_raises_regex(ValueError,
'slice step cannot be zero',
lambda: s.ix[::0])
def test_indexing_assignment_dict_already_exists(self):
df = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]}).set_index('z')
expected = df.copy()
rhs = dict(x=9, y=99)
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
def test_indexing_dtypes_on_empty(self):
# Check that .iloc and .ix return correct dtypes GH9983
df = DataFrame({'a': [1, 2, 3], 'b': ['b', 'b2', 'b3']})
with catch_warnings(record=True):
df2 = df.ix[[], :]
assert df2.loc[:, 'a'].dtype == np.int64
tm.assert_series_equal(df2.loc[:, 'a'], df2.iloc[:, 0])
with catch_warnings(record=True):
tm.assert_series_equal(df2.loc[:, 'a'], df2.ix[:, 0])
def test_range_in_series_indexing(self):
# range can cause an indexing error
# GH 11652
for x in [5, 999999, 1000000]:
s = pd.Series(index=range(x))
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
def test_non_reducing_slice(self):
df = pd.DataFrame([[0, 1], [2, 3]])
slices = [
# pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
pd.IndexSlice[[1], [1]],
pd.IndexSlice[1, [1]],
pd.IndexSlice[[1], 1],
pd.IndexSlice[1],
pd.IndexSlice[1, 1],
slice(None, None, None),
[0, 1],
np.array([0, 1]),
pd.Series([0, 1])
]
for slice_ in slices:
tslice_ = _non_reducing_slice(slice_)
assert isinstance(df.loc[tslice_], DataFrame)
def test_list_slice(self):
# like dataframe getitem
slices = [['A'], pd.Series(['A']), np.array(['A'])]
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['A', 'B'])
expected = pd.IndexSlice[:, ['A']]
for subset in slices:
result = _non_reducing_slice(subset)
tm.assert_frame_equal(df.loc[result], df.loc[expected])
def test_maybe_numeric_slice(self):
df = pd.DataFrame({'A': [1, 2], 'B': ['c', 'd'], 'C': [True, False]})
result = _maybe_numeric_slice(df, slice_=None)
expected = pd.IndexSlice[:, ['A']]
assert result == expected
result = _maybe_numeric_slice(df, None, include_bool=True)
        expected = pd.IndexSlice[:, ['A', 'C']]
        assert result == expected
result = _maybe_numeric_slice(df, [1])
expected = [1]
assert result == expected
def test_partial_boolean_frame_indexing(self):
# GH 17170
df = pd.DataFrame(np.arange(9.).reshape(3, 3),
index=list('abc'),
columns=list('ABC'))
index_df = pd.DataFrame(1, index=list('ab'), columns=list('AB'))
result = df[index_df.notnull()]
expected = pd.DataFrame(np.array([[0., 1., np.nan],
[3., 4., np.nan],
[np.nan] * 3]),
index=list('abc'),
columns=list('ABC'))
tm.assert_frame_equal(result, expected)
class TestSeriesNoneCoercion(object):
EXPECTED_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_loc_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_setitem_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[start_series == start_series[0]] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_loc_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[start_series == start_series[0]] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
class TestDataframeNoneCoercion(object):
EXPECTED_SINGLE_ROW_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_loc(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe.loc[0, ['foo']] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_coercion_with_setitem_and_dataframe(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe[start_dataframe['foo'] == start_dataframe['foo'][
0]] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_none_coercion_loc_and_dataframe(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe.loc[start_dataframe['foo'] == start_dataframe[
'foo'][0]] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_none_coercion_mixed_dtypes(self):
start_dataframe = DataFrame({
'a': [1, 2, 3],
'b': [1.0, 2.0, 3.0],
'c': [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1,
3)],
'd': ['a', 'b', 'c']
})
start_dataframe.iloc[0] = None
exp = DataFrame({'a': [np.nan, 2, 3],
'b': [np.nan, 2.0, 3.0],
'c': [NaT, datetime(2000, 1, 2),
datetime(2000, 1, 3)],
'd': [None, 'b', 'c']})
tm.assert_frame_equal(start_dataframe, exp)
| apache-2.0 |
idc9/law-net | vertex_metrics_experiment/chalboards/rankscore_experiment_search.py | 1 | 3973 | import glob
import numpy as np
import random as random
import pandas as pd
from math import *
from datetime import datetime
from experiment_helper_functions import *
from pipeline_helper_functions import *
from attachment_model_inference import *
def get_rankscores_search(G, test_params, metrics, subnet_dir, num_to_keep):
"""
Computes rank scores for each metric individually in metrics list
"""
# sample test cases
test_cases = get_test_cases(G,
test_params['active_years'],
test_params['num_test_cases'],
test_params['seed'])
# load snapshots
snapshots_dict = load_snapshots(subnet_dir)
# load tfidf matrix
tfidf_matrix, op_id_to_bow_id = load_tf_idf(subnet_dir + 'nlp/')
# ranking scores for each test case
scores = pd.DataFrame(index=[c['name'] for c in test_cases],
columns=metrics)
# get scores for each metric
for metric in metrics:
# compute scores on test cases
scores[metric] = get_test_case_scores_search(G, test_cases, snapshots_dict,
metric, tfidf_matrix, op_id_to_bow_id, num_to_keep)
return scores
def get_test_case_scores_search(G, test_cases, snapshots_dict, metric,
tfidf_matrix, op_id_to_bow_id, num_to_keep):
"""
computes the scores for each test case
returns a pandas series indexed by test case clids
# TODO: this could be parallelized
"""
# compute scores for each test case
scores = pd.Series(index=[c['name'] for c in test_cases])
for test_case in test_cases:
scores[test_case['name']] = get_rankscore_search(G, test_case,
snapshots_dict,
metric, tfidf_matrix,
op_id_to_bow_id,
num_to_keep)
return scores
def get_rankscore_search(G, test_case, snapshots_dict, metric,
tfidf_matrix, op_id_to_bow_id, num_to_keep):
"""
Gets the rank score for a given test case
"""
    # cited cases, converted from igraph vertex indices to CL ids
cited_cases = get_cited_cases(G, test_case)
    # get vertex metrics in the year before the citing year
snapshot_year = test_case['year'] - 1
# grab data frame of vertex metrics for test case's snapshot
snapshot_df = snapshots_dict['vertex_metrics_' +
str(int(snapshot_year))]
# restrict ourselves to ancestors of ing
# case strictly before ing year
    ancestors = [v.index for v in G.vs.select(year_le=snapshot_year)]
    # all edges from ing case to previous cases
    edgelist = zip([test_case.index] * len(ancestors), ancestors)
columns_to_use = [metric, 'similarity']
# grab edge data
edge_data = get_edge_data(G, edgelist, snapshot_df, columns_to_use=columns_to_use,
tfidf_matrix=tfidf_matrix, op_id_to_bow_id=op_id_to_bow_id,
metric_normalization=None, edge_status=None)
# retun subset of similar cases
edge_data = edge_data.sort_values(by='similarity',
ascending=False).iloc[0:num_to_keep]
# case rankings (CL ids)
ancestor_ranking = rank_cases_by_metric(edge_data, metric)
# cited cases that are apart of the 'search'
searched_cases = [e.split('_')[1] for e in edge_data.index]
# [e[1] for e in sored_edges]
searched_cases_cited = set(cited_cases).intersection(set(searched_cases))
# if none of the cited cases are in the search return nan
if len(searched_cases_cited) > 0:
# compute rank score
score = score_ranking(searched_cases_cited, ancestor_ranking)
else:
score = np.nan
return score
| mit |
thalro/TFModels | tfmodels/base.py | 1 | 13766 |
import os
import cPickle as pickle
import inspect
import numpy as np
from sklearn.base import BaseEstimator,ClassifierMixin
from sklearn.preprocessing import LabelBinarizer
import tensorflow as tf
tfDtype = tf.float32
npDtype = np.float32
TFMODEL_SAVE_SCOPE = 'tfmodel'
def recursive_argspec(cls):
if cls is object:
return []
try:
argspec = inspect.getargspec(cls.__init__)
args = argspec.args
except:
args = []
if isinstance(cls,type):
bases = cls.__bases__
else:
bases = cls.__class__.__bases__
for base in bases:
args += recursive_argspec(base)
return [a for a in args if a!='self']
class BatchIndGernerator(object):
def __init__(self, batchsize,N,iterations,shuffle = True,start_iteration = 0):
if batchsize is None:
self.batchsize = N
else:
self.batchsize = batchsize
self.N = N
self.iterations = iterations
self.currentiteration = start_iteration
self.queue = []
self.shuffle = shuffle
def __iter__(self):
return self
def next(self):
if self.iterations == 0 or \
len(self.queue)==0 and self.currentiteration >= self.iterations:
raise StopIteration
else:
if len(self.queue) ==0 :
self.currentiteration += 1
inds = np.arange(self.N)
if self.shuffle:
np.random.shuffle(inds)
self.queue = inds
inds = self.queue[:self.batchsize]
self.queue = self.queue[self.batchsize:]
return inds,self.currentiteration
class BatchGernerator(object):
def __init__(self,X,y=None,batchsize=128,iterations=10,shuffle = True,start_iteration = 0,preprocessors = [],preprocessor_args = [],is_training = True):
self.ind_gernerator = BatchIndGernerator(batchsize=batchsize, N=X.shape[0], iterations=iterations,shuffle = shuffle,start_iteration = start_iteration)
self.preprocessors = [p(**pa) for p,pa in zip(preprocessors,preprocessor_args)]
self.X = X
self.y = y
self.is_training = is_training
def __iter__(self):
return self
def next(self):
inds,currrentiteration = self.ind_gernerator.next()
Xbatch = self.X[inds]
if self.y is not None:
ybatch = self.y[inds]
else:
ybatch = None
for prep in self.preprocessors:
try:
Xbatch = prep.fit_transform(Xbatch,ybatch,is_training = self.is_training)
except:
Xbatch = prep.fit_transform(Xbatch,ybatch)
return Xbatch,ybatch,currrentiteration
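# Usage sketch (hypothetical arrays; comments only, nothing is executed here):
#     X = np.zeros((1000, 10), dtype=npDtype)
#     y = np.zeros((1000, 2), dtype=npDtype)
#     batches = BatchGernerator(X, y, batchsize=128, iterations=2)
#     for Xbatch, ybatch, iteration in batches:
#         pass  # indices are reshuffled at the start of every iteration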
class TFBaseEstimator(BaseEstimator):
def __init__(self,n_jobs = 1):
self.train_step = None
self.predict_step = None
self.is_fitted = False
try:
tf.reset_default_graph()
except:
print 'could not reset default graph'
if n_jobs!=-1:
config = tf.ConfigProto(intra_op_parallelism_threads=n_jobs, inter_op_parallelism_threads=n_jobs, \
allow_soft_placement=True, device_count = {'CPU': n_jobs})
self.session = tf.Session(config=config)
else:
self.session = tf.Session()
with tf.variable_scope(TFMODEL_SAVE_SCOPE):
self.global_step_tensor = tf.Variable(0,name = 'global_step', trainable=False)
@classmethod
def _get_param_names(cls):
""" this overrides the method of sklearn BaseEstimator
so that param names are also collected from
super classes.
"""
return sorted(recursive_argspec(cls))
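    # e.g. for a hypothetical subclass
    #     class MyClf(TFBaseClassifier):
    #         def __init__(self, hidden_units=32, **kwargs): ...
    # MyClf._get_param_names() includes 'hidden_units' together with the
    # arguments of TFBaseClassifier/TFBaseEstimator (e.g. 'learning_rate',
    # 'n_jobs'), which sklearn's default implementation would not collect
    # from the base classes.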
def get_tf_vars(self):
vars = tf.trainable_variables()
var_names = [v.name for v in vars]
return zip(self.session.run(vars),var_names)
def save(self,fname):
#tf_vars =self.get_tf_vars_as_ndarrays()
params = self.get_params()
pickle.dump((params,self.is_fitted), open(fname+'.params','w'),protocol = 2)
# save the tf session
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=TFMODEL_SAVE_SCOPE)
saver = tf.train.Saver(var_list = var_list)
saver.save(self.session,fname+'.session')
def load(self,fname):
params,is_fitted = pickle.load(open(fname+'.params'))
self.set_params(**params)
# fit has to be run to construct the graph first
iterations = self.iterations
self.iterations =0
dummy_X = np.zeros([1]+list(self.feature_shape))
dummy_y = np.zeros([1,self.n_outputs])
self.fit(dummy_X,dummy_y)
self.iterations =iterations
saver = tf.train.Saver()#import_meta_graph(fname+'.session.meta')
saver.restore(self.session,fname+'.session')
def __del__(self):
self.session.close()
del self.session
class TFBaseClassifier(TFBaseEstimator,ClassifierMixin):
""" a base class for classifier models.
this class should be instantiated.
"""
def __init__(self,random_state=None,learning_rate = 0.1,learning_rates=None,iterations = 10,batchsize = None,print_interval= 10,verbose = False,output_type ='softmax',epsilon = 1e-9,multilabel = False,multilabel_threshold = 0.2,batch_preprocessors = [], batch_preprocessor_args = [],feature_shape = None,n_outputs = None,*kwargs):
super(TFBaseClassifier, self).__init__(*kwargs)
self.classes_ = None
self.n_classes = None
self.x = None
self.y_ = None
self.feature_shape = None
self.n_outputs = None
self.warm_start = False
self.learning_rate_tensor = None
self.prediction = None
self.random_state = random_state
self.feature_shape = feature_shape
self.n_outputs = n_outputs
# learning rate and iterations can also be lists
if not isinstance(iterations, (list, tuple, np.ndarray)):
iterations = [iterations]
if learning_rates is None:
learning_rates = [learning_rate]
assert len(learning_rates)==len(iterations), 'learning_rates and iterations must have same length'
self.learning_rates = learning_rates
self.learning_rate = learning_rate
self.iterations = iterations
self.batchsize = batchsize
self.print_interval = print_interval
self.verbose = verbose
if output_type not in ['softmax','sigmoid']:
raise ValueError('output_type must be either softmax or sigmoid')
self.output_type = output_type
self.epsilon = epsilon
self.is_training = tf.placeholder(tf.bool)
self.multilabel = multilabel
self.multilabel_threshold = multilabel_threshold
self.batch_preprocessors = batch_preprocessors
self.batch_preprocessor_args = batch_preprocessor_args
self.train_feed_dict = {}
self.test_feed_dict = {}
def fit(self,X,y,warm_start = False):
original_batchsize = self.batchsize
if self.batchsize is None:
self.batchsize = X.shape[0]
self.warm_start = warm_start
if self.random_state is not None and not warm_start:
np.random.seed(self.random_state)
tf.set_random_seed(self.random_state)
# targets need to be binarized
lb = LabelBinarizer()
bin_y = lb.fit_transform(y)
if bin_y.shape[1]==1:
bin_y = np.concatenate([1-bin_y,bin_y],axis = 1)
self.classes_ = np.arange(bin_y.shape[1])
self.n_classes = len(self.classes_)
self.n_outputs = bin_y.shape[1]
# push some data through preprocessors, to see final feature shape
feature = X[:1]
for prep,args in zip(self.batch_preprocessors,self.batch_preprocessor_args):
preproc = prep(**args)
feature = preproc.fit_transform(feature)
self.feature_shape = list(feature.shape[1:])
if not self.is_fitted:
with tf.variable_scope(TFMODEL_SAVE_SCOPE):
# place holder for the input and output
self.x = tf.placeholder(tf.float32,[None]+self.feature_shape,name = 'self.x')
self.y = tf.placeholder(tf.float32,[None,self.n_outputs],name = 'self.y')
self.learning_rate_tensor = tf.placeholder(tf.float32,shape = [],name = 'learning_rate')
# create graph
self.predict_step = self._predict_step()
self.loss = self._loss_func()
# op for train step
self.train_step = self._train_step()
if self.output_type == 'softmax':
self.prediction = tf.nn.softmax(self.predict_step)
elif self.output_type == 'sigmoid':
self.prediction = tf.nn.sigmoid(self.predict_step)
# initialize variables
if not self.warm_start:
self._init_vars()
# run the training
self._train_loop(X,bin_y)
self.is_fitted = True
self.batchsize = original_batchsize
return self
def _init_vars(self):
self.session.run(tf.global_variables_initializer())
def _opt_var_list(self):
return tf.trainable_variables()
def predict_proba(self,X):
if not self.is_fitted:
print 'not fitted'
return
output = []
with self.session.as_default():
# make sure that batch preprocessors are created in the same graph
batches = BatchGernerator(X,None,self.batchsize,1,shuffle = False,
preprocessors = self.batch_preprocessors,
preprocessor_args = self.batch_preprocessor_args,
is_training = False)
for (Xbatch,ybatch,iteration) in batches:
feed_dict = {self.x:Xbatch.astype(float),self.is_training:False}
feed_dict.update(self.test_feed_dict)
output.append(self.session.run(self.prediction,feed_dict=feed_dict))
return np.concatenate(output,axis =0)
def predict(self,X):
if not self.is_fitted:
print 'not fitted'
return
proba = self.predict_proba(X)
if self.multilabel:
return proba>self.multilabel_threshold
else:
return self.classes_[np.argmax(proba,axis=1)]
def _train_loop(self,X,y):
#ensure that iterations is list in case it has been changed
if not isinstance(self.iterations, (list, tuple, np.ndarray)):
self.iterations = [self.iterations]
current_iteration = 0
iteration = 0
for iterations,learning_rate in zip(self.iterations,self.learning_rates):
self.learning_rate = learning_rate
with self.session.as_default():
# make sure that batch preprocessors are created in the same graph
batches = BatchGernerator(X,y,self.batchsize, iterations+iteration,
start_iteration = iteration,preprocessors = self.batch_preprocessors,
preprocessor_args = self.batch_preprocessor_args,is_training = True)
for i,(Xbatch,ybatch,iteration) in enumerate(batches):
if iteration>current_iteration:
current_iteration+=1
self._iteration_callback()
feed_dict = {self.x:Xbatch,self.y:ybatch,self.is_training:True,self.learning_rate_tensor:self.learning_rate}
feed_dict.update(self.train_feed_dict)
self.session.run(self.train_step,feed_dict = feed_dict)
if self.verbose and i%self.print_interval ==0:
feed_dict = {self.x:Xbatch,self.y:ybatch,self.is_training:False}
feed_dict.update(self.test_feed_dict)
loss = self.session.run(self.loss,feed_dict = feed_dict)
print 'iteration ',iteration,', batch ',i ,', loss ',loss,', learning rate ',learning_rate
def _loss_func(self):
# override for more fancy stuff
if self.output_type == 'softmax':
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.predict_step))+tf.reduce_sum(tf.losses.get_regularization_losses())
elif self.output_type == 'sigmoid':
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y, logits=self.predict_step))+tf.reduce_sum(tf.losses.get_regularization_losses())
return loss
def _train_step(self):
#override for more fancy stuff
        # this is needed so that batch_normalization works
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = tf.train.AdamOptimizer(learning_rate = self.learning_rate_tensor,epsilon = self.epsilon).minimize(self.loss,global_step = self.global_step_tensor,var_list = self._opt_var_list())
return train_op
def _iteration_callback(self):
# this is executed at the beginning of each iteration
# and can be overridden with useful stuff
return None
def _create_graph(self):
# this needs to be filled
raise NotImplementedError
def _predict_step(self):
# this needs to be filled
raise NotImplementedError
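# Hedged sketch of a minimal concrete subclass (layer shapes and variable names are
# illustrative, not part of this module). A subclass mainly provides the logits via
# `_predict_step`; `_loss_func` and `_train_step` above already have usable defaults:
#
#     class LinearSoftmaxTF(TFBaseClassifier):
#         def _predict_step(self):
#             n_in = int(np.prod(self.feature_shape))
#             flat = tf.reshape(self.x, [-1, n_in])
#             W = tf.get_variable('W', [n_in, self.n_outputs])
#             b = tf.get_variable('b', [self.n_outputs])
#             return tf.matmul(flat, W) + b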
| gpl-3.0 |
ajmedford/catmap | catmap/mappers/mapper_base.py | 1 | 5839 | """Class for `mapping' equilibrium coverages and rates through
descriptor space. This class acts as a base class to be inherited
by other mapper classes, but is not functional on its own.
get_rxn_parameter_map(descriptor_ranges,resolution): Uses a
scaler object to determine the reaction parameters as a function of
descriptor space. May be useful for debugging or providing
intuition about rate determining steps. Should return a list of
the form
[[descriptor_1,descriptor_2,...],[rxn_parameter1, rxn_parameter2, ...]]
save_map(map,map_file): creates a pickle of the "map" list and dumps it
to the map_file
load_map(map_file): loads a "map" list by loading a pickle from
the map_file
A functional derived mapper class must also contain the methods:
get_coverage_map(descriptor_ranges,resolution): a function which
returns a list of the form
[[descriptor_1,descriptor_2,...], [cvg_ads1,cvg_ads2,...]]
get_rates_map(descriptor_ranges,resolution): a function which returns
a list of the form
[[descriptor_1,descriptor_2,...], [rate_rxn1,rate_rxn2,...]]
"""
from matplotlib.mlab import griddata
import numpy as np
import mpmath as mp
import cPickle as pickle
import os
from copy import copy
from catmap.model import ReactionModel
from catmap import ReactionModelWrapper
from catmap import plt
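# Illustrative shape of a "map" as described in the module docstring (values are
# hypothetical): a list pairing each point in descriptor space with the quantity
# evaluated there, e.g.
#     coverage_map = [[[0.5, -1.2], [0.01, 0.98, 0.01]],
#                     [[0.6, -1.2], [0.03, 0.95, 0.02]], ...]
# Functional derived classes (such as the min_resid_mapper referenced further down)
# implement get_coverage_map/get_rates_map returning lists of this form.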
class MapperBase(ReactionModelWrapper):
# XXX : Having an instantiated object as default parameter
# may have side-effects since every instance of MapperBase will have
# the identical instance of ReactionModel as its attribute
# Unless this is deliberately so, one should better use e.g. None
# as the default value and then instantiate ReactionModel in the
# function body of __init__ .
def __init__(self,reaction_model=ReactionModel()):
self._rxm = reaction_model
self._solver_output = ['coverage','rate', #outputs requiring solver
'turnover_frequency','selectivity','rate_control',
'noninteracting_coverages']
def get_point_output(self,descriptors,*args,**kwargs):
self.solver.compile()
self._output_variables = [v for v in self.output_variables]
self._descriptors = descriptors
params = self.scaler.get_rxn_parameters(descriptors)
self._params = params
if True in [v in self._solver_output for v in self.output_variables]:
if 'coverage' not in self._output_variables:
self._output_variables = ['coverage'] + self._output_variables
elif self._output_variables[0] != 'coverage':
self._output_variables.remove('coverage')
self._output_variables = ['coverage'] + self._output_variables
self.output_labels['coverage'] = self.adsorbate_names
self.output_labels['rate'] = self.elementary_rxns
# Need coverages for solver vars
for out in self._output_variables:
if getattr(self,'get_point_'+out):
val = getattr(self,'get_point_'+out)(descriptors,*args,**kwargs)
setattr(self,'_'+out,val)
self.solver.set_output_attrs(params)
self.scaler.set_output_attrs(descriptors)
for out in self.output_variables:
mapp = getattr(self,'_'+out+'_temp',{})
mapp[repr(descriptors)] = getattr(self,'_'+out)
setattr(self,'_'+out+'_temp',mapp)
def get_output_map(self,descriptor_ranges,resolution,*args,**kwargs):
self.solver.compile()
self._output_variables = [v for v in self.output_variables]
if True in [v in self._solver_output for v in self.output_variables]:
#determines whether or not solver is needed
if 'coverage' not in self._output_variables:
self._output_variables = ['coverage'] + self._output_variables
self._coverage_map = None
elif self._output_variables[0] != 'coverage':
self._output_variables.remove('coverage')
self._output_variables = ['coverage'] + self._output_variables
# Need coverages for solver vars
ismapped = False
for out in self._output_variables:
if getattr(self,'get_'+out+'_map'):
val = getattr(self,'get_'+out+'_map')(
descriptor_ranges,resolution,*args,**kwargs)
setattr(self,out+'_map',val)
ismapped = True
if ismapped == False:
#HACK - the following code is copy-pasted from min_resid_mapper.
#need to abstract it out.
resolution = np.array(resolution)
if resolution.size == 1:
resx = resy = float(resolution)
elif resolution.size == 2:
resx = resolution[0]
resy = resolution[1]
else:
raise ValueError('Resolution is not the correct shape')
d1min,d1max = descriptor_ranges[0]
d2min,d2max = descriptor_ranges[1]
d1Vals = np.linspace(d1min,d1max,resx)
d2Vals = np.linspace(d2min,d2max,resy)
#ENDHACK
for d1V in d1Vals:
for d2V in d2Vals:
self._descriptors = [d1V,d2V]
self.get_point_output(self._descriptors)
for out in self.output_variables:
# if getattr(self,out+'_map'):
# mapp = getattr(self,out+'_map')
# else:
map_dict = getattr(self,'_'+out+'_temp',[])
mapp = []
for key in map_dict:
mapp.append([eval(key),map_dict[key]])
setattr(self,out+'_map',mapp)
if getattr(self,out+'_map_file'):
outfile = getattr(self,out+'_map_file')
self.save_map(mapp,outfile)
| gpl-3.0 |
idlead/scikit-learn | doc/conf.py | 26 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2015, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
matthewpklein/battsimpy | tests/heat_grad_test.py | 1 | 1297 | import numpy
from matplotlib import pyplot as plt
def grad_mat( N, x ) :
G = numpy.zeros( [N,N] )
for i in range(1,N-1) :
G[i,[i-1, i+1]] = [ -1./(x[i+1]-x[i-1]), 1./(x[i+1]-x[i-1]) ]
G[0,[0,1]] = [-1./(x[1]-x[0]),1./(x[1]-x[0])]
G[-1,[-2,-1]] = [-1./(x[-1]-x[-2]),1./(x[-1]-x[-2])]
return G
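# grad_mat assembles a first-order finite-difference gradient operator: central
# differences on interior nodes and one-sided differences at the two boundaries,
# so that G.dot(f) approximates df/dx on the (possibly non-uniform) grid x.
# Sanity check (linear functions are differentiated exactly):
#     G = grad_mat(len(x_m), x_m); assert numpy.allclose(G.dot(2.0*x_m), 2.0)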
### Control volumes and node points (mid node points and edge node points)
Ns = 10
Na = 20
Nc = 20
N = Na + Ns + Nc
X = 1e-6*(55+25+65)
x_e = numpy.linspace( 0.0, X, N+1 )
x_m = numpy.array( [ 0.5*(x_e[i+1]+x_e[i]) for i in range(N) ], dtype='d' )
vols = numpy.array( [ (x_e[i+1] - x_e[i]) for i in range(N)], dtype='d' )
# Useful sub-meshes for the phi_s functions
x_m_a = x_m[:Na]
x_m_c = x_m[-Nc:]
x_e_a = x_e[:Na+1]
x_e_c = x_e[-Nc-1:]
vols_a = vols[:Na]
vols_c = vols[-Nc:]
k=0.1
K = numpy.diag( numpy.ones(N)*k )
G = grad_mat( N, x_m )
phi = numpy.linspace( 0.2, 0., N )
phi = phi**2 / 0.1
dphi = numpy.gradient(phi)/numpy.gradient(x_m)
eq0 = sum( k*(dphi**2)*vols )
eq1 = sum( k*(G.dot(phi))*(G.dot(phi))*vols )
eq2 = vols.dot( k*(G.dot(phi))*(G.dot(phi)) )
eq3 = (vols.dot( k*numpy.diag(G.dot(phi)).dot(G) )).dot(phi)
print (vols.dot( k*numpy.diag(G.dot(phi)).dot(G) ))
print eq0, eq1, eq2, eq3
plt.figure()
plt.plot( x_m, G.dot(phi) )
plt.show()
| gpl-3.0 |
AlexisEidelman/Til | til/data/utils/matching.py | 2 | 7963 | # -*- coding:utf-8 -*-
'''
Created on 23 July 2013
Alexis Eidelman
'''
import pdb
import string
import pandas as pd
import time
import numpy as np
import sys
def _varname_or_index(word, list_col):
if list_col is None:
return "'" + word + "'"
else:
return str(list_col.index(word))
def _rewrite_score(score, first, other, list_col1=None, list_col2=None ):
'''
    Return the rewritten score string and the list of variables taken from `other`.
    If list_col1/list_col2 are not None, variable names are replaced by their
    positions, which is useful when evaluating the string against numpy arrays.
'''
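    # Illustrative rewriting (hypothetical column name 'age'):
    #     _rewrite_score("(age - other.age)**2", 'temp', 'table2')
    # is expected to return ("(temp['age']-table2[:,'age'])**2", ['age']); when
    # list_col1/list_col2 are provided, names are replaced by integer positions,
    # e.g. temp[3] and table2[:,5], so the string can be eval'ed on numpy arrays.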
other_var = []
list_words = score.replace("(", "( ").replace(")", " )").split()
final = ''
exclude = set(string.punctuation)
for word in list_words:
if len(word) > 1:
try:
exclude = set(string.punctuation)
word2 = ''.join(ch for ch in word if ch not in exclude)
float(word2)
except:
if 'other.' in word:
word = word[6:]
other_var += [_varname_or_index(word, None)[1:-1]]
word = _varname_or_index(word, list_col2)
word = other + "[:," + word + "]"
else:
word = _varname_or_index(word, list_col1)
word = first + "[" + word + "]"
final += word
    return final, list(set(other_var)) # trick to keep only unique values
class Matching(object):
    #TODO: create Matching_cells and Matching_simple subclasses; it would be cleaner
'''
    How to perform a matching between table 1 and table 2
    method : fafs : 'first arrived, first served'
'''
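    # Hedged usage sketch (tables and column names are hypothetical):
    #     m = Matching(table_left, table_right,
    #                  score="- (age - other.age)**2 - (income - other.income)**2")
    #     result = m.evaluate(orderby=None, method='cells')
    # `evaluate` returns a pandas Series indexed like table1 whose values identify
    # the matched row of table2; each table2 row is used at most once.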
def __init__(self, table1, table2, score):
self.table1 = table1
self.table2 = table2
if table2.columns.tolist() != table1.columns.tolist():
            raise Exception("Both tables must have the same variables")
if not isinstance(score, basestring):
            raise Exception("The score must be a string, sorry")
self.score_str = score
def evaluate(self, orderby, method):
table2 = self.table2
index_init = self.table1.index
if orderby is not None:
table1 = self.table1.sort(orderby)
else:
table1 = self.table1.loc[np.random.permutation(index_init)]
index_init = table1.index
        #TODO: forbid NA values
table2 = table2.fillna(0)
table1 = table1.fillna(0)
if len(table1) > len(table2):
            print ("WARNING : the left table must be the smaller one; "\
                "rewrite the score the other way around and swap the table order. "\
                "For now table1 is truncated to the size of table2. ")
table1 = table1[:len(table2)]
index_modif = table1.index
score_str, vars = _rewrite_score(self.score_str, 'temp', 'table2', table1.columns.tolist(), table2.columns.tolist())
n = len(table1)
if method=='cells':
groups2 = table2.groupby(vars)
cells_ini = pd.DataFrame(groups2.groups.keys(),columns =vars)
score_str, vars = _rewrite_score(self.score_str, 'temp', 'cells', table1.columns.tolist(), vars)
size = pd.DataFrame(groups2.size(), columns = ['size'])
if len(size) != len(cells_ini):
                raise Exception('Oops, there seems to be a problem with null values')
cells_ini = cells_ini.merge(size, left_on=vars, right_index=True, how='left')
cells_ini['id'] = cells_ini.index
            # conversion to numpy
#NOTE: initially dtype were np.int64 but sometime it's not enought
# however, it's not very important.
table1 = np.array(table1, dtype=np.int64)
cells = np.array(cells_ini, dtype=np.int64)
            #definition of the loop
nvar = len(vars)-1
else:
            # conversion to numpy
table1 = np.array(table1, dtype=np.int64)
table2 = np.array(table2, dtype=np.int64)
# #definition de la boucle
# def real_match_cell(k, cells):
# temp = table1[k]
# score = eval(score_str)
# idx = score.argmax()
# idx2 = cells[idx,nvar+2]
# match[k] = idx2
# cells[idx,nvar+1] -= 1
# if cells[idx,nvar+1]==0:
# print idx
# cells = np.delete(cells, idx, 0)
# if cells[idx,nvar+1]<=0:
# pdb.set_trace()
# def real_match_simple(k, table2):
# temp = table1[k]
# score = eval(score_str)
# idx = score.argmax()
# idx2 = cells[idx,nvar+1]
# match[k] = idx2
# table2 = np.delete(table2, idx, 0)
match = np.empty(n, dtype=np.int64)
percent = 0
start = time.clock()
        if method == 'cells':
            #check that every cell still has available capacity
            assert cells[:, nvar + 1].min() > 0
for k in xrange(n):
# real_match_cells(k,cells)
temp = table1[k]
score = eval(score_str)
idx = score.argmax()
# if score[idx] > 0:
# pdb.set_trace()
idx2 = cells[idx, nvar + 2]
match[k] = idx2
cells[idx, nvar + 1] -= 1
if cells[idx, nvar + 1]==0:
cells = np.delete(cells, idx, 0)
# update progress bar
percent_done = (k * 100) / n
to_display = percent_done - percent
if to_display:
chars_to_write = list("." * to_display)
offset = 9 - (percent % 10)
while offset < to_display:
chars_to_write[offset] = '|'
offset += 10
sys.stdout.write(''.join(chars_to_write))
percent = percent_done
else:
            # keep the positional indices of table2 rows that are still available
            remaining = np.arange(table2.shape[0])
            for k in xrange(n):
                # real_match_simple(k,table2)
                temp = table1[k]
                score = eval(score_str)
                idx = score.argmax()
                match[k] = remaining[idx]
                remaining = np.delete(remaining, idx)
                table2 = np.delete(table2, idx, 0)
# update progress bar
percent_done = (k * 100) / n
to_display = percent_done - percent
if to_display:
chars_to_write = list("." * to_display)
offset = 9 - (percent % 10)
while offset < to_display:
chars_to_write[offset] = '|'
offset += 10
sys.stdout.write(''.join(chars_to_write))
percent = percent_done
match = pd.Series(match, index = index_modif).sort_index()
if method == 'cells':
match_ini = match.copy()
match_count = match.value_counts()
for group in match_count.index:
nb_selected = match_count[group]
keys_group = cells_ini.loc[group,vars].tolist()
try:
match[match_ini==group] = groups2.groups[tuple(keys_group)][:nb_selected]
if nb_selected == 1 :
value = groups2.groups[tuple(keys_group)][:nb_selected][0]
match[match_ini==group] = int(value)
except:
pdb.set_trace()
        print 'time spent in real_matching :', time.clock() - start
assert match.nunique() == len(match)
return match
| gpl-3.0 |
bhanu-mnit/EvoML | evoml/subsampling/auto_segment_FEMPT.py | 1 | 11138 | # -*- coding: utf-8 -*-
"""
Copyright 2016 Bhanu Pratap and Harsh Nisar.
This file is part of the Evoml library.
The Evoml library is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License v3 or later.
Check the licesne file recieved along with the software for further details.
"""
## Algorithm: FEMPT
## Fitness of each model on its private (out-of-bag) test set.
## The fitness of an ensemble is the average RMSE of its child models, each
## evaluated on its own private test set.
## mutators: same as before.
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import numpy as np
import pandas as pd
import random
# from mutators import segment_mutator
from .evaluators import eval_each_model_PT_KNN_EG
from .mutators import segment_mutator_EG
from .util import EstimatorGene
from .util import centroid_df
from .util import distance
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_X_y, check_is_fitted
from sklearn.linear_model import LinearRegression, LassoCV
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
from sklearn.base import clone
from sklearn.metrics import mean_squared_error
def get_mdl_sample(sample_percentage, pool_data, base_estimator):
""" Returns an instance of EstimatorGene
fit with a random sample of the pool data.
Assumes the last column is the dependent column.
If sample percentage is given as None, then creates samples based
on random sample percentages.
"""
if sample_percentage == None:
#TODO: Can parameterize this min and max range too. But for now, let it flow.
        sample_percentage = random.uniform(0.1, 1)
data = pool_data.sample(frac=sample_percentage, replace = False)
X = data.iloc[:, 0:-1]
y = data.iloc[:, -1]
# Can only be used for FEMPT
return EstimatorGene(X, y, base_estimator, private_test = True)
def similar_individual(ind1, ind2):
return np.all(ind1.fitness.values == ind2.fitness.values)
class BasicSegmenter_FEMPT(BaseEstimator, RegressorMixin):
"""
Uses basic evolutionary algorithm to find the best subsets of X and trains
Linear Regression on each subset. For given row of input, prediction
is based on the model trained on segment closest to input.
Same as the BasicSegmenter, but uses list of trained models instead of DataFrames
as each individual. Done to boost performance.
    Algorithm: FEMPT
    Fitness of each model on its private (out-of-bag) test set.
    The fitness of an ensemble is the average RMSE of its child models, each
    evaluated on its own private test set.
Parameters
----------
n : Integer, optional, default, 10
The number of segments you want in your dataset.
base_estimator: estimator, default, LinearRegression
The basic estimator for all segments.
test_size : float, optional, default, 0.2
Test size that the algorithm internally uses in its
fitness function.
n_population : Integer, optional, default, 30
The number of ensembles present in population.
init_sample_percentage : float, optional, default, 0.2
Attributes
-----------
best_enstimator_ : estimator
segments_ : list of DataFrames
"""
def __init__(self, n = 10, test_size = 0.2,
n_population = 30, cxpb=0.5, mutpb=0.5, ngen=50, tournsize = 3,
init_sample_percentage = 0.2, indpb =0.20, crossover_func = tools.cxTwoPoint, statistics = True,
base_estimator = LinearRegression(), n_votes = 1, unseen_x = None, unseen_y = None):
self.n = n
self.test_size = test_size
self.cxpb = cxpb
self.mutpb = mutpb
self.ngen = ngen
self.tournsize = tournsize
self.init_sample_percentage = init_sample_percentage
#self.base_estimator = base_estimator
self.indpb = indpb
self.n_population = n_population
self.crossover_func = crossover_func
self.statistics = statistics
self.base_estimator = base_estimator
self.n_votes = n_votes
self.unseen_x = unseen_x
self.unseen_y = unseen_y
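    # Hedged usage sketch (X_train/y_train/X_test are hypothetical pandas objects;
    # the estimator concatenates X and y internally, so DataFrames/Series are expected):
    #     seg = BasicSegmenter_FEMPT(n=5, ngen=20, n_population=20,
    #                                base_estimator=LinearRegression())
    #     seg.fit(X_train, y_train)
    #     y_hat = seg.predict(X_test)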
def fit(self, X, y):
# Is returning NDFrame and hence errors in concat.
# X, y = check_X_y(X, y)
self.X_ = X
self._X_mean = X.mean()
self._X_std = X.std()
X = (X - self._X_mean)/self._X_std
# X_train, X_test, y_train, y_test = train_test_split(X, y,
# test_size=self.test_size)
# There is no test created in this. Private oob is used.
df = pd.concat([X, y], axis = 1)
# print df_train.shape
# print df_test.shape
# #print df_train.columns
# mdl = LinearRegression().fit(df_train[x_columns], df_train[y_column])
# print df_train[y_column].ndim
# mdl = LassoCV().fit(df_train[x_columns], df_train[y_column])
# print np.sqrt(mean_squared_error(mdl.predict(df_test[x_columns]), df_test[y_column]))
### Setting toolbox
creator.create("FitnessMax", base.Fitness, weights=(-1.0,))
creator.create("Individual", list , fitness=creator.FitnessMax)
toolbox = base.Toolbox()
## In this case we will also need to pass the base estimator.
toolbox.register("mdl_sample", get_mdl_sample, self.init_sample_percentage, df, self.base_estimator)
## Thinking what our individual will be? A list of scikit mdls, a list of dataframes, or a mixed class.
## Evaluations on individuals are saved and not done again if the fitness remains unchanged.
        ## In that case models don't need to be created again, but they do need to be saved for evaluation.
        # n = 10 defines an ensemble of ten. #todo: can push the parameter up top later
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.mdl_sample, self.n)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", eval_each_model_PT_KNN_EG, df = df, base_estimator = self.base_estimator, n_votes = self.n_votes)
toolbox.register("mate", self.crossover_func)
toolbox.register("mutate", segment_mutator_EG, pool_data = df, indpb = self.indpb, private_test = True)
toolbox.register("select", tools.selTournament, tournsize= self.tournsize)
pop = toolbox.population(n = self.n_population)
hof = tools.HallOfFame(1, similar=similar_individual)
#hof = tools.ParetoFront(similar=similar_individual)
def eval_unseen_per_gen(ind, unseen_x = self.unseen_x, unseen_y = self.unseen_y):
"""
            Unseen data is taken from the init params (unseen_x, unseen_y) and is a completely held-out set.
"""
ensembles = []
centroids = []
X = unseen_x.copy()
# scaling using the mean and std of the original train data.
X = (X - self._X_mean)/self._X_std
for eg_ in ind:
df_ = eg_.get_data()
ensembles.append(eg_.estimator)
centroids.append(centroid_df(df_.iloc[:,0:-1]))
y_preds_ensemble = []
ensembles = np.array(ensembles)
for row in X.values:
distances = np.array([distance(row, centroid) for centroid in centroids])
model_ixs = distances.argsort()[:self.n_votes]
models = ensembles[model_ixs]
y_pred = np.mean([mdl.predict(row)[0] for mdl in models])
y_preds_ensemble.append(y_pred)
## MSE
return mean_squared_error(y_preds_ensemble, unseen_y)
if self.statistics != None:
stats = tools.Statistics(lambda ind: ind.fitness.values)
# stats_fitness = tools.Statistics(key = lambda ind: ind.fitness.values)
# stats_unseen_performance = tools.Statistics(key = eval_unseen_per_gen)
# mstats = tools.MultiStatistics(fitness=stats_fitness, unseen = stats_unseen_performance)
# stats = tools.Statistics(eval_unseen_per_gen)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
# mstats.register("avg", np.mean)
# mstats.register("std", np.std)
# mstats.register("min", np.min)
# mstats.register("max", np.max)
else:
#None
stats = self.statistics
#stats = tools.Statistics(lambda ind: [x.shape[0] for x in ind])
self.pop, self.log = algorithms.eaSimple(pop, toolbox, cxpb=self.cxpb, mutpb=self.mutpb, ngen=self.ngen, stats=stats, halloffame= hof, verbose = True)
self.segments_ = hof[0]
# print self.segments_
#should be setting these pop, stats, hof
return self
def predict(self, X):
ensembles = []
centroids = []
# scaling using the mean and std of the original train data.
X = (X - self._X_mean)/self._X_std
for eg_ in self.segments_:
df_ = eg_.get_data()
# clf = LinearRegression()
# clf = self.base_estimator()
# clf = clf.fit(df_.iloc[:,0:-1], df_.iloc[:,-1])
# EG.estimator is already fit with the internal data.
ensembles.append(eg_.estimator)
centroids.append(centroid_df(df_.iloc[:,0:-1]))
## for sum of mse return uncomment these
#y_pred = clf.predict(df_test[x_columns])
#mse = mean_squared_error(y_pred, df_test[y_column])
#total_mse.append(mse)
#print total_mse
y_preds_ensemble = []
ensembles = np.array(ensembles)
for row in X.values:
distances = np.array([distance(row, centroid) for centroid in centroids])
# model_index = np.argmin(distances)
#todo: optional use the average of the 2 min distances ka prediction.
# get n_votes centroids closest to the row.
model_ixs = distances.argsort()[:self.n_votes]
models = ensembles[model_ixs]
# mean of all predictions.
y_pred = np.mean([mdl.predict(row)[0] for mdl in models])
y_preds_ensemble.append(y_pred)
return pd.Series(y_preds_ensemble)
| gpl-3.0 |
RPGOne/scikit-learn | sklearn/ensemble/partial_dependence.py | 25 | 15121 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
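# Small illustration of _grid_from_X (values are hypothetical): for
#     X = np.array([[0., 10.], [1., 20.]])
# each column has only two unique values, fewer than the default grid_resolution,
# so the axes are the unique values themselves and the returned grid is
# cartesian([[0., 1.], [10., 20.]]) with shape (4, 2).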
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
kyoungrok0517/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter2_MorePyMC/separation_plot.py | 86 | 1494 | # separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
p: The proportions/probabilities, can be a nxM matrix which represents M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
except:
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
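if __name__ == '__main__':
    # Hedged demo on synthetic data (not part of the original module): uniform
    # probabilities, Bernoulli outcomes, a single model column.
    np.random.seed(0)
    p = np.random.uniform(size=200)
    y = (np.random.uniform(size=200) < p).astype(float)
    separation_plot(p, y)
    plt.show()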
| mit |
lxneng/incubator-airflow | scripts/perf/scheduler_ops_metrics.py | 5 | 7245 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import pandas as pd
import sys
from airflow import configuration, settings
from airflow.jobs import SchedulerJob
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.utils import timezone
from airflow.utils.state import State
SUBDIR = 'scripts/perf/dags'
DAG_IDS = ['perf_dag_1', 'perf_dag_2']
MAX_RUNTIME_SECS = 6
class SchedulerMetricsJob(SchedulerJob):
"""
This class extends SchedulerJob to instrument the execution performance of
task instances contained in each DAG. We want to know if any DAG
is starved of resources, and this will be reflected in the stats printed
out at the end of the test run. The following metrics will be instrumented
for each task instance (dag_id, task_id, execution_date) tuple:
1. Queuing delay - time taken from starting the executor to the task
instance to be added to the executor queue.
2. Start delay - time taken from starting the executor to the task instance
to start execution.
3. Land time - time taken from starting the executor to task instance
completion.
4. Duration - time taken for executing the task instance.
The DAGs implement bash operators that call the system wait command. This
is representative of typical operators run on Airflow - queries that are
run on remote systems and spend the majority of their time on I/O wait.
To Run:
$ python scripts/perf/scheduler_ops_metrics.py [timeout]
You can specify timeout in seconds as an optional parameter.
Its default value is 6 seconds.
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerMetricsJob'
}
def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
successful_tis = [x for x in tis if x.state == State.SUCCESS]
ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration) for ti in successful_tis]
ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',
'execution_date',
'queue_delay',
'start_delay', 'land_time',
'duration'])
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print('DAG {}'.format(dag_id))
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)],
columns=['dag_id', 'task_id', 'execution_date', 'state']))
session.commit()
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
# the tasks in perf_dag_1 and per_dag_2 have a daily schedule interval.
num_task_instances = sum([(timezone.utcnow() - task.start_date).days
for dag in dags for task in dag.tasks])
if (len(successful_tis) == num_task_instances or
(timezone.utcnow() - self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if len(successful_tis) == num_task_instances:
self.log.info("All tasks processed! Printing stats.")
else:
self.log.info("Test timeout reached. "
"Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit()
def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id.in_(DAG_IDS),
).all()
for dr in drs:
logging.info('Deleting DagRun :: {}'.format(dr))
session.delete(dr)
def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
for ti in tis:
logging.info('Deleting TaskInstance :: {}'.format(ti))
session.delete(ti)
session.commit()
def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dms = session.query(DagModel).filter(
DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))
dm.is_paused = is_paused
session.commit()
def main():
global MAX_RUNTIME_SECS
if len(sys.argv) > 1:
try:
max_runtime_secs = int(sys.argv[1])
if max_runtime_secs < 1:
raise ValueError
MAX_RUNTIME_SECS = max_runtime_secs
except ValueError:
logging.error('Specify a positive integer for timeout.')
sys.exit(1)
configuration.load_test_config()
set_dags_paused_state(False)
clear_dag_runs()
clear_dag_task_instances()
job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR)
job.run()
if __name__ == "__main__":
main()
| apache-2.0 |
hmendozap/auto-sklearn | test/test_pipeline/components/classification/test_libsvm_svc.py | 1 | 3192 | import unittest
from autosklearn.pipeline.components.classification.libsvm_svc import LibSVM_SVC
from autosklearn.pipeline.util import _test_classifier, \
_test_classifier_predict_proba, get_dataset
import numpy as np
import sklearn.metrics
import sklearn.preprocessing
import sklearn.svm
class LibSVM_SVCComponentTest(unittest.TestCase):
def test_default_configuration(self):
for i in range(10):
predictions, targets = _test_classifier(LibSVM_SVC, dataset='iris')
self.assertAlmostEqual(0.96,
sklearn.metrics.accuracy_score(predictions, targets))
def test_default_configuration_predict_proba(self):
for i in range(10):
predictions, targets = _test_classifier_predict_proba(
LibSVM_SVC, sparse=True, dataset='digits',
train_size_maximum=500)
self.assertAlmostEqual(4.6680593525563063,
sklearn.metrics.log_loss(targets,
predictions))
for i in range(10):
predictions, targets = _test_classifier_predict_proba(
LibSVM_SVC, sparse=True, dataset='iris')
self.assertAlmostEqual(0.8649665185853217,
sklearn.metrics.log_loss(targets,
predictions))
# 2 class
for i in range(10):
X_train, Y_train, X_test, Y_test = get_dataset(dataset='iris')
remove_training_data = Y_train == 2
remove_test_data = Y_test == 2
X_train = X_train[~remove_training_data]
Y_train = Y_train[~remove_training_data]
X_test = X_test[~remove_test_data]
Y_test = Y_test[~remove_test_data]
ss = sklearn.preprocessing.StandardScaler()
X_train = ss.fit_transform(X_train)
configuration_space = LibSVM_SVC.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
cls = LibSVM_SVC(random_state=1, **{hp_name: default[hp_name]
for hp_name in default
if default[hp_name] is not None})
cls = cls.fit(X_train, Y_train)
prediction = cls.predict_proba(X_test)
self.assertAlmostEqual(sklearn.metrics.log_loss(Y_test, prediction),
0.69323680119641773)
def test_default_configuration_binary(self):
for i in range(10):
predictions, targets = _test_classifier(LibSVM_SVC,
make_binary=True)
self.assertAlmostEqual(1.0,
sklearn.metrics.accuracy_score(
predictions, targets))
def test_target_algorithm_multioutput_multiclass_support(self):
cls = sklearn.svm.SVC()
X = np.random.random((10, 10))
y = np.random.randint(0, 1, size=(10, 10))
self.assertRaisesRegexp(ValueError, 'bad input shape \(10, 10\)',
cls.fit, X, y)
| bsd-3-clause |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/nltk/draw/dispersion.py | 5 | 1744 | # Natural Language Toolkit: Dispersion Plots
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Steven Bird <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A utility for displaying lexical dispersion.
"""
def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"):
"""
Generate a lexical dispersion plot.
:param text: The source text
:type text: list(str) or enum(str)
:param words: The target words
:type words: list of str
:param ignore_case: flag to set if case should be ignored when searching text
:type ignore_case: bool
"""
try:
from matplotlib import pylab
except ImportError:
        raise ValueError('The plot function requires matplotlib to be installed. '
                         'See http://matplotlib.org/')
text = list(text)
words.reverse()
if ignore_case:
words_to_comp = list(map(str.lower, words))
text_to_comp = list(map(str.lower, text))
else:
words_to_comp = words
text_to_comp = text
points = [(x,y) for x in range(len(text_to_comp))
for y in range(len(words_to_comp))
if text_to_comp[x] == words_to_comp[y]]
if points:
x, y = list(zip(*points))
else:
x = y = ()
pylab.plot(x, y, "b|", scalex=.1)
pylab.yticks(list(range(len(words))), words, color="b")
pylab.ylim(-1, len(words))
pylab.title(title)
pylab.xlabel("Word Offset")
pylab.show()
if __name__ == '__main__':
import nltk.compat
from nltk.corpus import gutenberg
words = ['Elinor', 'Marianne', 'Edward', 'Willoughby']
dispersion_plot(gutenberg.words('austen-sense.txt'), words)
| gpl-2.0 |
legaultmarc/grstools | grstools/scripts/evaluate.py | 1 | 11742 | """
Evaluate the performance of constructed GRS or of the variant selection
procedure.
"""
# This file is part of grstools.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Marc-Andre Legault
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import argparse
import json
import logging
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from genetest.statistics import model_map
from genetest.phenotypes.dataframe import DataFrameContainer
from ..utils import regress as _regress
from ..utils import parse_computed_grs_file
plt.style.use("ggplot")
matplotlib.rc("font", size="7")
logger = logging.getLogger(__name__)
def _parse_data(args):
phenotypes = pd.read_csv(
args.phenotypes_filename,
index_col=args.phenotypes_sample_column,
sep=args.phenotypes_separator,
)
phenotypes.index = phenotypes.index.astype(str)
grs = parse_computed_grs_file(args.grs_filename)
return pd.merge(phenotypes, grs, left_index=True, right_index=True)
def regress(args):
# Parse the GRS and phenotypes.
df = _parse_data(args)
# Standardize if needed.
if args.std_y:
if args.test == "logistic":
logger.warning(
"Ignoring the --std-y option for logistic regression. This "
"flag should only be used for linear regression."
)
else:
y = df[args.phenotype]
df[args.phenotype] = (y - y.mean()) / y.std()
# Do the regression.
formula = "{} ~ grs".format(args.phenotype)
stats = _regress(formula, args.test, DataFrameContainer(df))
if args.no_plot:
print(json.dumps(stats))
return
# Rename columns for plotting.
df = df[[args.phenotype, "grs"]]
df.columns = ("y", "grs")
# Create the plot.
if args.test == "linear":
n = df.dropna().shape[0]
logger.info("Running linear regression based on {} samples.".format(n))
return _linear_regress_plot(df, stats, args.out, args.phenotype_label)
if args.test == "logistic":
return _logistic_regress_plot(df, stats, args.out, args.phenotype_label)
else:
        raise ValueError("Unsupported test type: {}".format(args.test))
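# A zero-size, invisible Rectangle serves as a placeholder legend handle so
# that text-only entries (the regression statistics) can be added to the legend.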
def _get_dummy_artist():
return Rectangle(
(0, 0), 1, 1, fc="w", fill=False, edgecolor="none", linewidth=0
)
def _linear_regress_plot(df, stats, out, phenotype_label):
data_marker, = plt.plot(df["grs"], df["y"], "o", markersize=0.5)
xmin = df["grs"].min()
xmax = df["grs"].max()
line_marker, = plt.plot(
[xmin, xmax],
[stats["beta"] * xmin + stats["intercept"],
stats["beta"] * xmax + stats["intercept"]],
"-",
linewidth=0.5,
color="black",
)
plt.xlabel("GRS")
plt.ylabel(phenotype_label)
# Add extra info to the legend.
plt.legend(
(_get_dummy_artist(), data_marker, line_marker),
(
r"$\beta={:.3g}\ ({:.3g}, {:.3g}),\ (p={:.4g},\ R^2={:.3g})$"
"".format(stats["beta"], stats["CI"][0], stats["CI"][1],
stats["p-value"], stats["R2"]),
"data",
"$y = {:.3g}grs + {:.3g}$"
"".format(stats["beta"], stats["intercept"])
)
)
if out is None:
plt.show()
else:
if out.endswith("png"):
plt.savefig(out, dpi=500)
else:
plt.savefig(out)
def _logistic_regress_plot(df, stats, out, phenotype_label):
odds_ratio = np.exp(stats["beta"])
odds_ratio_ci = [np.exp(i) for i in stats["CI"]]
# Add the odd ratio or else the stats are for nothing.
artists = [_get_dummy_artist()]
labels = [
"OR={:.3f} ({:.3f}, {:.3f}) (p={:.3g})"
"".format(odds_ratio, odds_ratio_ci[0], odds_ratio_ci[1],
stats["p-value"])
]
levels = sorted(df["y"].dropna().unique())
boxplot_data = []
for i, level in enumerate(levels):
data = df.loc[df["y"] == level, "grs"]
boxplot_data.append(data.dropna().values)
noise = (np.random.random(data.shape[0]) - 0.5) / 4
lines, = plt.plot(
np.full_like(data, i + 1) + noise,
data,
"o",
markersize=0.5,
)
artists.append(lines)
labels.append(
"GRS mean = {:.4f} ($\sigma={:.4f}$)"
"".format(data.mean(), data.std())
)
plt.boxplot(boxplot_data, showfliers=False, medianprops={"color": "black"})
if phenotype_label == "Phenotype":
phenotype_label = "Phenotype level"
plt.xlabel(phenotype_label)
plt.xticks(range(1, len(levels) + 1), levels)
plt.ylabel("GRS")
plt.legend(artists, labels)
if out is None:
plt.show()
else:
if out.endswith("png"):
plt.savefig(out, dpi=500)
else:
plt.savefig(out)
def dichotomize_plot(args):
"""Compares differente quantiles of dichotomization."""
# Read the files.
df = _parse_data(args)
df["group"] = np.nan
df["intercept"] = 1
df = df[["group", "intercept", "grs", args.phenotype]]
# Init the statistical test.
test = model_map[args.test]()
qs = []
upper_ci = []
lower_ci = []
ns = []
betas = []
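    # Sweep the dichotomization quantile: at each q keep only samples below the
    # q-th and above the (1 - q)-th GRS quantile, regress the phenotype on the
    # resulting binary group, and record the effect size, its CI and the
    # effective sample size.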
for q in np.linspace(0.05, 0.5, 200):
low, high = df[["grs"]].quantile([q, 1 - q]).values.T[0]
df["group"] = np.nan
df.loc[df["grs"] <= low, "group"] = 0
df.loc[df["grs"] >= high, "group"] = 1
cur = df.dropna()
stats = test.fit(
cur[[args.phenotype]], cur[["group", "intercept"]]
)
qs.append(q)
betas.append(stats["group"]["coef"])
ns.append(df.dropna().shape[0])
upper_ci.append(stats["group"]["upper_ci"])
lower_ci.append(stats["group"]["lower_ci"])
fig, ax1 = plt.subplots()
beta_line, = ax1.plot(qs, betas)
ci_line, = ax1.plot(qs, upper_ci, "--", color="gray", linewidth=0.2)
ax1.plot(qs, lower_ci, "--", color="gray", linewidth=0.2)
ax1.set_ylabel(r"$\beta$")
ax1.set_xlabel("Quantile used to form groups (0.5 is median)")
ax2 = ax1.twinx()
ax2.grid(False, which="both")
n_line, = ax2.plot(qs, ns, "-", linewidth=0.2)
ax2.set_ylabel("effective n")
plt.legend(
(beta_line, ci_line, n_line),
(r"$\beta$", "95% CI", "$n$"),
loc="upper center"
)
if args.out:
plt.savefig(args.out)
else:
plt.show()
def roc_curve(args):
from sklearn.metrics import roc_curve, auc
grs = None
grs_filenames = [args.grs_filename] + args.other_grs
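    # Load every GRS file, name its column after the file name and inner-join
    # on the sample index so all ROC curves are computed on the same samples.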
for filename in grs_filenames:
name = os.path.basename(filename)
if grs is None:
grs = parse_computed_grs_file(filename)
grs.columns = [name]
else:
_cur = parse_computed_grs_file(filename)
_cur.columns = [name]
grs = pd.merge(grs, _cur, left_index=True, right_index=True)
grs_names = grs.columns
phenotypes = pd.read_csv(
args.phenotypes_filename,
index_col=args.phenotypes_sample_column,
sep=args.phenotypes_separator,
)
phenotypes.index = phenotypes.index.astype(str)
df = phenotypes.join(grs)
df = df.dropna()
artists = []
labels = []
plt.figure(figsize=(5, 5))
for name in grs_names:
fpr, tpr, _ = roc_curve(df[args.phenotype], df[name])
_auc = auc(fpr, tpr)
artist, = plt.plot(
fpr, tpr, linewidth=0.4,
)
artists.append(artist)
if len(grs_names) > 1:
labels.append("{}; AUC={:.3f}".format(name, _auc))
else:
labels.append("AUC={:.3f}".format(_auc))
plt.legend(artists, labels)
plt.plot([0, 1], [0, 1], "--", color="gray", linewidth=0.2)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
if args.out:
plt.savefig(args.out)
else:
plt.show()
def main():
args = parse_args()
command_handlers = {
"regress": regress,
"dichotomize-plot": dichotomize_plot,
"roc": roc_curve,
}
command_handlers[args.command](args)
def _add_phenotype_arguments(parser):
parser.add_argument("--phenotypes-filename", type=str)
parser.add_argument("--phenotypes-sample-column", type=str,
default="sample")
parser.add_argument("--phenotypes-separator", type=str,
default=",")
parser.add_argument("--phenotype", type=str)
def parse_args():
parser = argparse.ArgumentParser(
description="Utilities to evaluate the performance of GRS."
)
parent = argparse.ArgumentParser(add_help=False)
# General arguments.
parent.add_argument(
"grs_filename",
help="Path to the file containing the computed GRS."
)
parent.add_argument("--out", "-o", default=None, type=str)
subparser = parser.add_subparsers(
dest="command",
)
subparser.required = True
# Regress
# TODO
# To evaluate the performance of discretized GRS, it might be interesting
    # to generate similar plots of y ~ GRS. Then it could be qregress for
# continuous GRS and dregress for discretized GRS.
regress_parse = subparser.add_parser(
"regress",
help="Regress the GRS on a discrete or continuous outcome.",
parents=[parent]
)
_add_phenotype_arguments(regress_parse)
regress_parse.add_argument("--test", type=str)
regress_parse.add_argument("--no-plot", action="store_true")
regress_parse.add_argument(
"--std-y",
action="store_true",
help="Standardize the phenotype before the regression. This should "
"only be used for linear regression."
)
regress_parse.add_argument(
"--phenotype-label",
default="Phenotype",
type=str,
help="Label for the phenotype axis."
)
# Dichotomize plot.
dichotomize_parse = subparser.add_parser(
"dichotomize-plot",
help="A plot to help identify ideal dichotmizatin parameters.",
parents=[parent]
)
_add_phenotype_arguments(dichotomize_parse)
dichotomize_parse.add_argument("--test", type=str)
# ROC and roll curves.
roc_parse = subparser.add_parser(
"roc",
help="Draw a ROC curve for a GRS (given a binary phenotype).",
parents=[parent]
)
roc_parse.add_argument(
"other_grs",
help="Other GRS to include in the ROC plot.",
nargs="*"
)
_add_phenotype_arguments(roc_parse)
return parser.parse_args()
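# Usage sketch (an assumption, not part of this module): if the package exposes
# main() as a console script -- the exact command name is not shown here, so
# "grs-evaluate" and all file names below are illustrative -- invocations would
# follow the argparse definitions above:
#
#   grs-evaluate regress computed.grs \
#       --phenotypes-filename phenotypes.csv --phenotype ldl_c \
#       --test linear --out regression.png
#
#   grs-evaluate roc computed.grs other.grs \
#       --phenotypes-filename phenotypes.csv --phenotype case_status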
| mit |
danieledwardknudsen/weather | modelbuilding.py | 1 | 8787 | import logging as l
import pandas as pd
import multiprocessing as mp
from time import time
import os
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn import linear_model
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.externals import joblib
global count
global num_models
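# Module-level progress counters: they are updated in the parent process by the
# done() callback and used to report overall progress.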
count = 0
num_models = 0
def run(cfg: dict) -> None:
""" Builds forecasting models and saves them and their results to disk"""
training_data = ""
# subsets of the training data that we will use to test model performance relative to input size
subsets = cfg['MODELING']['subsets'].split(', ')
# get X and Y columns
one_row = pd.read_csv(cfg['EXPORT']['filename'], nrows=1)
cols = one_row.columns
Y_cols = list()
# get all the y columns present in this dataset
# assuming they're identified with 'future'
for col in cols:
if (str(col)).startswith('future'):
Y_cols.append(str(col))
# get directory we will store our results in
base_dir = '{0}/{1}'.format(cfg['FILES']['cwd'],
cfg['MODELING']['results_directory'])
# if the directory doesn't exist, create it
if not os.path.exists(base_dir):
os.mkdir(base_dir)
    # Update the module-level counters so the done() callback reports progress
    # against the correct total.
    global count, num_models
    num_models = 4 * len(Y_cols) * len(subsets)
    count = 0
# read in the full training set
#training_data = pd.read_csv(
# cfg['EXPORT']['filename'])
# filter out any records that don't have a real reading in the target variable
# for y_col in Y_cols:
# training_data = training_data[training_data[y_col] > 0]
# just in case, remove any null values
#training_data.dropna(axis=0, how='any', inplace=True)
# for each subset of the training set...
for subset in subsets:
# create a subset directory
subs_dir = '{0}/{1}_{2}'.format(base_dir, 'subset_', subset)
if not os.path.exists(subs_dir):
os.mkdir(subs_dir)
# for each target feature...
for col in Y_cols:
# pool of workers
workers = mp.pool.Pool(
(int(cfg['RUN_CONFIGURATION']['max_threads'])))
# X data
training_data = pd.read_csv(cfg['EXPORT']['filename'])
training_data = training_data[training_data[col] > 0]
X = training_data.drop(Y_cols, axis=1)
# get the Y_set
Y = training_data[col]
del(training_data)
#Y = training_data[training_data[col] > 0][col]
#X = X.reindex(X.index.intersection(Y.index))
# split the training set into trainin data and testing data
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, train_size=int(subset), test_size=int(cfg['MODELING']['test_size']), random_state=int(time()), shuffle=True)
args = (X_train, Y_train, X_test, Y_test, col, cfg, subs_dir)
# Start ridge regression
if not cfg['MODELING']['ridge'] == 'False':
if cfg['MODELING']['ridge'] == 'True':
workers.apply_async(
func=generate_ridge_model, args=args, callback=done)
# Start linear regression
if not cfg['MODELING']['linreg'] == 'False':
if cfg['MODELING']['linreg'] == 'True':
workers.apply_async(
func=generate_linreg_model, args=args, callback=done)
# Start stochastic gradient descent regression
if not cfg['MODELING']['sgd'] == 'False':
if cfg['MODELING']['sgd'] == 'True':
workers.apply_async(
func=generate_sgd_model, args=args, callback=done)
# Start random forest regression
if not cfg['MODELING']['random_forest'] == 'False':
if cfg['MODELING']['random_forest'] == 'True':
workers.apply_async(
func=generate_rf_model, args=args, callback=done)
workers.close()
workers.join()
def done(_result=None) -> None:
    """Progress callback for Pool.apply_async; the worker's return value is ignored."""
    global count
    count = count + 1
    print('completed {0} out of {1} models.'.format(
        count, num_models))
def generate_rf_model(X_train: pd.DataFrame, Y_train: pd.DataFrame, X_test: pd.DataFrame, Y_test: pd.DataFrame, column: str, cfg: dict, subs_dir: str) -> None:
""" Generate a random forest model from a multi-column X dataframe,
a single column Y dataframe, and a column name string that will
be used to generate the model file name. The function then returns
a dataframe that is the intersection of the test set and result set
"""
excel_path, pkl_path = remove_existing_data(subs_dir, 'rf', column)
writer = pd.ExcelWriter(excel_path)
pipeline = make_pipeline(preprocessing.StandardScaler(
), RandomForestRegressor(criterion="mae"))
hyperparameters = {'randomforestregressor__max_features': ['sqrt', 'log2'],
'randomforestregressor__max_depth': [5],
'randomforestregressor__n_estimators': [10, 100, 500]}
clf = GridSearchCV(pipeline, hyperparameters, verbose=True)
clf.fit(X_train, Y_train)
joblib.dump(clf, pkl_path)
Y_pred = pd.DataFrame(clf.predict(X_test))
Y_test = pd.DataFrame(Y_test)
Y_pred.to_excel(writer, sheet_name='pred')
Y_test.to_excel(writer, sheet_name='test')
writer.save()
    # Progress is reported via the apply_async callback in the parent process.
def generate_sgd_model(X_train: pd.DataFrame, Y_train: pd.DataFrame, X_test: pd.DataFrame, Y_test: pd.DataFrame, column: str, cfg: dict, subs_dir: str) -> None:
""" Generate a stochastic gradient descent model from a multi-column X dataframe,
a single column Y dataframe, and a column name string that will
be used to generate the model file name.
"""
excel_path, pkl_path = remove_existing_data(subs_dir, 'sgd', column)
writer = pd.ExcelWriter(excel_path)
    # SGDRegressor has no `criterion` parameter (passing one raises a TypeError);
    # an MAE-like objective would instead use loss="epsilon_insensitive" with epsilon=0.
    pipeline = make_pipeline(preprocessing.StandardScaler(),
                             linear_model.SGDRegressor())
hyperparameters = {'sgdregressor__alpha': [.00001, 1, 100],
'sgdregressor__max_iter': [10, 100, 1000, 10000], }
clf = GridSearchCV(pipeline, hyperparameters, verbose=True)
clf.fit(X_train, Y_train)
joblib.dump(clf, pkl_path)
Y_pred = pd.DataFrame(clf.predict(X_test))
Y_test = pd.DataFrame(Y_test)
Y_pred.to_excel(writer, sheet_name='pred')
Y_test.to_excel(writer, sheet_name='test')
writer.save()
    # Progress is reported via the apply_async callback in the parent process.
def generate_linreg_model(X_train: pd.DataFrame, Y_train: pd.DataFrame, X_test: pd.DataFrame, Y_test: pd.DataFrame, column: str, cfg: dict, subs_dir: str) -> None:
""" Generate a stochastic gradient descent model from a multi-column X dataframe,
a single column Y dataframe, and a column name string that will
be used to generate the file name.
"""
excel_path, pkl_path = remove_existing_data(subs_dir, 'linreg', column)
writer = pd.ExcelWriter(excel_path)
clf = linear_model.LinearRegression(normalize=True)
clf.fit(X_train, Y_train)
joblib.dump(clf, pkl_path)
Y_pred = pd.DataFrame(clf.predict(X_test))
Y_test = pd.DataFrame(Y_test)
Y_pred.to_excel(writer, sheet_name='pred')
Y_test.to_excel(writer, sheet_name='test')
writer.save()
    # Progress is reported via the apply_async callback in the parent process.
def generate_ridge_model(X_train: pd.DataFrame, Y_train: pd.DataFrame, X_test: pd.DataFrame, Y_test: pd.DataFrame, column: str, cfg: dict, subs_dir: str) -> None:
""" Generate a ridge model from a multi-column X dataframe,
a single column Y dataframe, and a column name string that will
be used to generate the model file name.
"""
excel_path, pkl_path = remove_existing_data(subs_dir, 'ridge', column)
writer = pd.ExcelWriter(excel_path)
clf = linear_model.RidgeCV(
alphas=(.0001, 0.1, 1.0, 10.0, 100), normalize=True)
clf.fit(X_train, Y_train)
joblib.dump(clf, pkl_path)
Y_pred = pd.DataFrame(clf.predict(X_test))
Y_test = pd.DataFrame(Y_test)
Y_pred.to_excel(writer, sheet_name='pred')
Y_test.to_excel(writer, sheet_name='test')
writer.save()
    # Progress is reported via the apply_async callback in the parent process.
def remove_existing_data(subs_dir: str, model: str, column: str)->(str, str):
""" Removes the existing pickle and spreadsheet for this model
returns the pickle path and spreadsheet path to save new results """
excel_path = '{0}/{1}{2}.xlsx'.format(subs_dir, model, column)
if(os.path.exists(excel_path)):
os.remove(excel_path)
pkl_path = '{0}/{1}{2}.pkl'.format(subs_dir, model, column)
if(os.path.exists(pkl_path)):
os.remove(pkl_path)
return (excel_path, pkl_path)
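# Minimal sketch of the configuration mapping run() expects. The section and key
# names are taken from the lookups in this module; the concrete values are
# illustrative assumptions (note that values are strings, since the code applies
# int() conversions and compares flags against 'True'/'False'):
#
#   cfg = {
#       'FILES': {'cwd': '/path/to/project'},
#       'EXPORT': {'filename': 'training_data.csv'},
#       'MODELING': {
#           'subsets': '1000, 10000',
#           'results_directory': 'results',
#           'test_size': '1000',
#           'ridge': 'True', 'linreg': 'True', 'sgd': 'False', 'random_forest': 'True',
#       },
#       'RUN_CONFIGURATION': {'max_threads': '4'},
#   }
#   run(cfg)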
| gpl-3.0 |